diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index 2caab1b9..38413705 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,4 +1,4 @@
-# Copyright 2021-2024, The Khronos Group Inc. Inc.
+# Copyright 2021-2024, The Khronos Group Inc.
#
# SPDX-License-Identifier: CC-BY-4.0
diff --git a/.github/workflows/msvc-build-preset.yml b/.github/workflows/msvc-build-preset.yml
index 4ac2b583..7479ca5e 100644
--- a/.github/workflows/msvc-build-preset.yml
+++ b/.github/workflows/msvc-build-preset.yml
@@ -34,7 +34,7 @@ jobs:
lfs: true
- name: Get modern CMake and Ninja
- uses: lukka/get-cmake@v3.27.7
+ uses: lukka/get-cmake@v3.28.3
- name: Add msbuild to PATH
uses: microsoft/setup-msbuild@v1.3
diff --git a/.reuse/dep5 b/.reuse/dep5
index 02ee9cd1..ab32d6b8 100644
--- a/.reuse/dep5
+++ b/.reuse/dep5
@@ -69,7 +69,7 @@ License: MIT
Comment: Unmodified, vendored copy of commit e6c415837c5a487809fdbb2f71f1080d454eb99a
Files: external/python/jinja2/*
- external/python/Jinja2-2.10.3.dist-info/*
+ external/python/Jinja2-2.11.3.dist-info/*
Copyright: 2013-2019 by the Jinja team
2007 Pallets
License: BSD-3-Clause
diff --git a/changes/conformance/mr.3043.gl.md b/changes/conformance/mr.3043.gl.md
new file mode 100644
index 00000000..9ff33140
--- /dev/null
+++ b/changes/conformance/mr.3043.gl.md
@@ -0,0 +1 @@
+Fix: Correct the CTS warning emitted when Wrist Z variance is above the 14 degree threshold.
diff --git a/changes/conformance/mr.3044.gl.1.md b/changes/conformance/mr.3044.gl.1.md
new file mode 100644
index 00000000..106cdb44
--- /dev/null
+++ b/changes/conformance/mr.3044.gl.1.md
@@ -0,0 +1 @@
+Improvement: Code cleanup and documentation in the conformance layer.
diff --git a/changes/conformance/mr.3044.gl.md b/changes/conformance/mr.3044.gl.md
new file mode 100644
index 00000000..34c2b057
--- /dev/null
+++ b/changes/conformance/mr.3044.gl.md
@@ -0,0 +1,4 @@
+---
+- issue.1883.gl
+---
+New test: Use the conformance layer to validate that `XrEventDataInteractionProfileChanged` is only queued during `xrSyncActions`.
diff --git a/changes/conformance/mr.3151.gl.md b/changes/conformance/mr.3151.gl.md
new file mode 100644
index 00000000..870e79ad
--- /dev/null
+++ b/changes/conformance/mr.3151.gl.md
@@ -0,0 +1 @@
+Improvement: Reduce the maximum time allowed for session state transitions in debug mode from 1 hour to 1 minute, and add a notice message explaining this.
diff --git a/external/python/Jinja2-2.10.3.dist-info/RECORD b/external/python/Jinja2-2.10.3.dist-info/RECORD
deleted file mode 100644
index 88dc35cb..00000000
--- a/external/python/Jinja2-2.10.3.dist-info/RECORD
+++ /dev/null
@@ -1,62 +0,0 @@
-Jinja2-2.10.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-Jinja2-2.10.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
-Jinja2-2.10.3.dist-info/METADATA,sha256=RD_zg-jTU2K8LbwVqRNYNxo-RmRD-g08ZimL2R1mUEg,3380
-Jinja2-2.10.3.dist-info/RECORD,,
-Jinja2-2.10.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-Jinja2-2.10.3.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
-Jinja2-2.10.3.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
-Jinja2-2.10.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
-jinja2/__init__.py,sha256=9fIkcl501fvTG_9oTAHveZrNmWHUnC3cb1ccCJQ1VzQ,2616
-jinja2/__pycache__/__init__.cpython-39.pyc,,
-jinja2/__pycache__/_compat.cpython-39.pyc,,
-jinja2/__pycache__/_identifier.cpython-39.pyc,,
-jinja2/__pycache__/asyncfilters.cpython-39.pyc,,
-jinja2/__pycache__/asyncsupport.cpython-39.pyc,,
-jinja2/__pycache__/bccache.cpython-39.pyc,,
-jinja2/__pycache__/compiler.cpython-39.pyc,,
-jinja2/__pycache__/constants.cpython-39.pyc,,
-jinja2/__pycache__/debug.cpython-39.pyc,,
-jinja2/__pycache__/defaults.cpython-39.pyc,,
-jinja2/__pycache__/environment.cpython-39.pyc,,
-jinja2/__pycache__/exceptions.cpython-39.pyc,,
-jinja2/__pycache__/ext.cpython-39.pyc,,
-jinja2/__pycache__/filters.cpython-39.pyc,,
-jinja2/__pycache__/idtracking.cpython-39.pyc,,
-jinja2/__pycache__/lexer.cpython-39.pyc,,
-jinja2/__pycache__/loaders.cpython-39.pyc,,
-jinja2/__pycache__/meta.cpython-39.pyc,,
-jinja2/__pycache__/nativetypes.cpython-39.pyc,,
-jinja2/__pycache__/nodes.cpython-39.pyc,,
-jinja2/__pycache__/optimizer.cpython-39.pyc,,
-jinja2/__pycache__/parser.cpython-39.pyc,,
-jinja2/__pycache__/runtime.cpython-39.pyc,,
-jinja2/__pycache__/sandbox.cpython-39.pyc,,
-jinja2/__pycache__/tests.cpython-39.pyc,,
-jinja2/__pycache__/utils.cpython-39.pyc,,
-jinja2/__pycache__/visitor.cpython-39.pyc,,
-jinja2/_compat.py,sha256=mS-2MTiCpkkOd-JKvxQQxNQFEEu-YVwUnwVHruplYoo,2685
-jinja2/_identifier.py,sha256=W1QBSY-iJsyt6oR_nKSuNNCzV95vLIOYgUNPUI1d5gU,1726
-jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
-jinja2/asyncsupport.py,sha256=UErQ3YlTLaSjFb94P4MVn08-aVD9jJxty2JVfMRb-1M,7878
-jinja2/bccache.py,sha256=FF8Qij2CGK6oKdHb9Vz396YHh1kssAxqX2XsB6uMvLk,12719
-jinja2/compiler.py,sha256=BqC5U6JxObSRhblyT_a6Tp5GtEU5z3US1a4jLQaxxgo,65386
-jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
-jinja2/debug.py,sha256=BAWmOZJGeOKY6g3OCJ1v0_GrUKz1LHp4Z-3m_tJIT-U,12281
-jinja2/defaults.py,sha256=Em-95hmsJxIenDCZFB1YSvf9CNhe9rBmytN3yUrBcWA,1400
-jinja2/environment.py,sha256=VnkAkqw8JbjZct4tAyHlpBrka2vqB-Z58RAP-32P1ZY,50849
-jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
-jinja2/ext.py,sha256=atMQydEC86tN1zUsdQiHw5L5cF62nDbqGue25Yiu3N4,24500
-jinja2/filters.py,sha256=yOAJk0MsH-_gEC0i0U6NweVQhbtYaC-uE8xswHFLF4w,36528
-jinja2/idtracking.py,sha256=2GbDSzIvGArEBGLkovLkqEfmYxmWsEf8c3QZwM4uNsw,9197
-jinja2/lexer.py,sha256=ySEPoXd1g7wRjsuw23uimS6nkGN5aqrYwcOKxCaVMBQ,28559
-jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
-jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
-jinja2/nativetypes.py,sha256=_sJhS8f-8Q0QMIC0dm1YEdLyxEyoO-kch8qOL5xUDfE,7308
-jinja2/nodes.py,sha256=L10L_nQDfubLhO3XjpF9qz46FSh2clL-3e49ogVlMmA,30853
-jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
-jinja2/parser.py,sha256=lPzTEbcpTRBLw8ii6OYyExHeAhaZLMA05Hpv4ll3ULk,35875
-jinja2/runtime.py,sha256=nmO38W08p1d1S4MdvtCgzzLIqVs4J4nwwiMZjZmVQaY,27644
-jinja2/sandbox.py,sha256=i6nacG3tCN-4UwEpzDpl6TcQdCR-zRMsoEahiQ01LiY,17080
-jinja2/tests.py,sha256=18yVEZwhD79Osy5wlK_m0GTtY_9OPtiSklURgvltGo8,4214
-jinja2/utils.py,sha256=xYUsq6h5_xDLVT4b5dvdjQ6H7S9WfUglrVIJ6qyPTRw,20501
-jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
diff --git a/external/python/Jinja2-2.10.3.dist-info/INSTALLER b/external/python/Jinja2-2.11.3.dist-info/INSTALLER
similarity index 100%
rename from external/python/Jinja2-2.10.3.dist-info/INSTALLER
rename to external/python/Jinja2-2.11.3.dist-info/INSTALLER
diff --git a/external/python/Jinja2-2.10.3.dist-info/LICENSE.rst b/external/python/Jinja2-2.11.3.dist-info/LICENSE.rst
similarity index 100%
rename from external/python/Jinja2-2.10.3.dist-info/LICENSE.rst
rename to external/python/Jinja2-2.11.3.dist-info/LICENSE.rst
diff --git a/external/python/Jinja2-2.10.3.dist-info/METADATA b/external/python/Jinja2-2.11.3.dist-info/METADATA
similarity index 95%
rename from external/python/Jinja2-2.10.3.dist-info/METADATA
rename to external/python/Jinja2-2.11.3.dist-info/METADATA
index 11c545a4..1af8df0f 100644
--- a/external/python/Jinja2-2.10.3.dist-info/METADATA
+++ b/external/python/Jinja2-2.11.3.dist-info/METADATA
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: Jinja2
-Version: 2.10.3
+Version: 2.11.3
Summary: A very fast and expressive template engine.
Home-page: https://palletsprojects.com/p/jinja/
Author: Armin Ronacher
@@ -24,11 +24,14 @@ Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/x-rst
Requires-Dist: MarkupSafe (>=0.23)
Provides-Extra: i18n
Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
diff --git a/external/python/Jinja2-2.11.3.dist-info/RECORD b/external/python/Jinja2-2.11.3.dist-info/RECORD
new file mode 100644
index 00000000..048538a9
--- /dev/null
+++ b/external/python/Jinja2-2.11.3.dist-info/RECORD
@@ -0,0 +1,35 @@
+Jinja2-2.11.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Jinja2-2.11.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Jinja2-2.11.3.dist-info/METADATA,sha256=PscpJ1C3RSp8xcjV3fAuTz13rKbGxmzJXnMQFH-WKhs,3535
+Jinja2-2.11.3.dist-info/RECORD,,
+Jinja2-2.11.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+Jinja2-2.11.3.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+Jinja2-2.11.3.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
+Jinja2-2.11.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=LZUXmxJc2GIchfSAeMWsxCWiQYO-w1-736f2Q3I8ms8,1549
+jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191
+jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775
+jinja2/asyncfilters.py,sha256=XJtYXTxFvcJ5xwk6SaDL4S0oNnT0wPYvXBCSzc482fI,4250
+jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209
+jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139
+jinja2/compiler.py,sha256=Ta9W1Lit542wItAHXlDcg0sEOsFDMirCdlFPHAurg4o,66284
+jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
+jinja2/debug.py,sha256=neR7GIGGjZH3_ILJGVUYy3eLQCCaWJMXOb7o0kGInWc,8529
+jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126
+jinja2/environment.py,sha256=XDSLKc4SqNLMOwTSq3TbWEyA5WyXfuLuVD0wAVjEFwM,50629
+jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
+jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441
+jinja2/filters.py,sha256=9ORilsZrUoydSI9upz8_qGy7gozDWLYoFmlIBFSVRnQ,41439
+jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211
+jinja2/lexer.py,sha256=nUFLRKhhKmmEWkLI65nQePgcQs7qsRdjVYZETMt_v0g,30331
+jinja2/loaders.py,sha256=C-fST_dmFjgWkp0ZuCkrgICAoOsoSIF28wfAFink0oU,17666
+jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131
+jinja2/nativetypes.py,sha256=Ul__gtVw4xH-0qvUvnCNHedQeNDwmEuyLJztzzSPeRg,2753
+jinja2/nodes.py,sha256=Mk1oJPVgIjnQw9WOqILvcu3rLepcFZ0ahxQm2mbwDwc,31095
+jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457
+jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660
+jinja2/runtime.py,sha256=0y-BRyIEZ9ltByL2Id6GpHe1oDRQAwNeQvI0SKobNMw,30618
+jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127
+jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799
+jinja2/utils.py,sha256=Wy4yC3IByqUWwnKln6SdaixdzgK74P6F5nf-gQZrYnU,22436
+jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240
diff --git a/external/python/Jinja2-2.10.3.dist-info/REQUESTED b/external/python/Jinja2-2.11.3.dist-info/REQUESTED
similarity index 100%
rename from external/python/Jinja2-2.10.3.dist-info/REQUESTED
rename to external/python/Jinja2-2.11.3.dist-info/REQUESTED
diff --git a/external/python/Jinja2-2.10.3.dist-info/WHEEL b/external/python/Jinja2-2.11.3.dist-info/WHEEL
similarity index 70%
rename from external/python/Jinja2-2.10.3.dist-info/WHEEL
rename to external/python/Jinja2-2.11.3.dist-info/WHEEL
index 8b701e93..01b8fc7d 100644
--- a/external/python/Jinja2-2.10.3.dist-info/WHEEL
+++ b/external/python/Jinja2-2.11.3.dist-info/WHEEL
@@ -1,5 +1,5 @@
Wheel-Version: 1.0
-Generator: bdist_wheel (0.33.6)
+Generator: bdist_wheel (0.36.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
diff --git a/external/python/Jinja2-2.10.3.dist-info/entry_points.txt b/external/python/Jinja2-2.11.3.dist-info/entry_points.txt
similarity index 100%
rename from external/python/Jinja2-2.10.3.dist-info/entry_points.txt
rename to external/python/Jinja2-2.11.3.dist-info/entry_points.txt
diff --git a/external/python/Jinja2-2.10.3.dist-info/top_level.txt b/external/python/Jinja2-2.11.3.dist-info/top_level.txt
similarity index 100%
rename from external/python/Jinja2-2.10.3.dist-info/top_level.txt
rename to external/python/Jinja2-2.11.3.dist-info/top_level.txt
diff --git a/external/python/jinja2/__init__.py b/external/python/jinja2/__init__.py
index 0eaf7214..f17866f6 100644
--- a/external/python/jinja2/__init__.py
+++ b/external/python/jinja2/__init__.py
@@ -1,83 +1,44 @@
# -*- coding: utf-8 -*-
+"""Jinja is a template engine written in pure Python. It provides a
+non-XML syntax that supports inline expressions and an optional
+sandboxed environment.
"""
- jinja2
- ~~~~~~
-
- Jinja2 is a template engine written in pure Python. It provides a
- Django inspired non-XML syntax but supports inline expressions and
- an optional sandboxed environment.
-
- Nutshell
- --------
-
- Here a small example of a Jinja2 template::
-
- {% extends 'base.html' %}
- {% block title %}Memberlist{% endblock %}
- {% block content %}
-
- {% endblock %}
-
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-__docformat__ = 'restructuredtext en'
-__version__ = "2.10.3"
-
-# high level interface
-from jinja2.environment import Environment, Template
-
-# loaders
-from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
- DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
- ModuleLoader
-
-# bytecode caches
-from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
- MemcachedBytecodeCache
-
-# undefined types
-from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
- make_logging_undefined
-
-# exceptions
-from jinja2.exceptions import TemplateError, UndefinedError, \
- TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
- TemplateAssertionError, TemplateRuntimeError
-
-# decorators and public utilities
-from jinja2.filters import environmentfilter, contextfilter, \
- evalcontextfilter
-from jinja2.utils import Markup, escape, clear_caches, \
- environmentfunction, evalcontextfunction, contextfunction, \
- is_undefined, select_autoescape
-
-__all__ = [
- 'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
- 'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
- 'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
- 'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
- 'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
- 'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
- 'TemplateRuntimeError',
- 'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
- 'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
- 'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
- 'select_autoescape',
-]
-
-
-def _patch_async():
- from jinja2.utils import have_async_gen
- if have_async_gen:
- from jinja2.asyncsupport import patch_all
- patch_all()
-
-
-_patch_async()
-del _patch_async
+from markupsafe import escape
+from markupsafe import Markup
+
+from .bccache import BytecodeCache
+from .bccache import FileSystemBytecodeCache
+from .bccache import MemcachedBytecodeCache
+from .environment import Environment
+from .environment import Template
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateError
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .filters import contextfilter
+from .filters import environmentfilter
+from .filters import evalcontextfilter
+from .loaders import BaseLoader
+from .loaders import ChoiceLoader
+from .loaders import DictLoader
+from .loaders import FileSystemLoader
+from .loaders import FunctionLoader
+from .loaders import ModuleLoader
+from .loaders import PackageLoader
+from .loaders import PrefixLoader
+from .runtime import ChainableUndefined
+from .runtime import DebugUndefined
+from .runtime import make_logging_undefined
+from .runtime import StrictUndefined
+from .runtime import Undefined
+from .utils import clear_caches
+from .utils import contextfunction
+from .utils import environmentfunction
+from .utils import evalcontextfunction
+from .utils import is_undefined
+from .utils import select_autoescape
+
+__version__ = "2.11.3"
diff --git a/external/python/jinja2/_compat.py b/external/python/jinja2/_compat.py
index 4dbf6ea0..1f044954 100644
--- a/external/python/jinja2/_compat.py
+++ b/external/python/jinja2/_compat.py
@@ -1,22 +1,12 @@
# -*- coding: utf-8 -*-
-"""
- jinja2._compat
- ~~~~~~~~~~~~~~
-
- Some py2/py3 compatibility support based on a stripped down
- version of six so we don't have to depend on a specific version
- of it.
-
- :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
+# flake8: noqa
+import marshal
import sys
PY2 = sys.version_info[0] == 2
-PYPY = hasattr(sys, 'pypy_translation_info')
+PYPY = hasattr(sys, "pypy_translation_info")
_identity = lambda x: x
-
if not PY2:
unichr = chr
range_type = range
@@ -30,6 +20,7 @@
import pickle
from io import BytesIO, StringIO
+
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
@@ -46,6 +37,9 @@ def reraise(tp, value, tb=None):
implements_to_string = _identity
encode_filename = _identity
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+
else:
unichr = unichr
text_type = unicode
@@ -59,11 +53,13 @@ def reraise(tp, value, tb=None):
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
+
NativeStringIO = BytesIO
- exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
+ exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
from itertools import imap, izip, ifilter
+
intern = intern
def implements_iterator(cls):
@@ -73,14 +69,25 @@ def implements_iterator(cls):
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
- cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
+ cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
return cls
def encode_filename(filename):
if isinstance(filename, unicode):
- return filename.encode('utf-8')
+ return filename.encode("utf-8")
return filename
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
@@ -90,7 +97,8 @@ def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
+
+ return type.__new__(metaclass, "temporary_class", (), {})
try:
@@ -103,3 +111,22 @@ def __new__(cls, name, this_bases, d):
from collections import abc
except ImportError:
import collections as abc
+
+
+try:
+ from os import fspath
+except ImportError:
+ try:
+ from pathlib import PurePath
+ except ImportError:
+ PurePath = None
+
+ def fspath(path):
+ if hasattr(path, "__fspath__"):
+ return path.__fspath__()
+
+ # Python 3.5 doesn't have __fspath__ yet, use str.
+ if PurePath is not None and isinstance(path, PurePath):
+ return str(path)
+
+ return path
diff --git a/external/python/jinja2/_identifier.py b/external/python/jinja2/_identifier.py
index 2eac35d5..224d5449 100644
--- a/external/python/jinja2/_identifier.py
+++ b/external/python/jinja2/_identifier.py
@@ -1,2 +1,6 @@
+import re
+
# generated by scripts/generate_identifier_pattern.py
-pattern = '·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯'
+pattern = re.compile(
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+)
diff --git a/external/python/jinja2/asyncfilters.py b/external/python/jinja2/asyncfilters.py
index 5c1f46d7..3d98dbcc 100644
--- a/external/python/jinja2/asyncfilters.py
+++ b/external/python/jinja2/asyncfilters.py
@@ -1,12 +1,13 @@
from functools import wraps
-from jinja2.asyncsupport import auto_aiter
-from jinja2 import filters
+from . import filters
+from .asyncsupport import auto_aiter
+from .asyncsupport import auto_await
async def auto_to_seq(value):
seq = []
- if hasattr(value, '__aiter__'):
+ if hasattr(value, "__aiter__"):
async for item in value:
seq.append(item)
else:
@@ -16,8 +17,7 @@ async def auto_to_seq(value):
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
- seq, func = filters.prepare_select_or_reject(
- args, kwargs, modfunc, lookup_attr)
+ seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
if seq:
async for item in auto_aiter(seq):
if func(item):
@@ -26,14 +26,19 @@ async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
def dualfilter(normal_filter, async_filter):
wrap_evalctx = False
- if getattr(normal_filter, 'environmentfilter', False):
- is_async = lambda args: args[0].is_async
+ if getattr(normal_filter, "environmentfilter", False) is True:
+
+ def is_async(args):
+ return args[0].is_async
+
wrap_evalctx = False
else:
- if not getattr(normal_filter, 'evalcontextfilter', False) and \
- not getattr(normal_filter, 'contextfilter', False):
- wrap_evalctx = True
- is_async = lambda args: args[0].environment.is_async
+ has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
+ has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
+ wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
+
+ def is_async(args):
+ return args[0].environment.is_async
@wraps(normal_filter)
def wrapper(*args, **kwargs):
@@ -55,6 +60,7 @@ def wrapper(*args, **kwargs):
def asyncfiltervariant(original):
def decorator(f):
return dualfilter(original, f)
+
return decorator
@@ -63,19 +69,22 @@ async def do_first(environment, seq):
try:
return await auto_aiter(seq).__anext__()
except StopAsyncIteration:
- return environment.undefined('No first item, sequence was empty.')
+ return environment.undefined("No first item, sequence was empty.")
@asyncfiltervariant(filters.do_groupby)
async def do_groupby(environment, value, attribute):
expr = filters.make_attrgetter(environment, attribute)
- return [filters._GroupTuple(key, await auto_to_seq(values))
- for key, values in filters.groupby(sorted(
- await auto_to_seq(value), key=expr), expr)]
+ return [
+ filters._GroupTuple(key, await auto_to_seq(values))
+ for key, values in filters.groupby(
+ sorted(await auto_to_seq(value), key=expr), expr
+ )
+ ]
@asyncfiltervariant(filters.do_join)
-async def do_join(eval_ctx, value, d=u'', attribute=None):
+async def do_join(eval_ctx, value, d=u"", attribute=None):
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
@@ -109,7 +118,7 @@ async def do_map(*args, **kwargs):
seq, func = filters.prepare_map(args, kwargs)
if seq:
async for item in auto_aiter(seq):
- yield func(item)
+ yield await auto_await(func(item))
@asyncfiltervariant(filters.do_sum)
@@ -118,7 +127,10 @@ async def do_sum(environment, iterable, attribute=None, start=0):
if attribute is not None:
func = filters.make_attrgetter(environment, attribute)
else:
- func = lambda x: x
+
+ def func(x):
+ return x
+
async for item in auto_aiter(iterable):
rv += func(item)
return rv
@@ -130,17 +142,17 @@ async def do_slice(value, slices, fill_with=None):
ASYNC_FILTERS = {
- 'first': do_first,
- 'groupby': do_groupby,
- 'join': do_join,
- 'list': do_list,
+ "first": do_first,
+ "groupby": do_groupby,
+ "join": do_join,
+ "list": do_list,
# we intentionally do not support do_last because that would be
# ridiculous
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'map': do_map,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'sum': do_sum,
- 'slice': do_slice,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "map": do_map,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "sum": do_sum,
+ "slice": do_slice,
}
diff --git a/external/python/jinja2/asyncsupport.py b/external/python/jinja2/asyncsupport.py
index b1e7b5ce..78ba3739 100644
--- a/external/python/jinja2/asyncsupport.py
+++ b/external/python/jinja2/asyncsupport.py
@@ -1,29 +1,27 @@
# -*- coding: utf-8 -*-
+"""The code for async support. Importing this patches Jinja on supported
+Python versions.
"""
- jinja2.asyncsupport
- ~~~~~~~~~~~~~~~~~~~
-
- Has all the code for async support which is implemented as a patch
- for supported Python versions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import sys
import asyncio
import inspect
from functools import update_wrapper
-from jinja2.utils import concat, internalcode, Markup
-from jinja2.environment import TemplateModule
-from jinja2.runtime import LoopContextBase, _last_iteration
+from markupsafe import Markup
+
+from .environment import TemplateModule
+from .runtime import LoopContext
+from .utils import concat
+from .utils import internalcode
+from .utils import missing
async def concat_async(async_gen):
rv = []
+
async def collect():
async for event in async_gen:
rv.append(event)
+
await collect()
return concat(rv)
@@ -34,10 +32,7 @@ async def generate_async(self, *args, **kwargs):
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
+ yield self.environment.handle_exception()
def wrap_generate_func(original_generate):
@@ -48,17 +43,18 @@ def _convert_generator(self, loop, args, kwargs):
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
+
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
+
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
- raise RuntimeError('The environment was not created with async mode '
- 'enabled.')
+ raise RuntimeError("The environment was not created with async mode enabled.")
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
@@ -66,8 +62,7 @@ async def render_async(self, *args, **kwargs):
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
+ return self.environment.handle_exception()
def wrap_render_func(original_render):
@@ -76,6 +71,7 @@ def render(self, *args, **kwargs):
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
+
return update_wrapper(render, original_render)
@@ -109,6 +105,7 @@ def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
+
return update_wrapper(_invoke, original_invoke)
@@ -124,9 +121,9 @@ def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
- raise RuntimeError('Template module attribute is unavailable '
- 'in async mode')
+ raise RuntimeError("Template module attribute is unavailable in async mode")
return original_default_module(self)
+
return _get_default_module
@@ -139,30 +136,30 @@ async def make_module_async(self, vars=None, shared=False, locals=None):
def patch_template():
- from jinja2 import Template
+ from . import Template
+
Template.generate = wrap_generate_func(Template.generate)
- Template.generate_async = update_wrapper(
- generate_async, Template.generate_async)
- Template.render_async = update_wrapper(
- render_async, Template.render_async)
+ Template.generate_async = update_wrapper(generate_async, Template.generate_async)
+ Template.render_async = update_wrapper(render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
- Template._get_default_module = wrap_default_module(
- Template._get_default_module)
+ Template._get_default_module = wrap_default_module(Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
- make_module_async, Template.make_module_async)
+ make_module_async, Template.make_module_async
+ )
def patch_runtime():
- from jinja2.runtime import BlockReference, Macro
- BlockReference.__call__ = wrap_block_reference_call(
- BlockReference.__call__)
+ from .runtime import BlockReference, Macro
+
+ BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
- from jinja2.filters import FILTERS
- from jinja2.asyncfilters import ASYNC_FILTERS
+ from .filters import FILTERS
+ from .asyncfilters import ASYNC_FILTERS
+
FILTERS.update(ASYNC_FILTERS)
@@ -179,7 +176,7 @@ async def auto_await(value):
async def auto_aiter(iterable):
- if hasattr(iterable, '__aiter__'):
+ if hasattr(iterable, "__aiter__"):
async for item in iterable:
yield item
return
@@ -187,70 +184,81 @@ async def auto_aiter(iterable):
yield item
-class AsyncLoopContext(LoopContextBase):
-
- def __init__(self, async_iterator, undefined, after, length, recurse=None,
- depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._async_iterator = async_iterator
- self._after = after
- self._length = length
+class AsyncLoopContext(LoopContext):
+ _to_iterator = staticmethod(auto_aiter)
@property
- def length(self):
- if self._length is None:
- raise TypeError('Loop length for some iterators cannot be '
- 'lazily calculated in async mode')
+ async def length(self):
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = [x async for x in self._iterator]
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
return self._length
- def __aiter__(self):
- return AsyncLoopContextIterator(self)
+ @property
+ async def revindex0(self):
+ return await self.length - self.index
+ @property
+ async def revindex(self):
+ return await self.length - self.index0
+
+ async def _peek_next(self):
+ if self._after is not missing:
+ return self._after
+
+ try:
+ self._after = await self._iterator.__anext__()
+ except StopAsyncIteration:
+ self._after = missing
-class AsyncLoopContextIterator(object):
- __slots__ = ('context',)
+ return self._after
- def __init__(self, context):
- self.context = context
+ @property
+ async def last(self):
+ return await self._peek_next() is missing
+
+ @property
+ async def nextitem(self):
+ rv = await self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
def __aiter__(self):
return self
async def __anext__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopAsyncIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- try:
- ctx._after = await ctx._async_iterator.__anext__()
- except StopAsyncIteration:
- ctx._after = _last_iteration
- return ctx._current, ctx
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = await self._iterator.__anext__()
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
- # Length is more complicated and less efficient in async mode. The
- # reason for this is that we cannot know if length will be used
- # upfront but because length is a property we cannot lazily execute it
- # later. This means that we need to buffer it up and measure :(
- #
- # We however only do this for actual iterators, not for async
- # iterators as blocking here does not seem like the best idea in the
- # world.
- try:
- length = len(iterable)
- except (TypeError, AttributeError):
- if not hasattr(iterable, '__aiter__'):
- iterable = tuple(iterable)
- length = len(iterable)
- else:
- length = None
- async_iterator = auto_aiter(iterable)
- try:
- after = await async_iterator.__anext__()
- except StopAsyncIteration:
- after = _last_iteration
- return AsyncLoopContext(async_iterator, undefined, after, length, recurse,
- depth0)
+ import warnings
+
+ warnings.warn(
+ "This template must be recompiled with at least Jinja 2.11, or"
+ " it will fail in 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return AsyncLoopContext(iterable, undefined, recurse, depth0)
+
+
+patch_all()
diff --git a/external/python/jinja2/bccache.py b/external/python/jinja2/bccache.py
index 507a9b3d..9c066103 100644
--- a/external/python/jinja2/bccache.py
+++ b/external/python/jinja2/bccache.py
@@ -1,60 +1,37 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.bccache
- ~~~~~~~~~~~~~~
-
- This module implements the bytecode cache system Jinja is optionally
- using. This is useful if you have very complex template situations and
- the compiliation of all those templates slow down your application too
- much.
-
- Situations where this is useful are often forking web applications that
- are initialized on the first request.
+"""The optional bytecode cache system. This is useful if you have very
+complex template situations and the compilation of all those templates
+slows down your application too much.
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
+Situations where this is useful are often forking web applications that
+are initialized on the first request.
"""
-from os import path, listdir
+import errno
+import fnmatch
import os
-import sys
import stat
-import errno
-import marshal
+import sys
import tempfile
-import fnmatch
from hashlib import sha1
-from jinja2.utils import open_if_exists
-from jinja2._compat import BytesIO, pickle, PY2, text_type
-
-
-# marshal works better on 3.x, one hack less required
-if not PY2:
- marshal_dump = marshal.dump
- marshal_load = marshal.load
-else:
-
- def marshal_dump(code, f):
- if isinstance(f, file):
- marshal.dump(code, f)
- else:
- f.write(marshal.dumps(code))
-
- def marshal_load(f):
- if isinstance(f, file):
- return marshal.load(f)
- return marshal.loads(f.read())
-
-
-bc_version = 3
-
-# magic version used to only change with new jinja versions. With 2.6
-# we change this to also take Python version changes into account. The
-# reason for this is that Python tends to segfault if fed earlier bytecode
-# versions because someone thought it would be a good idea to reuse opcodes
-# or make Python incompatible with earlier versions.
-bc_magic = 'j2'.encode('ascii') + \
- pickle.dumps(bc_version, 2) + \
- pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
+from os import listdir
+from os import path
+
+from ._compat import BytesIO
+from ._compat import marshal_dump
+from ._compat import marshal_load
+from ._compat import pickle
+from ._compat import text_type
+from .utils import open_if_exists
+
+bc_version = 4
+# Magic bytes to identify Jinja bytecode cache files. Contains the
+# Python major and minor version to avoid loading incompatible bytecode
+# if a project upgrades its Python version.
+bc_magic = (
+ b"j2"
+ + pickle.dumps(bc_version, 2)
+ + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
+)
class Bucket(object):
@@ -98,7 +75,7 @@ def load_bytecode(self, f):
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
- raise TypeError('can\'t write empty bucket')
+ raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal_dump(self.code, f)
@@ -140,7 +117,7 @@ def dump_bytecode(self, bucket):
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
- Jinja2.
+ Jinja.
"""
def load_bytecode(self, bucket):
@@ -158,24 +135,24 @@ def dump_bytecode(self, bucket):
raise NotImplementedError()
def clear(self):
- """Clears the cache. This method is not used by Jinja2 but should be
+ """Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
- hash = sha1(name.encode('utf-8'))
+ hash = sha1(name.encode("utf-8"))
if filename is not None:
- filename = '|' + filename
+ filename = "|" + filename
if isinstance(filename, text_type):
- filename = filename.encode('utf-8')
+ filename = filename.encode("utf-8")
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
- return sha1(source.encode('utf-8')).hexdigest()
+ return sha1(source.encode("utf-8")).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
@@ -210,7 +187,7 @@ class FileSystemBytecodeCache(BytecodeCache):
This bytecode cache supports clearing of the cache using the clear method.
"""
- def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
+ def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
@@ -218,19 +195,21 @@ def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
def _get_default_cache_dir(self):
def _unsafe_dir():
- raise RuntimeError('Cannot determine safe temp directory. You '
- 'need to explicitly provide one.')
+ raise RuntimeError(
+ "Cannot determine safe temp directory. You "
+ "need to explicitly provide one."
+ )
tmpdir = tempfile.gettempdir()
# On windows the temporary directory is used specific unless
# explicitly forced otherwise. We can just use that.
- if os.name == 'nt':
+ if os.name == "nt":
return tmpdir
- if not hasattr(os, 'getuid'):
+ if not hasattr(os, "getuid"):
_unsafe_dir()
- dirname = '_jinja2-cache-%d' % os.getuid()
+ dirname = "_jinja2-cache-%d" % os.getuid()
actual_dir = os.path.join(tmpdir, dirname)
try:
@@ -241,18 +220,22 @@ def _unsafe_dir():
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
actual_dir_stat = os.lstat(actual_dir)
- if actual_dir_stat.st_uid != os.getuid() \
- or not stat.S_ISDIR(actual_dir_stat.st_mode) \
- or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
_unsafe_dir()
return actual_dir
@@ -261,7 +244,7 @@ def _get_cache_filename(self, bucket):
return path.join(self.directory, self.pattern % bucket.key)
def load_bytecode(self, bucket):
- f = open_if_exists(self._get_cache_filename(bucket), 'rb')
+ f = open_if_exists(self._get_cache_filename(bucket), "rb")
if f is not None:
try:
bucket.load_bytecode(f)
@@ -269,7 +252,7 @@ def load_bytecode(self, bucket):
f.close()
def dump_bytecode(self, bucket):
- f = open(self._get_cache_filename(bucket), 'wb')
+ f = open(self._get_cache_filename(bucket), "wb")
try:
bucket.write_bytecode(f)
finally:
@@ -280,7 +263,8 @@ def clear(self):
# write access on the file system and the function does not exist
# normally.
from os import remove
- files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
+
+ files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
for filename in files:
try:
remove(path.join(self.directory, filename))
@@ -333,8 +317,13 @@ class MemcachedBytecodeCache(BytecodeCache):
`ignore_memcache_errors` parameter.
"""
- def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
- ignore_memcache_errors=True):
+ def __init__(
+ self,
+ client,
+ prefix="jinja2/bytecode/",
+ timeout=None,
+ ignore_memcache_errors=True,
+ ):
self.client = client
self.prefix = prefix
self.timeout = timeout
diff --git a/external/python/jinja2/compiler.py b/external/python/jinja2/compiler.py
index d534a827..63297b42 100644
--- a/external/python/jinja2/compiler.py
+++ b/external/python/jinja2/compiler.py
@@ -1,59 +1,62 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.compiler
- ~~~~~~~~~~~~~~~
-
- Compiles nodes into python code.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
+"""Compiles nodes from the parser into Python code."""
+from collections import namedtuple
+from functools import update_wrapper
from itertools import chain
-from copy import deepcopy
from keyword import iskeyword as is_python_keyword
-from functools import update_wrapper
-from jinja2 import nodes
-from jinja2.nodes import EvalContext
-from jinja2.visitor import NodeVisitor
-from jinja2.optimizer import Optimizer
-from jinja2.exceptions import TemplateAssertionError
-from jinja2.utils import Markup, concat, escape
-from jinja2._compat import range_type, text_type, string_types, \
- iteritems, NativeStringIO, imap, izip
-from jinja2.idtracking import Symbols, VAR_LOAD_PARAMETER, \
- VAR_LOAD_RESOLVE, VAR_LOAD_ALIAS, VAR_LOAD_UNDEFINED
+from markupsafe import escape
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import izip
+from ._compat import NativeStringIO
+from ._compat import range_type
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import TemplateAssertionError
+from .idtracking import Symbols
+from .idtracking import VAR_LOAD_ALIAS
+from .idtracking import VAR_LOAD_PARAMETER
+from .idtracking import VAR_LOAD_RESOLVE
+from .idtracking import VAR_LOAD_UNDEFINED
+from .nodes import EvalContext
+from .optimizer import Optimizer
+from .utils import concat
+from .visitor import NodeVisitor
operators = {
- 'eq': '==',
- 'ne': '!=',
- 'gt': '>',
- 'gteq': '>=',
- 'lt': '<',
- 'lteq': '<=',
- 'in': 'in',
- 'notin': 'not in'
+ "eq": "==",
+ "ne": "!=",
+ "gt": ">",
+ "gteq": ">=",
+ "lt": "<",
+ "lteq": "<=",
+ "in": "in",
+ "notin": "not in",
}
# what method to iterate over items do we want to use for dict iteration
# in generated code? on 2.x let's go with iteritems, on 3.x with items
-if hasattr(dict, 'iteritems'):
- dict_item_iter = 'iteritems'
+if hasattr(dict, "iteritems"):
+ dict_item_iter = "iteritems"
else:
- dict_item_iter = 'items'
+ dict_item_iter = "items"
-code_features = ['division']
+code_features = ["division"]
# does this python version support generator stops? (PEP 0479)
try:
- exec('from __future__ import generator_stop')
- code_features.append('generator_stop')
+ exec("from __future__ import generator_stop")
+ code_features.append("generator_stop")
except SyntaxError:
pass
# does this python version support yield from?
try:
- exec('def f(): yield from x()')
+ exec("def f(): yield from x()")
except SyntaxError:
supports_yield_from = False
else:
@@ -68,17 +71,19 @@ def new_func(self, node, frame, **kwargs):
if new_node != node:
return self.visit(new_node, frame)
return f(self, node, frame, **kwargs)
+
return update_wrapper(new_func, f)
-def generate(node, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
+def generate(
+ node, environment, name, filename, stream=None, defer_init=False, optimized=True
+):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
- raise TypeError('Can\'t compile non template nodes')
- generator = environment.code_generator_class(environment, name, filename,
- stream, defer_init,
- optimized)
+ raise TypeError("Can't compile non template nodes")
+ generator = environment.code_generator_class(
+ environment, name, filename, stream, defer_init, optimized
+ )
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
@@ -119,7 +124,6 @@ def find_undeclared(nodes, names):
class MacroRef(object):
-
def __init__(self, node):
self.node = node
self.accesses_caller = False
@@ -132,8 +136,7 @@ class Frame(object):
def __init__(self, eval_ctx, parent=None, level=None):
self.eval_ctx = eval_ctx
- self.symbols = Symbols(parent and parent.symbols or None,
- level=level)
+ self.symbols = Symbols(parent and parent.symbols or None, level=level)
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
@@ -223,7 +226,7 @@ def __init__(self, names):
self.undeclared = set()
def visit_Name(self, node):
- if node.ctx == 'load' and node.name in self.names:
+ if node.ctx == "load" and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
@@ -242,9 +245,9 @@ class CompilerExit(Exception):
class CodeGenerator(NodeVisitor):
-
- def __init__(self, environment, name, filename, stream=None,
- defer_init=False, optimized=True):
+ def __init__(
+ self, environment, name, filename, stream=None, defer_init=False, optimized=True
+ ):
if stream is None:
stream = NativeStringIO()
self.environment = environment
@@ -306,7 +309,7 @@ def __init__(self, environment, name, filename, stream=None,
self._param_def_block = []
# Tracks the current context.
- self._context_reference_stack = ['context']
+ self._context_reference_stack = ["context"]
# -- Various compilation helpers
@@ -317,30 +320,30 @@ def fail(self, msg, lineno):
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
- return 't_%d' % self._last_identifier
+ return "t_%d" % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
- self.writeline('%s = []' % frame.buffer)
+ self.writeline("%s = []" % frame.buffer)
def return_buffer_contents(self, frame, force_unescaped=False):
"""Return the buffer contents of the frame."""
if not force_unescaped:
if frame.eval_ctx.volatile:
- self.writeline('if context.eval_ctx.autoescape:')
+ self.writeline("if context.eval_ctx.autoescape:")
self.indent()
- self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
self.outdent()
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
- self.writeline('return concat(%s)' % frame.buffer)
+ self.writeline("return concat(%s)" % frame.buffer)
self.outdent()
return
elif frame.eval_ctx.autoescape:
- self.writeline('return Markup(concat(%s))' % frame.buffer)
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
return
- self.writeline('return concat(%s)' % frame.buffer)
+ self.writeline("return concat(%s)" % frame.buffer)
def indent(self):
"""Indent by one."""
@@ -353,14 +356,14 @@ def outdent(self, step=1):
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
- self.writeline('yield ', node)
+ self.writeline("yield ", node)
else:
- self.writeline('%s.append(' % frame.buffer, node)
+ self.writeline("%s.append(" % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
- self.write(')')
+ self.write(")")
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
@@ -373,7 +376,7 @@ def blockvisit(self, nodes, frame):
is no buffer a dummy ``if 0: yield None`` is written automatically.
"""
try:
- self.writeline('pass')
+ self.writeline("pass")
for node in nodes:
self.visit(node, frame)
except CompilerExit:
@@ -383,14 +386,13 @@ def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
- self.stream.write('\n' * self._new_lines)
+ self.stream.write("\n" * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
- self.debug_info.append((self._write_debug_info,
- self.code_lineno))
+ self.debug_info.append((self._write_debug_info, self.code_lineno))
self._write_debug_info = None
self._first_write = False
- self.stream.write(' ' * self._indentation)
+ self.stream.write(" " * self._indentation)
self._new_lines = 0
self.stream.write(x)
@@ -410,7 +412,7 @@ def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
arguments may not include python keywords otherwise a syntax
- error could occour. The extra keyword arguments should be given
+ error could occur. The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
@@ -422,41 +424,41 @@ def signature(self, node, frame, extra_kwargs=None):
break
for arg in node.args:
- self.write(', ')
+ self.write(", ")
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
- self.write(', ')
+ self.write(", ")
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write(', %s=%s' % (key, value))
+ self.write(", %s=%s" % (key, value))
if node.dyn_args:
- self.write(', *')
+ self.write(", *")
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
- self.write(', **dict({')
+ self.write(", **dict({")
else:
- self.write(', **{')
+ self.write(", **{")
for kwarg in node.kwargs:
- self.write('%r: ' % kwarg.key)
+ self.write("%r: " % kwarg.key)
self.visit(kwarg.value, frame)
- self.write(', ')
+ self.write(", ")
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
- self.write('%r: %s, ' % (key, value))
+ self.write("%r: %s, " % (key, value))
if node.dyn_kwargs is not None:
- self.write('}, **')
+ self.write("}, **")
self.visit(node.dyn_kwargs, frame)
- self.write(')')
+ self.write(")")
else:
- self.write('}')
+ self.write("}")
elif node.dyn_kwargs is not None:
- self.write(', **')
+ self.write(", **")
self.visit(node.dyn_kwargs, frame)
def pull_dependencies(self, nodes):
@@ -464,13 +466,14 @@ def pull_dependencies(self, nodes):
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
- for dependency in 'filters', 'tests':
+ for dependency in "filters", "tests":
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
- self.writeline('%s = environment.%s[%r]' %
- (mapping[name], dependency, name))
+ self.writeline(
+ "%s = environment.%s[%r]" % (mapping[name], dependency, name)
+ )
def enter_frame(self, frame):
undefs = []
@@ -478,16 +481,15 @@ def enter_frame(self, frame):
if action == VAR_LOAD_PARAMETER:
pass
elif action == VAR_LOAD_RESOLVE:
- self.writeline('%s = %s(%r)' %
- (target, self.get_resolve_func(), param))
+ self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
elif action == VAR_LOAD_ALIAS:
- self.writeline('%s = %s' % (target, param))
+ self.writeline("%s = %s" % (target, param))
elif action == VAR_LOAD_UNDEFINED:
undefs.append(target)
else:
- raise NotImplementedError('unknown load instruction')
+ raise NotImplementedError("unknown load instruction")
if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
+ self.writeline("%s = missing" % " = ".join(undefs))
def leave_frame(self, frame, with_python_scope=False):
if not with_python_scope:
@@ -495,12 +497,12 @@ def leave_frame(self, frame, with_python_scope=False):
for target, _ in iteritems(frame.symbols.loads):
undefs.append(target)
if undefs:
- self.writeline('%s = missing' % ' = '.join(undefs))
+ self.writeline("%s = missing" % " = ".join(undefs))
def func(self, name):
if self.environment.is_async:
- return 'async def %s' % name
- return 'def %s' % name
+ return "async def %s" % name
+ return "def %s" % name
def macro_body(self, node, frame):
"""Dump the function def of a macro or call block."""
@@ -512,16 +514,16 @@ def macro_body(self, node, frame):
skip_special_params = set()
args = []
for idx, arg in enumerate(node.args):
- if arg.name == 'caller':
+ if arg.name == "caller":
explicit_caller = idx
- if arg.name in ('kwargs', 'varargs'):
+ if arg.name in ("kwargs", "varargs"):
skip_special_params.add(arg.name)
args.append(frame.symbols.ref(arg.name))
- undeclared = find_undeclared(node.body, ('caller', 'kwargs', 'varargs'))
+ undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
- if 'caller' in undeclared:
- # In older Jinja2 versions there was a bug that allowed caller
+ if "caller" in undeclared:
+ # In older Jinja versions there was a bug that allowed caller
# to retain the special behavior even if it was mentioned in
# the argument list. However thankfully this was only really
# working if it was the last argument. So we are explicitly
@@ -531,23 +533,26 @@ def macro_body(self, node, frame):
try:
node.defaults[explicit_caller - len(node.args)]
except IndexError:
- self.fail('When defining macros or call blocks the '
- 'special "caller" argument must be omitted '
- 'or be given a default.', node.lineno)
+ self.fail(
+ "When defining macros or call blocks the "
+ 'special "caller" argument must be omitted '
+ "or be given a default.",
+ node.lineno,
+ )
else:
- args.append(frame.symbols.declare_parameter('caller'))
+ args.append(frame.symbols.declare_parameter("caller"))
macro_ref.accesses_caller = True
- if 'kwargs' in undeclared and not 'kwargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('kwargs'))
+ if "kwargs" in undeclared and "kwargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("kwargs"))
macro_ref.accesses_kwargs = True
- if 'varargs' in undeclared and not 'varargs' in skip_special_params:
- args.append(frame.symbols.declare_parameter('varargs'))
+ if "varargs" in undeclared and "varargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("varargs"))
macro_ref.accesses_varargs = True
# macros are delayed, they never require output checks
frame.require_output_check = False
frame.symbols.analyze_node(node)
- self.writeline('%s(%s):' % (self.func('macro'), ', '.join(args)), node)
+ self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
self.indent()
self.buffer(frame)
@@ -556,17 +561,17 @@ def macro_body(self, node, frame):
self.push_parameter_definitions(frame)
for idx, arg in enumerate(node.args):
ref = frame.symbols.ref(arg.name)
- self.writeline('if %s is missing:' % ref)
+ self.writeline("if %s is missing:" % ref)
self.indent()
try:
default = node.defaults[idx - len(node.args)]
except IndexError:
- self.writeline('%s = undefined(%r, name=%r)' % (
- ref,
- 'parameter %r was not provided' % arg.name,
- arg.name))
+ self.writeline(
+ "%s = undefined(%r, name=%r)"
+ % (ref, "parameter %r was not provided" % arg.name, arg.name)
+ )
else:
- self.writeline('%s = ' % ref)
+ self.writeline("%s = " % ref)
self.visit(default, frame)
self.mark_parameter_stored(ref)
self.outdent()
@@ -581,35 +586,46 @@ def macro_body(self, node, frame):
def macro_def(self, macro_ref, frame):
"""Dump the macro definition for the def created by macro_body."""
- arg_tuple = ', '.join(repr(x.name) for x in macro_ref.node.args)
- name = getattr(macro_ref.node, 'name', None)
+ arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, "name", None)
if len(macro_ref.node.args) == 1:
- arg_tuple += ','
- self.write('Macro(environment, macro, %r, (%s), %r, %r, %r, '
- 'context.eval_ctx.autoescape)' %
- (name, arg_tuple, macro_ref.accesses_kwargs,
- macro_ref.accesses_varargs, macro_ref.accesses_caller))
+ arg_tuple += ","
+ self.write(
+ "Macro(environment, macro, %r, (%s), %r, %r, %r, "
+ "context.eval_ctx.autoescape)"
+ % (
+ name,
+ arg_tuple,
+ macro_ref.accesses_kwargs,
+ macro_ref.accesses_varargs,
+ macro_ref.accesses_caller,
+ )
+ )
def position(self, node):
"""Return a human readable position for the node."""
- rv = 'line %d' % node.lineno
+ rv = "line %d" % node.lineno
if self.name is not None:
- rv += ' in ' + repr(self.name)
+ rv += " in " + repr(self.name)
return rv
def dump_local_context(self, frame):
- return '{%s}' % ', '.join(
- '%r: %s' % (name, target) for name, target
- in iteritems(frame.symbols.dump_stores()))
+ return "{%s}" % ", ".join(
+ "%r: %s" % (name, target)
+ for name, target in iteritems(frame.symbols.dump_stores())
+ )
def write_commons(self):
"""Writes a common preamble that is used by root and block functions.
Primarily this sets up common local helpers and enforces a generator
through a dead branch.
"""
- self.writeline('resolve = context.resolve_or_missing')
- self.writeline('undefined = environment.undefined')
- self.writeline('if 0: yield None')
+ self.writeline("resolve = context.resolve_or_missing")
+ self.writeline("undefined = environment.undefined")
+ # always use the standard Undefined class for the implicit else of
+ # conditional expressions
+ self.writeline("cond_expr_undefined = Undefined")
+ self.writeline("if 0: yield None")
def push_parameter_definitions(self, frame):
"""Pushes all parameter targets from the given frame into a local
@@ -642,12 +658,12 @@ def get_context_ref(self):
def get_resolve_func(self):
target = self._context_reference_stack[-1]
- if target == 'context':
- return 'resolve'
- return '%s.resolve' % target
+ if target == "context":
+ return "resolve"
+ return "%s.resolve" % target
def derive_context(self, frame):
- return '%s.derived(%s)' % (
+ return "%s.derived(%s)" % (
self.get_context_ref(),
self.dump_local_context(frame),
)
@@ -669,44 +685,48 @@ def pop_assign_tracking(self, frame):
vars = self._assign_stack.pop()
if not frame.toplevel or not vars:
return
- public_names = [x for x in vars if x[:1] != '_']
+ public_names = [x for x in vars if x[:1] != "_"]
if len(vars) == 1:
name = next(iter(vars))
ref = frame.symbols.ref(name)
- self.writeline('context.vars[%r] = %s' % (name, ref))
+ self.writeline("context.vars[%r] = %s" % (name, ref))
else:
- self.writeline('context.vars.update({')
+ self.writeline("context.vars.update({")
for idx, name in enumerate(vars):
if idx:
- self.write(', ')
+ self.write(", ")
ref = frame.symbols.ref(name)
- self.write('%r: %s' % (name, ref))
- self.write('})')
+ self.write("%r: %s" % (name, ref))
+ self.write("})")
if public_names:
if len(public_names) == 1:
- self.writeline('context.exported_vars.add(%r)' %
- public_names[0])
+ self.writeline("context.exported_vars.add(%r)" % public_names[0])
else:
- self.writeline('context.exported_vars.update((%s))' %
- ', '.join(imap(repr, public_names)))
+ self.writeline(
+ "context.exported_vars.update((%s))"
+ % ", ".join(imap(repr, public_names))
+ )
# -- Statement Visitors
def visit_Template(self, node, frame=None):
- assert frame is None, 'no root frame allowed'
+ assert frame is None, "no root frame allowed"
eval_ctx = EvalContext(self.environment, self.name)
- from jinja2.runtime import __all__ as exported
- self.writeline('from __future__ import %s' % ', '.join(code_features))
- self.writeline('from jinja2.runtime import ' + ', '.join(exported))
+ from .runtime import exported
+
+ self.writeline("from __future__ import %s" % ", ".join(code_features))
+ self.writeline("from jinja2.runtime import " + ", ".join(exported))
if self.environment.is_async:
- self.writeline('from jinja2.asyncsupport import auto_await, '
- 'auto_aiter, make_async_loop_context')
+ self.writeline(
+ "from jinja2.asyncsupport import auto_await, "
+ "auto_aiter, AsyncLoopContext"
+ )
# if we want a deferred initialization we cannot move the
# environment into a local name
- envenv = not self.defer_init and ', environment=environment' or ''
+ envenv = not self.defer_init and ", environment=environment" or ""
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
@@ -715,7 +735,7 @@ def visit_Template(self, node, frame=None):
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
- self.fail('block %r defined twice' % block.name, block.lineno)
+ self.fail("block %r defined twice" % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
@@ -723,32 +743,32 @@ def visit_Template(self, node, frame=None):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
- if '.' in imp:
- module, obj = imp.rsplit('.', 1)
- self.writeline('from %s import %s as %s' %
- (module, obj, alias))
+ if "." in imp:
+ module, obj = imp.rsplit(".", 1)
+ self.writeline("from %s import %s as %s" % (module, obj, alias))
else:
- self.writeline('import %s as %s' % (imp, alias))
+ self.writeline("import %s as %s" % (imp, alias))
# add the load name
- self.writeline('name = %r' % self.name)
+ self.writeline("name = %r" % self.name)
# generate the root render function.
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('root'), envenv), extra=1)
+ self.writeline(
+ "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
+ )
self.indent()
self.write_commons()
# process the root
frame = Frame(eval_ctx)
- if 'self' in find_undeclared(node.body, ('self',)):
- ref = frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
+ if "self" in find_undeclared(node.body, ("self",)):
+ ref = frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
frame.symbols.analyze_node(node)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
if have_extends:
- self.writeline('parent_template = None')
+ self.writeline("parent_template = None")
self.enter_frame(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
@@ -759,39 +779,42 @@ def visit_Template(self, node, frame=None):
if have_extends:
if not self.has_known_extends:
self.indent()
- self.writeline('if parent_template is not None:')
+ self.writeline("if parent_template is not None:")
self.indent()
if supports_yield_from and not self.environment.is_async:
- self.writeline('yield from parent_template.'
- 'root_render_func(context)')
+ self.writeline("yield from parent_template.root_render_func(context)")
else:
- self.writeline('%sfor event in parent_template.'
- 'root_render_func(context):' %
- (self.environment.is_async and 'async ' or ''))
+ self.writeline(
+ "%sfor event in parent_template."
+ "root_render_func(context):"
+ % (self.environment.is_async and "async " or "")
+ )
self.indent()
- self.writeline('yield event')
+ self.writeline("yield event")
self.outdent()
self.outdent(1 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
- self.writeline('%s(context, missing=missing%s):' %
- (self.func('block_' + name), envenv),
- block, 1)
+ self.writeline(
+ "%s(context, missing=missing%s):"
+ % (self.func("block_" + name), envenv),
+ block,
+ 1,
+ )
self.indent()
self.write_commons()
# It's important that we do not make this frame a child of the
# toplevel template. This would cause a variety of
# interesting issues with identifier tracking.
block_frame = Frame(eval_ctx)
- undeclared = find_undeclared(block.body, ('self', 'super'))
- if 'self' in undeclared:
- ref = block_frame.symbols.declare_parameter('self')
- self.writeline('%s = TemplateReference(context)' % ref)
- if 'super' in undeclared:
- ref = block_frame.symbols.declare_parameter('super')
- self.writeline('%s = context.super(%r, '
- 'block_%s)' % (ref, name, name))
+ undeclared = find_undeclared(block.body, ("self", "super"))
+ if "self" in undeclared:
+ ref = block_frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
+ if "super" in undeclared:
+ ref = block_frame.symbols.declare_parameter("super")
+ self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
block_frame.symbols.analyze_node(block)
block_frame.block = name
self.enter_frame(block_frame)
@@ -800,13 +823,15 @@ def visit_Template(self, node, frame=None):
self.leave_frame(block_frame, with_python_scope=True)
self.outdent()
- self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
- for x in self.blocks),
- extra=1)
+ self.writeline(
+ "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
+ extra=1,
+ )
# add a function that returns the debug info
- self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
- in self.debug_info))
+ self.writeline(
+ "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
+ )
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
@@ -817,7 +842,7 @@ def visit_Block(self, node, frame):
if self.has_known_extends:
return
if self.extends_so_far > 0:
- self.writeline('if parent_template is None:')
+ self.writeline("if parent_template is None:")
self.indent()
level += 1
@@ -826,16 +851,22 @@ def visit_Block(self, node, frame):
else:
context = self.get_context_ref()
- if supports_yield_from and not self.environment.is_async and \
- frame.buffer is None:
- self.writeline('yield from context.blocks[%r][0](%s)' % (
- node.name, context), node)
+ if (
+ supports_yield_from
+ and not self.environment.is_async
+ and frame.buffer is None
+ ):
+ self.writeline(
+ "yield from context.blocks[%r][0](%s)" % (node.name, context), node
+ )
else:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in context.blocks[%r][0](%s):' % (
- loop, node.name, context), node)
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
+ node,
+ )
self.indent()
- self.simple_write('event', frame)
+ self.simple_write("event", frame)
self.outdent()
self.outdent(level)
@@ -843,8 +874,7 @@ def visit_Block(self, node, frame):
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
- self.fail('cannot use extend from a non top-level scope',
- node.lineno)
+ self.fail("cannot use extend from a non top-level scope", node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
@@ -856,10 +886,9 @@ def visit_Extends(self, node, frame):
# time too, but i welcome it not to confuse users by throwing the
# same error at different times just "because we can".
if not self.has_known_extends:
- self.writeline('if parent_template is not None:')
+ self.writeline("if parent_template is not None:")
self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'extended multiple times')
+ self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
@@ -868,14 +897,14 @@ def visit_Extends(self, node, frame):
else:
self.outdent()
- self.writeline('parent_template = environment.get_template(', node)
+ self.writeline("parent_template = environment.get_template(", node)
self.visit(node.template, frame)
- self.write(', %r)' % self.name)
- self.writeline('for name, parent_block in parent_template.'
- 'blocks.%s():' % dict_item_iter)
+ self.write(", %r)" % self.name)
+ self.writeline(
+ "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
+ )
self.indent()
- self.writeline('context.blocks.setdefault(name, []).'
- 'append(parent_block)')
+ self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
self.outdent()
# if this extends statement was in the root level we can take
@@ -890,52 +919,56 @@ def visit_Extends(self, node, frame):
def visit_Include(self, node, frame):
"""Handles includes."""
if node.ignore_missing:
- self.writeline('try:')
+ self.writeline("try:")
self.indent()
- func_name = 'get_or_select_template'
+ func_name = "get_or_select_template"
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
- func_name = 'get_template'
+ func_name = "get_template"
elif isinstance(node.template.value, (tuple, list)):
- func_name = 'select_template'
+ func_name = "select_template"
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
- func_name = 'select_template'
+ func_name = "select_template"
- self.writeline('template = environment.%s(' % func_name, node)
+ self.writeline("template = environment.%s(" % func_name, node)
self.visit(node.template, frame)
- self.write(', %r)' % self.name)
+ self.write(", %r)" % self.name)
if node.ignore_missing:
self.outdent()
- self.writeline('except TemplateNotFound:')
+ self.writeline("except TemplateNotFound:")
self.indent()
- self.writeline('pass')
+ self.writeline("pass")
self.outdent()
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
skip_event_yield = False
if node.with_context:
- loop = self.environment.is_async and 'async for' or 'for'
- self.writeline('%s event in template.root_render_func('
- 'template.new_context(context.get_all(), True, '
- '%s)):' % (loop, self.dump_local_context(frame)))
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in template.root_render_func("
+ "template.new_context(context.get_all(), True, "
+ "%s)):" % (loop, self.dump_local_context(frame))
+ )
elif self.environment.is_async:
- self.writeline('for event in (await '
- 'template._get_default_module_async())'
- '._body_stream:')
+ self.writeline(
+ "for event in (await "
+ "template._get_default_module_async())"
+ "._body_stream:"
+ )
else:
if supports_yield_from:
- self.writeline('yield from template._get_default_module()'
- '._body_stream')
+ self.writeline("yield from template._get_default_module()._body_stream")
skip_event_yield = True
else:
- self.writeline('for event in template._get_default_module()'
- '._body_stream:')
+ self.writeline(
+ "for event in template._get_default_module()._body_stream:"
+ )
if not skip_event_yield:
self.indent()
- self.simple_write('event', frame)
+ self.simple_write("event", frame)
self.outdent()
if node.ignore_missing:
@@ -943,40 +976,50 @@ def visit_Include(self, node, frame):
def visit_Import(self, node, frame):
"""Visit regular imports."""
- self.writeline('%s = ' % frame.symbols.ref(node.target), node)
+ self.writeline("%s = " % frame.symbols.ref(node.target), node)
if frame.toplevel:
- self.write('context.vars[%r] = ' % node.target)
+ self.write("context.vars[%r] = " % node.target)
if self.environment.is_async:
- self.write('await ')
- self.write('environment.get_template(')
+ self.write("await ")
+ self.write("environment.get_template(")
self.visit(node.template, frame)
- self.write(', %r).' % self.name)
+ self.write(", %r)." % self.name)
if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
elif self.environment.is_async:
- self.write('_get_default_module_async()')
+ self.write("_get_default_module_async()")
else:
- self.write('_get_default_module()')
- if frame.toplevel and not node.target.startswith('_'):
- self.writeline('context.exported_vars.discard(%r)' % node.target)
+ self.write("_get_default_module()")
+ if frame.toplevel and not node.target.startswith("_"):
+ self.writeline("context.exported_vars.discard(%r)" % node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
- self.write('included_template = %senvironment.get_template('
- % (self.environment.is_async and 'await ' or ''))
+ self.write(
+ "included_template = %senvironment.get_template("
+ % (self.environment.is_async and "await " or "")
+ )
self.visit(node.template, frame)
- self.write(', %r).' % self.name)
+ self.write(", %r)." % self.name)
if node.with_context:
- self.write('make_module%s(context.get_all(), True, %s)'
- % (self.environment.is_async and '_async' or '',
- self.dump_local_context(frame)))
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
elif self.environment.is_async:
- self.write('_get_default_module_async()')
+ self.write("_get_default_module_async()")
else:
- self.write('_get_default_module()')
+ self.write("_get_default_module()")
var_names = []
discarded_names = []
@@ -985,41 +1028,51 @@ def visit_FromImport(self, node, frame):
name, alias = name
else:
alias = name
- self.writeline('%s = getattr(included_template, '
- '%r, missing)' % (frame.symbols.ref(alias), name))
- self.writeline('if %s is missing:' % frame.symbols.ref(alias))
+ self.writeline(
+ "%s = getattr(included_template, "
+ "%r, missing)" % (frame.symbols.ref(alias), name)
+ )
+ self.writeline("if %s is missing:" % frame.symbols.ref(alias))
self.indent()
- self.writeline('%s = undefined(%r %% '
- 'included_template.__name__, '
- 'name=%r)' %
- (frame.symbols.ref(alias),
- 'the template %%r (imported on %s) does '
- 'not export the requested name %s' % (
- self.position(node),
- repr(name)
- ), name))
+ self.writeline(
+ "%s = undefined(%r %% "
+ "included_template.__name__, "
+ "name=%r)"
+ % (
+ frame.symbols.ref(alias),
+ "the template %%r (imported on %s) does "
+ "not export the requested name %s"
+ % (self.position(node), repr(name)),
+ name,
+ )
+ )
self.outdent()
if frame.toplevel:
var_names.append(alias)
- if not alias.startswith('_'):
+ if not alias.startswith("_"):
discarded_names.append(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
- self.writeline('context.vars[%r] = %s' %
- (name, frame.symbols.ref(name)))
+ self.writeline(
+ "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
+ )
else:
- self.writeline('context.vars.update({%s})' % ', '.join(
- '%r: %s' % (name, frame.symbols.ref(name)) for name in var_names
- ))
+ self.writeline(
+ "context.vars.update({%s})"
+ % ", ".join(
+ "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
+ )
+ )
if discarded_names:
if len(discarded_names) == 1:
- self.writeline('context.exported_vars.discard(%r)' %
- discarded_names[0])
+ self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
else:
- self.writeline('context.exported_vars.difference_'
- 'update((%s))' % ', '.join(imap(repr, discarded_names)))
+ self.writeline(
+ "context.exported_vars.difference_"
+ "update((%s))" % ", ".join(imap(repr, discarded_names))
+ )
def visit_For(self, node, frame):
loop_frame = frame.inner()
@@ -1029,35 +1082,35 @@ def visit_For(self, node, frame):
# try to figure out if we have an extended loop. An extended loop
# is necessary if the loop is in recursive mode if the special loop
# variable is accessed in the body.
- extended_loop = node.recursive or 'loop' in \
- find_undeclared(node.iter_child_nodes(
- only=('body',)), ('loop',))
+ extended_loop = node.recursive or "loop" in find_undeclared(
+ node.iter_child_nodes(only=("body",)), ("loop",)
+ )
loop_ref = None
if extended_loop:
- loop_ref = loop_frame.symbols.declare_parameter('loop')
+ loop_ref = loop_frame.symbols.declare_parameter("loop")
- loop_frame.symbols.analyze_node(node, for_branch='body')
+ loop_frame.symbols.analyze_node(node, for_branch="body")
if node.else_:
- else_frame.symbols.analyze_node(node, for_branch='else')
+ else_frame.symbols.analyze_node(node, for_branch="else")
if node.test:
loop_filter_func = self.temporary_identifier()
- test_frame.symbols.analyze_node(node, for_branch='test')
- self.writeline('%s(fiter):' % self.func(loop_filter_func), node.test)
+ test_frame.symbols.analyze_node(node, for_branch="test")
+ self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
self.indent()
self.enter_frame(test_frame)
- self.writeline(self.environment.is_async and 'async for ' or 'for ')
+ self.writeline(self.environment.is_async and "async for " or "for ")
self.visit(node.target, loop_frame)
- self.write(' in ')
- self.write(self.environment.is_async and 'auto_aiter(fiter)' or 'fiter')
- self.write(':')
+ self.write(" in ")
+ self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
+ self.write(":")
self.indent()
- self.writeline('if ', node.test)
+ self.writeline("if ", node.test)
self.visit(node.test, test_frame)
- self.write(':')
+ self.write(":")
self.indent()
- self.writeline('yield ')
+ self.writeline("yield ")
self.visit(node.target, loop_frame)
self.outdent(3)
self.leave_frame(test_frame, with_python_scope=True)
@@ -1066,8 +1119,9 @@ def visit_For(self, node, frame):
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if node.recursive:
- self.writeline('%s(reciter, loop_render_func, depth=0):' %
- self.func('loop'), node)
+ self.writeline(
+ "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
+ )
self.indent()
self.buffer(loop_frame)
@@ -1077,57 +1131,60 @@ def visit_For(self, node, frame):
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
- self.writeline('%s = missing' % loop_ref)
+ self.writeline("%s = missing" % loop_ref)
for name in node.find_all(nodes.Name):
- if name.ctx == 'store' and name.name == 'loop':
- self.fail('Can\'t assign to special loop variable '
- 'in for-loop target', name.lineno)
+ if name.ctx == "store" and name.name == "loop":
+ self.fail(
+ "Can't assign to special loop variable in for-loop target",
+ name.lineno,
+ )
if node.else_:
iteration_indicator = self.temporary_identifier()
- self.writeline('%s = 1' % iteration_indicator)
+ self.writeline("%s = 1" % iteration_indicator)
- self.writeline(self.environment.is_async and 'async for ' or 'for ', node)
+ self.writeline(self.environment.is_async and "async for " or "for ", node)
self.visit(node.target, loop_frame)
if extended_loop:
if self.environment.is_async:
- self.write(', %s in await make_async_loop_context(' % loop_ref)
+ self.write(", %s in AsyncLoopContext(" % loop_ref)
else:
- self.write(', %s in LoopContext(' % loop_ref)
+ self.write(", %s in LoopContext(" % loop_ref)
else:
- self.write(' in ')
+ self.write(" in ")
if node.test:
- self.write('%s(' % loop_filter_func)
+ self.write("%s(" % loop_filter_func)
if node.recursive:
- self.write('reciter')
+ self.write("reciter")
else:
if self.environment.is_async and not extended_loop:
- self.write('auto_aiter(')
+ self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async and not extended_loop:
- self.write(')')
+ self.write(")")
if node.test:
- self.write(')')
+ self.write(")")
if node.recursive:
- self.write(', undefined, loop_render_func, depth):')
+ self.write(", undefined, loop_render_func, depth):")
else:
- self.write(extended_loop and ', undefined):' or ':')
+ self.write(extended_loop and ", undefined):" or ":")
self.indent()
self.enter_frame(loop_frame)
self.blockvisit(node.body, loop_frame)
if node.else_:
- self.writeline('%s = 0' % iteration_indicator)
+ self.writeline("%s = 0" % iteration_indicator)
self.outdent()
- self.leave_frame(loop_frame, with_python_scope=node.recursive
- and not node.else_)
+ self.leave_frame(
+ loop_frame, with_python_scope=node.recursive and not node.else_
+ )
if node.else_:
- self.writeline('if %s:' % iteration_indicator)
+ self.writeline("if %s:" % iteration_indicator)
self.indent()
self.enter_frame(else_frame)
self.blockvisit(node.else_, else_frame)
@@ -1141,33 +1198,33 @@ def visit_For(self, node, frame):
self.outdent()
self.start_write(frame, node)
if self.environment.is_async:
- self.write('await ')
- self.write('loop(')
+ self.write("await ")
+ self.write("loop(")
if self.environment.is_async:
- self.write('auto_aiter(')
+ self.write("auto_aiter(")
self.visit(node.iter, frame)
if self.environment.is_async:
- self.write(')')
- self.write(', loop)')
+ self.write(")")
+ self.write(", loop)")
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
- self.writeline('if ', node)
+ self.writeline("if ", node)
self.visit(node.test, if_frame)
- self.write(':')
+ self.write(":")
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
for elif_ in node.elif_:
- self.writeline('elif ', elif_)
+ self.writeline("elif ", elif_)
self.visit(elif_.test, if_frame)
- self.write(':')
+ self.write(":")
self.indent()
self.blockvisit(elif_.body, if_frame)
self.outdent()
if node.else_:
- self.writeline('else:')
+ self.writeline("else:")
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
@@ -1176,16 +1233,15 @@ def visit_Macro(self, node, frame):
macro_frame, macro_ref = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
- if not node.name.startswith('_'):
- self.write('context.exported_vars.add(%r)' % node.name)
- ref = frame.symbols.ref(node.name)
- self.writeline('context.vars[%r] = ' % node.name)
- self.write('%s = ' % frame.symbols.ref(node.name))
+ if not node.name.startswith("_"):
+ self.write("context.exported_vars.add(%r)" % node.name)
+ self.writeline("context.vars[%r] = " % node.name)
+ self.write("%s = " % frame.symbols.ref(node.name))
self.macro_def(macro_ref, macro_frame)
def visit_CallBlock(self, node, frame):
call_frame, macro_ref = self.macro_body(node, frame)
- self.writeline('caller = ')
+ self.writeline("caller = ")
self.macro_def(macro_ref, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, frame, forward_caller=True)
@@ -1206,10 +1262,10 @@ def visit_With(self, node, frame):
with_frame = frame.inner()
with_frame.symbols.analyze_node(node)
self.enter_frame(with_frame)
- for idx, (target, expr) in enumerate(izip(node.targets, node.values)):
+ for target, expr in izip(node.targets, node.values):
self.newline()
self.visit(target, with_frame)
- self.write(' = ')
+ self.write(" = ")
self.visit(expr, frame)
self.blockvisit(node.body, with_frame)
self.leave_frame(with_frame)
@@ -1218,156 +1274,187 @@ def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
- def visit_Output(self, node, frame):
- # if we have a known extends statement, we don't output anything
- # if we are in a require_output_check section
- if self.has_known_extends and frame.require_output_check:
- return
+ _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
+ #: The default finalize function if the environment isn't configured
+ #: with one. Or if the environment has one, this is called on that
+ #: function's output for constants.
+ _default_finalize = text_type
+ _finalize = None
+
+ def _make_finalize(self):
+ """Build the finalize function to be used on constants and at
+ runtime. Cached so it's only created once for all output nodes.
+
+ Returns a ``namedtuple`` with the following attributes:
+
+ ``const``
+ A function to finalize constant data at compile time.
+
+ ``src``
+ Source code to output around nodes to be evaluated at
+ runtime.
+ """
+ if self._finalize is not None:
+ return self._finalize
+
+ finalize = default = self._default_finalize
+ src = None
- allow_constant_finalize = True
if self.environment.finalize:
- func = self.environment.finalize
- if getattr(func, 'contextfunction', False) or \
- getattr(func, 'evalcontextfunction', False):
- allow_constant_finalize = False
- elif getattr(func, 'environmentfunction', False):
- finalize = lambda x: text_type(
- self.environment.finalize(self.environment, x))
- else:
- finalize = lambda x: text_type(self.environment.finalize(x))
+ src = "environment.finalize("
+ env_finalize = self.environment.finalize
+
+ def finalize(value):
+ return default(env_finalize(value))
+
+ if getattr(env_finalize, "contextfunction", False) is True:
+ src += "context, "
+ finalize = None # noqa: F811
+ elif getattr(env_finalize, "evalcontextfunction", False) is True:
+ src += "context.eval_ctx, "
+ finalize = None
+ elif getattr(env_finalize, "environmentfunction", False) is True:
+ src += "environment, "
+
+ def finalize(value):
+ return default(env_finalize(self.environment, value))
+
+ self._finalize = self._FinalizeInfo(finalize, src)
+ return self._finalize
+
+ def _output_const_repr(self, group):
+ """Given a group of constant values converted from ``Output``
+ child nodes, produce a string to write to the template module
+ source.
+ """
+ return repr(concat(group))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ """Try to optimize a child of an ``Output`` node by trying to
+ convert it to constant, finalized data at compile time.
+
+ If :exc:`Impossible` is raised, the node is not constant and
+ will be evaluated at runtime. Any other exception will also be
+ evaluated at runtime for easier debugging.
+ """
+ const = node.as_const(frame.eval_ctx)
+
+ if frame.eval_ctx.autoescape:
+ const = escape(const)
+
+ # Template data doesn't go through finalize.
+ if isinstance(node, nodes.TemplateData):
+ return text_type(const)
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ """Output extra source code before visiting a child of an
+ ``Output`` node.
+ """
+ if frame.eval_ctx.volatile:
+ self.write("(escape if context.eval_ctx.autoescape else to_string)(")
+ elif frame.eval_ctx.autoescape:
+ self.write("escape(")
else:
- finalize = text_type
+ self.write("to_string(")
+
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ """Output extra source code after visiting a child of an
+ ``Output`` node.
+ """
+ self.write(")")
+
+ if finalize.src is not None:
+ self.write(")")
- # if we are inside a frame that requires output checking, we do so
- outdent_later = False
+ def visit_Output(self, node, frame):
+ # If an extends is active, don't render outside a block.
if frame.require_output_check:
- self.writeline('if parent_template is None:')
+ # A top-level extends is known to exist at compile time.
+ if self.has_known_extends:
+ return
+
+ self.writeline("if parent_template is None:")
self.indent()
- outdent_later = True
- # try to evaluate as many chunks as possible into a static
- # string at compile time.
+ finalize = self._make_finalize()
body = []
+
+ # Evaluate constants at compile time if possible. Each item in
+ # body will be either a list of static data or a node to be
+ # evaluated at runtime.
for child in node.nodes:
try:
- if not allow_constant_finalize:
+ if not (
+ # If the finalize function requires runtime context,
+ # constants can't be evaluated at compile time.
+ finalize.const
+ # Unless it's basic template data that won't be
+ # finalized anyway.
+ or isinstance(child, nodes.TemplateData)
+ ):
raise nodes.Impossible()
- const = child.as_const(frame.eval_ctx)
- except nodes.Impossible:
- body.append(child)
- continue
- # the frame can't be volatile here, becaus otherwise the
- # as_const() function would raise an Impossible exception
- # at that point.
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
- const = finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node
- # at runtime for easier debugging
+
+ const = self._output_child_to_const(child, frame, finalize)
+ except (nodes.Impossible, Exception):
+ # The node was not constant and needs to be evaluated at
+ # runtime. Or another error was raised, which is easier
+ # to debug at runtime.
body.append(child)
continue
+
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
- # if we have less than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
+ if frame.buffer is not None:
+ if len(body) == 1:
+ self.writeline("%s.append(" % frame.buffer)
+ else:
+ self.writeline("%s.extend((" % frame.buffer)
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ # A group of constant data to join and output.
+ val = self._output_const_repr(item)
+
+ if frame.buffer is None:
+ self.writeline("yield " + val)
else:
- self.writeline('%s.extend((' % frame.buffer)
- self.indent()
- for item in body:
- if isinstance(item, list):
- val = repr(concat(item))
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
+ self.writeline(val + ",")
+ else:
+ if frame.buffer is None:
+ self.writeline("yield ", item)
else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
- close = 1
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape'
- ' else to_string)(')
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- else:
- self.write('to_string(')
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- "contextfunction", False):
- self.write('context, ')
- close += 1
- self.visit(item, frame)
- self.write(')' * close)
- if frame.buffer is not None:
- self.write(',')
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
+ self.newline(item)
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
- for item in body:
- if isinstance(item, list):
- format.append(concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
- for argument in arguments:
- self.newline(argument)
- close = 0
- if frame.eval_ctx.volatile:
- self.write('(escape if context.eval_ctx.autoescape else'
- ' to_string)(')
- close += 1
- elif frame.eval_ctx.autoescape:
- self.write('escape(')
- close += 1
- if self.environment.finalize is not None:
- self.write('environment.finalize(')
- if getattr(self.environment.finalize,
- 'contextfunction', False):
- self.write('context, ')
- elif getattr(self.environment.finalize,
- 'evalcontextfunction', False):
- self.write('context.eval_ctx, ')
- elif getattr(self.environment.finalize,
- 'environmentfunction', False):
- self.write('environment, ')
- close += 1
- self.visit(argument, frame)
- self.write(')' * close + ', ')
+ # A node to be evaluated at runtime.
+ self._output_child_pre(item, frame, finalize)
+ self.visit(item, frame)
+ self._output_child_post(item, frame, finalize)
+
+ if frame.buffer is not None:
+ self.write(",")
+
+ if frame.buffer is not None:
self.outdent()
- self.writeline(')')
+ self.writeline(")" if len(body) == 1 else "))")
- if outdent_later:
+ if frame.require_output_check:
self.outdent()
def visit_Assign(self, node, frame):
self.push_assign_tracking()
self.newline(node)
self.visit(node.target, frame)
- self.write(' = ')
+ self.write(" = ")
self.visit(node.node, frame)
self.pop_assign_tracking(frame)
@@ -1384,20 +1471,19 @@ def visit_AssignBlock(self, node, frame):
self.blockvisit(node.body, block_frame)
self.newline(node)
self.visit(node.target, frame)
- self.write(' = (Markup if context.eval_ctx.autoescape '
- 'else identity)(')
+ self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
if node.filter is not None:
self.visit_Filter(node.filter, block_frame)
else:
- self.write('concat(%s)' % block_frame.buffer)
- self.write(')')
+ self.write("concat(%s)" % block_frame.buffer)
+ self.write(")")
self.pop_assign_tracking(frame)
self.leave_frame(block_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
- if node.ctx == 'store' and frame.toplevel:
+ if node.ctx == "store" and frame.toplevel:
if self._assign_stack:
self._assign_stack[-1].add(node.name)
ref = frame.symbols.ref(node.name)
@@ -1405,12 +1491,17 @@ def visit_Name(self, node, frame):
# If we are looking up a variable we might have to deal with the
# case where it's undefined. We can skip that case if the load
# instruction indicates a parameter which are always defined.
- if node.ctx == 'load':
+ if node.ctx == "load":
load = frame.symbols.find_load(ref)
- if not (load is not None and load[0] == VAR_LOAD_PARAMETER and \
- not self.parameter_is_undeclared(ref)):
- self.write('(undefined(name=%r) if %s is missing else %s)' %
- (node.name, ref, ref))
+ if not (
+ load is not None
+ and load[0] == VAR_LOAD_PARAMETER
+ and not self.parameter_is_undeclared(ref)
+ ):
+ self.write(
+ "(undefined(name=%r) if %s is missing else %s)"
+ % (node.name, ref, ref)
+ )
return
self.write(ref)
@@ -1420,12 +1511,14 @@ def visit_NSRef(self, node, frame):
# `foo.bar` notation they will be parsed as a normal attribute access
# when used anywhere but in a `set` context
ref = frame.symbols.ref(node.name)
- self.writeline('if not isinstance(%s, Namespace):' % ref)
+ self.writeline("if not isinstance(%s, Namespace):" % ref)
self.indent()
- self.writeline('raise TemplateRuntimeError(%r)' %
- 'cannot assign attribute on non-namespace object')
+ self.writeline(
+ "raise TemplateRuntimeError(%r)"
+ % "cannot assign attribute on non-namespace object"
+ )
self.outdent()
- self.writeline('%s[%r]' % (ref, node.attr))
+ self.writeline("%s[%r]" % (ref, node.attr))
def visit_Const(self, node, frame):
val = node.as_const(frame.eval_ctx)
@@ -1438,230 +1531,256 @@ def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
- self.write('(Markup if context.eval_ctx.autoescape else identity)(%r)'
- % node.data)
+ self.write(
+ "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
+ )
def visit_Tuple(self, node, frame):
- self.write('(')
+ self.write("(")
idx = -1
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item, frame)
- self.write(idx == 0 and ',)' or ')')
+ self.write(idx == 0 and ",)" or ")")
def visit_List(self, node, frame):
- self.write('[')
+ self.write("[")
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item, frame)
- self.write(']')
+ self.write("]")
def visit_Dict(self, node, frame):
- self.write('{')
+ self.write("{")
for idx, item in enumerate(node.items):
if idx:
- self.write(', ')
+ self.write(", ")
self.visit(item.key, frame)
- self.write(': ')
+ self.write(": ")
self.visit(item.value, frame)
- self.write('}')
+ self.write("}")
- def binop(operator, interceptable=True):
+ def binop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_binops:
- self.write('environment.call_binop(context, %r, ' % operator)
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_binops
+ ):
+ self.write("environment.call_binop(context, %r, " % operator)
self.visit(node.left, frame)
- self.write(', ')
+ self.write(", ")
self.visit(node.right, frame)
else:
- self.write('(')
+ self.write("(")
self.visit(node.left, frame)
- self.write(' %s ' % operator)
+ self.write(" %s " % operator)
self.visit(node.right, frame)
- self.write(')')
+ self.write(")")
+
return visitor
- def uaop(operator, interceptable=True):
+ def uaop(operator, interceptable=True): # noqa: B902
@optimizeconst
def visitor(self, node, frame):
- if self.environment.sandboxed and \
- operator in self.environment.intercepted_unops:
- self.write('environment.call_unop(context, %r, ' % operator)
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_unops
+ ):
+ self.write("environment.call_unop(context, %r, " % operator)
self.visit(node.node, frame)
else:
- self.write('(' + operator)
+ self.write("(" + operator)
self.visit(node.node, frame)
- self.write(')')
+ self.write(")")
+
return visitor
- visit_Add = binop('+')
- visit_Sub = binop('-')
- visit_Mul = binop('*')
- visit_Div = binop('/')
- visit_FloorDiv = binop('//')
- visit_Pow = binop('**')
- visit_Mod = binop('%')
- visit_And = binop('and', interceptable=False)
- visit_Or = binop('or', interceptable=False)
- visit_Pos = uaop('+')
- visit_Neg = uaop('-')
- visit_Not = uaop('not ', interceptable=False)
+ visit_Add = binop("+")
+ visit_Sub = binop("-")
+ visit_Mul = binop("*")
+ visit_Div = binop("/")
+ visit_FloorDiv = binop("//")
+ visit_Pow = binop("**")
+ visit_Mod = binop("%")
+ visit_And = binop("and", interceptable=False)
+ visit_Or = binop("or", interceptable=False)
+ visit_Pos = uaop("+")
+ visit_Neg = uaop("-")
+ visit_Not = uaop("not ", interceptable=False)
del binop, uaop
@optimizeconst
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
- func_name = '(context.eval_ctx.volatile and' \
- ' markup_join or unicode_join)'
+ func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
elif frame.eval_ctx.autoescape:
- func_name = 'markup_join'
+ func_name = "markup_join"
else:
- func_name = 'unicode_join'
- self.write('%s((' % func_name)
+ func_name = "unicode_join"
+ self.write("%s((" % func_name)
for arg in node.nodes:
self.visit(arg, frame)
- self.write(', ')
- self.write('))')
+ self.write(", ")
+ self.write("))")
@optimizeconst
def visit_Compare(self, node, frame):
+ self.write("(")
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
+ self.write(")")
def visit_Operand(self, node, frame):
- self.write(' %s ' % operators[node.op])
+ self.write(" %s " % operators[node.op])
self.visit(node.expr, frame)
@optimizeconst
def visit_Getattr(self, node, frame):
- self.write('environment.getattr(')
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getattr(")
self.visit(node.node, frame)
- self.write(', %r)' % node.attr)
+ self.write(", %r)" % node.attr)
+
+ if self.environment.is_async:
+ self.write("))")
@optimizeconst
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
- self.write('[')
+ self.write("[")
self.visit(node.arg, frame)
- self.write(']')
+ self.write("]")
else:
- self.write('environment.getitem(')
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getitem(")
self.visit(node.node, frame)
- self.write(', ')
+ self.write(", ")
self.visit(node.arg, frame)
- self.write(')')
+ self.write(")")
+
+ if self.environment.is_async:
+ self.write("))")
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
- self.write(':')
+ self.write(":")
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
- self.write(':')
+ self.write(":")
self.visit(node.step, frame)
@optimizeconst
def visit_Filter(self, node, frame):
if self.environment.is_async:
- self.write('await auto_await(')
- self.write(self.filters[node.name] + '(')
+ self.write("await auto_await(")
+ self.write(self.filters[node.name] + "(")
func = self.environment.filters.get(node.name)
if func is None:
- self.fail('no filter named %r' % node.name, node.lineno)
- if getattr(func, 'contextfilter', False):
- self.write('context, ')
- elif getattr(func, 'evalcontextfilter', False):
- self.write('context.eval_ctx, ')
- elif getattr(func, 'environmentfilter', False):
- self.write('environment, ')
+ self.fail("no filter named %r" % node.name, node.lineno)
+ if getattr(func, "contextfilter", False) is True:
+ self.write("context, ")
+ elif getattr(func, "evalcontextfilter", False) is True:
+ self.write("context.eval_ctx, ")
+ elif getattr(func, "environmentfilter", False) is True:
+ self.write("environment, ")
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
- self.write('(context.eval_ctx.autoescape and'
- ' Markup(concat(%s)) or concat(%s))' %
- (frame.buffer, frame.buffer))
+ self.write(
+ "(context.eval_ctx.autoescape and"
+ " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
+ )
elif frame.eval_ctx.autoescape:
- self.write('Markup(concat(%s))' % frame.buffer)
+ self.write("Markup(concat(%s))" % frame.buffer)
else:
- self.write('concat(%s)' % frame.buffer)
+ self.write("concat(%s)" % frame.buffer)
self.signature(node, frame)
- self.write(')')
+ self.write(")")
if self.environment.is_async:
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_Test(self, node, frame):
- self.write(self.tests[node.name] + '(')
+ self.write(self.tests[node.name] + "(")
if node.name not in self.environment.tests:
- self.fail('no test named %r' % node.name, node.lineno)
+ self.fail("no test named %r" % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
- self.write('undefined(%r)' % ('the inline if-'
- 'expression on %s evaluated to false and '
- 'no else section was defined.' % self.position(node)))
-
- self.write('(')
+ self.write(
+ "cond_expr_undefined(%r)"
+ % (
+ "the inline if-"
+ "expression on %s evaluated to false and "
+ "no else section was defined." % self.position(node)
+ )
+ )
+
+ self.write("(")
self.visit(node.expr1, frame)
- self.write(' if ')
+ self.write(" if ")
self.visit(node.test, frame)
- self.write(' else ')
+ self.write(" else ")
write_expr2()
- self.write(')')
+ self.write(")")
@optimizeconst
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.is_async:
- self.write('await auto_await(')
+ self.write("await auto_await(")
if self.environment.sandboxed:
- self.write('environment.call(context, ')
+ self.write("environment.call(context, ")
else:
- self.write('context.call(')
+ self.write("context.call(")
self.visit(node.node, frame)
- extra_kwargs = forward_caller and {'caller': 'caller'} or None
+ extra_kwargs = forward_caller and {"caller": "caller"} or None
self.signature(node, frame, extra_kwargs)
- self.write(')')
+ self.write(")")
if self.environment.is_async:
- self.write(')')
+ self.write(")")
def visit_Keyword(self, node, frame):
- self.write(node.key + '=')
+ self.write(node.key + "=")
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
- self.write('Markup(')
+ self.write("Markup(")
self.visit(node.expr, frame)
- self.write(')')
+ self.write(")")
def visit_MarkSafeIfAutoescape(self, node, frame):
- self.write('(context.eval_ctx.autoescape and Markup or identity)(')
+ self.write("(context.eval_ctx.autoescape and Markup or identity)(")
self.visit(node.expr, frame)
- self.write(')')
+ self.write(")")
def visit_EnvironmentAttribute(self, node, frame):
- self.write('environment.' + node.name)
+ self.write("environment." + node.name)
def visit_ExtensionAttribute(self, node, frame):
- self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
+ self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
@@ -1670,13 +1789,16 @@ def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
- self.write('context')
+ self.write("context")
+
+ def visit_DerivedContextReference(self, node, frame):
+ self.write(self.derive_context(frame))
def visit_Continue(self, node, frame):
- self.writeline('continue', node)
+ self.writeline("continue", node)
def visit_Break(self, node, frame):
- self.writeline('break', node)
+ self.writeline("break", node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
@@ -1687,8 +1809,8 @@ def visit_Scope(self, node, frame):
def visit_OverlayScope(self, node, frame):
ctx = self.temporary_identifier()
- self.writeline('%s = %s' % (ctx, self.derive_context(frame)))
- self.writeline('%s.vars = ' % ctx)
+ self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
+ self.writeline("%s.vars = " % ctx)
self.visit(node.context, frame)
self.push_context_reference(ctx)
@@ -1701,7 +1823,7 @@ def visit_OverlayScope(self, node, frame):
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
- self.writeline('context.eval_ctx.%s = ' % keyword.key)
+ self.writeline("context.eval_ctx.%s = " % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
@@ -1713,9 +1835,9 @@ def visit_EvalContextModifier(self, node, frame):
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
saved_ctx = frame.eval_ctx.save()
- self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
+ self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(saved_ctx)
- self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
+ self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
diff --git a/external/python/jinja2/constants.py b/external/python/jinja2/constants.py
index 11efd1ed..bf7f2ca7 100644
--- a/external/python/jinja2/constants.py
+++ b/external/python/jinja2/constants.py
@@ -1,17 +1,6 @@
# -*- coding: utf-8 -*-
-"""
- jinja.constants
- ~~~~~~~~~~~~~~~
-
- Various constants.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-
-
#: list of lorem ipsum words used by the lipsum() helper function
-LOREM_IPSUM_WORDS = u'''\
+LOREM_IPSUM_WORDS = u"""\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
@@ -29,4 +18,4 @@
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
-viverra volutpat vulputate'''
+viverra volutpat vulputate"""
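The debug.py rewrite below replaces the old traceback-proxy machinery with rewrite_traceback_stack and fake_traceback. As a rough sketch (assuming the vendored Jinja 2.11 API) of the user-visible effect both the old and new implementations aim for, errors raised from compiled template code surface with frames that point at the template source line rather than the generated module:

    import traceback
    from jinja2 import Environment

    tmpl = Environment().from_string("line one\n{{ 1 / 0 }}")
    try:
        tmpl.render()
    except ZeroDivisionError:
        # The rewritten traceback's last frame reads roughly:
        #   File "<template>", line 2, in top-level template code
        traceback.print_exc()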
diff --git a/external/python/jinja2/debug.py b/external/python/jinja2/debug.py
index d3c1a3a8..5d8aec31 100644
--- a/external/python/jinja2/debug.py
+++ b/external/python/jinja2/debug.py
@@ -1,378 +1,268 @@
-# -*- coding: utf-8 -*-
-"""
- jinja2.debug
- ~~~~~~~~~~~~
-
- Implements the debug interface for Jinja. This module does some pretty
- ugly stuff with the Python traceback system in order to achieve tracebacks
- with correct line numbers, locals and contents.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
import sys
-import traceback
-from types import TracebackType, CodeType
-from jinja2.utils import missing, internal_code
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2._compat import iteritems, reraise, PY2
+from types import CodeType
-# on pypy we can take advantage of transparent proxies
-try:
- from __pypy__ import tproxy
-except ImportError:
- tproxy = None
+from . import TemplateSyntaxError
+from ._compat import PYPY
+from .utils import internal_code
+from .utils import missing
-# how does the raise helper look like?
-try:
- exec("raise TypeError, 'foo'")
-except SyntaxError:
- raise_helper = 'raise __jinja_exception__[1]'
-except TypeError:
- raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
+def rewrite_traceback_stack(source=None):
+ """Rewrite the current exception to replace any tracebacks from
+ within compiled template code with tracebacks that look like they
+ came from the template source.
+ This must be called within an ``except`` block.
-class TracebackFrameProxy(object):
- """Proxies a traceback frame."""
+ :param exc_info: A :meth:`sys.exc_info` tuple. If not provided,
+ the current ``exc_info`` is used.
+ :param source: For ``TemplateSyntaxError``, the original source if
+ known.
+ :return: A :meth:`sys.exc_info` tuple that can be re-raised.
+ """
+ exc_type, exc_value, tb = sys.exc_info()
- def __init__(self, tb):
- self.tb = tb
- self._tb_next = None
+ if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
+ exc_value.translated = True
+ exc_value.source = source
- @property
- def tb_next(self):
- return self._tb_next
+ try:
+ # Remove the old traceback on Python 3, otherwise the frames
+ # from the compiler still show up.
+ exc_value.with_traceback(None)
+ except AttributeError:
+ pass
- def set_next(self, next):
- if tb_set_next is not None:
- try:
- tb_set_next(self.tb, next and next.tb or None)
- except Exception:
- # this function can fail due to all the hackery it does
- # on various python implementations. We just catch errors
- # down and ignore them if necessary.
- pass
- self._tb_next = next
-
- @property
- def is_jinja_frame(self):
- return '__jinja_template__' in self.tb.tb_frame.f_globals
-
- def __getattr__(self, name):
- return getattr(self.tb, name)
-
-
-def make_frame_proxy(frame):
- proxy = TracebackFrameProxy(frame)
- if tproxy is None:
- return proxy
- def operation_handler(operation, *args, **kwargs):
- if operation in ('__getattribute__', '__getattr__'):
- return getattr(proxy, args[0])
- elif operation == '__setattr__':
- proxy.__setattr__(*args, **kwargs)
- else:
- return getattr(proxy, operation)(*args, **kwargs)
- return tproxy(TracebackType, operation_handler)
-
-
-class ProcessedTraceback(object):
- """Holds a Jinja preprocessed traceback for printing or reraising."""
-
- def __init__(self, exc_type, exc_value, frames):
- assert frames, 'no frames for this traceback?'
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.frames = frames
-
- # newly concatenate the frames (which are proxies)
- prev_tb = None
- for tb in self.frames:
- if prev_tb is not None:
- prev_tb.set_next(tb)
- prev_tb = tb
- prev_tb.set_next(None)
-
- def render_as_text(self, limit=None):
- """Return a string with the traceback."""
- lines = traceback.format_exception(self.exc_type, self.exc_value,
- self.frames[0], limit=limit)
- return ''.join(lines).rstrip()
-
- def render_as_html(self, full=False):
- """Return a unicode string with the traceback as rendered HTML."""
- from jinja2.debugrenderer import render_traceback
- return u'%s\n\n<!--\n%s\n-->' % (
- render_traceback(self, full=full),
- self.render_as_text().decode('utf-8', 'replace')
+ # Outside of runtime, so the frame isn't executing template
+ # code, but it still needs to point at the template.
+ tb = fake_traceback(
+ exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
)
-
- @property
- def is_template_syntax_error(self):
- """`True` if this is a template syntax error."""
- return isinstance(self.exc_value, TemplateSyntaxError)
-
- @property
- def exc_info(self):
- """Exception info tuple with a proxy around the frame objects."""
- return self.exc_type, self.exc_value, self.frames[0]
-
- @property
- def standard_exc_info(self):
- """Standard python exc_info for re-raising"""
- tb = self.frames[0]
- # the frame will be an actual traceback (or transparent proxy) if
- # we are on pypy or a python implementation with support for tproxy
- if type(tb) is not TracebackType:
- tb = tb.tb
- return self.exc_type, self.exc_value, tb
-
-
-def make_traceback(exc_info, source_hint=None):
- """Creates a processed traceback object from the exc_info."""
- exc_type, exc_value, tb = exc_info
- if isinstance(exc_value, TemplateSyntaxError):
- exc_info = translate_syntax_error(exc_value, source_hint)
- initial_skip = 0
else:
- initial_skip = 1
- return translate_exception(exc_info, initial_skip)
-
-
-def translate_syntax_error(error, source=None):
- """Rewrites a syntax error to please traceback systems."""
- error.source = source
- error.translated = True
- exc_info = (error.__class__, error, None)
- filename = error.filename
- if filename is None:
- filename = ''
- return fake_exc_info(exc_info, filename, error.lineno)
+ # Skip the frame for the render function.
+ tb = tb.tb_next
+ stack = []
-def translate_exception(exc_info, initial_skip=0):
- """If passed an exc_info it will automatically rewrite the exceptions
- all the way down to the correct line numbers and frames.
- """
- tb = exc_info[2]
- frames = []
-
- # skip some internal frames if wanted
- for x in range(initial_skip):
- if tb is not None:
- tb = tb.tb_next
- initial_tb = tb
-
+ # Build the stack of traceback objects, replacing any in template
+ # code with the source file and line information.
while tb is not None:
- # skip frames decorated with @internalcode. These are internal
- # calls we can't avoid and that are useless in template debugging
- # output.
+ # Skip frames decorated with @internalcode. These are internal
+ # calls that aren't useful in template debugging output.
if tb.tb_frame.f_code in internal_code:
tb = tb.tb_next
continue
- # save a reference to the next frame if we override the current
- # one with a faked one.
- next = tb.tb_next
+ template = tb.tb_frame.f_globals.get("__jinja_template__")
- # fake template exceptions
- template = tb.tb_frame.f_globals.get('__jinja_template__')
if template is not None:
lineno = template.get_corresponding_lineno(tb.tb_lineno)
- tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
- lineno)[2]
+ fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+ stack.append(fake_tb)
+ else:
+ stack.append(tb)
- frames.append(make_frame_proxy(tb))
- tb = next
+ tb = tb.tb_next
- # if we don't have any exceptions in the frames left, we have to
- # reraise it unchanged.
- # XXX: can we backup here? when could this happen?
- if not frames:
- reraise(exc_info[0], exc_info[1], exc_info[2])
+ tb_next = None
- return ProcessedTraceback(exc_info[0], exc_info[1], frames)
+ # Assign tb_next in reverse to avoid circular references.
+ for tb in reversed(stack):
+ tb_next = tb_set_next(tb, tb_next)
+ return exc_type, exc_value, tb_next
-def get_jinja_locals(real_locals):
- ctx = real_locals.get('context')
- if ctx:
- locals = ctx.get_all().copy()
+
+def fake_traceback(exc_value, tb, filename, lineno):
+ """Produce a new traceback object that looks like it came from the
+ template source instead of the compiled code. The filename, line
+ number, and location name will point to the template, and the local
+ variables will be the current template context.
+
+ :param exc_value: The original exception to be re-raised to create
+ the new traceback.
+ :param tb: The original traceback to get the local variables and
+ code info from.
+ :param filename: The template filename.
+ :param lineno: The line number in the template source.
+ """
+ if tb is not None:
+ # Replace the real locals with the context that would be
+ # available at that point in the template.
+ locals = get_template_locals(tb.tb_frame.f_locals)
+ locals.pop("__jinja_exception__", None)
else:
locals = {}
+ globals = {
+ "__name__": filename,
+ "__file__": filename,
+ "__jinja_exception__": exc_value,
+ }
+ # Raise an exception at the correct line number.
+ code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
+
+ # Build a new code object that points to the template file and
+ # replaces the location with a block name.
+ try:
+ location = "template"
+
+ if tb is not None:
+ function = tb.tb_frame.f_code.co_name
+
+ if function == "root":
+ location = "top-level template code"
+ elif function.startswith("block_"):
+ location = 'block "%s"' % function[6:]
+
+ # Collect arguments for the new code object. CodeType only
+ # accepts positional arguments, and arguments were inserted in
+ # new Python versions.
+ code_args = []
+
+ for attr in (
+ "argcount",
+ "posonlyargcount", # Python 3.8
+ "kwonlyargcount", # Python 3
+ "nlocals",
+ "stacksize",
+ "flags",
+ "code", # codestring
+ "consts", # constants
+ "names",
+ "varnames",
+ ("filename", filename),
+ ("name", location),
+ "firstlineno",
+ "lnotab",
+ "freevars",
+ "cellvars",
+ ):
+ if isinstance(attr, tuple):
+ # Replace with given value.
+ code_args.append(attr[1])
+ continue
+
+ try:
+ # Copy original value if it exists.
+ code_args.append(getattr(code, "co_" + attr))
+ except AttributeError:
+ # Some arguments were added later.
+ continue
+
+ code = CodeType(*code_args)
+ except Exception:
+ # Some environments such as Google App Engine don't support
+ # modifying code objects.
+ pass
+
+ # Execute the new code, which is guaranteed to raise, and return
+ # the new traceback without this frame.
+ try:
+ exec(code, globals, locals)
+ except BaseException:
+ return sys.exc_info()[2].tb_next
+
+
+def get_template_locals(real_locals):
+ """Based on the runtime locals, get the context that would be
+ available at that point in the template.
+ """
+ # Start with the current template context.
+ ctx = real_locals.get("context")
+
+ if ctx:
+ data = ctx.get_all().copy()
+ else:
+ data = {}
+
+ # Might be in a derived context that only sets local variables
+ # rather than pushing a context. Local variables follow the scheme
+ # l_depth_name. Find the highest-depth local that has a value for
+ # each name.
local_overrides = {}
- for name, value in iteritems(real_locals):
- if not name.startswith('l_') or value is missing:
+ for name, value in real_locals.items():
+ if not name.startswith("l_") or value is missing:
+ # Not a template variable, or no longer relevant.
continue
+
try:
- _, depth, name = name.split('_', 2)
+ _, depth, name = name.split("_", 2)
depth = int(depth)
except ValueError:
continue
+
cur_depth = local_overrides.get(name, (-1,))[0]
+
if cur_depth < depth:
local_overrides[name] = (depth, value)
- for name, (_, value) in iteritems(local_overrides):
+ # Modify the context with any derived context.
+ for name, (_, value) in local_overrides.items():
if value is missing:
- locals.pop(name, None)
+ data.pop(name, None)
else:
- locals[name] = value
+ data[name] = value
- return locals
+ return data
-def fake_exc_info(exc_info, filename, lineno):
- """Helper for `translate_exception`."""
- exc_type, exc_value, tb = exc_info
+if sys.version_info >= (3, 7):
+ # tb_next is directly assignable as of Python 3.7
+ def tb_set_next(tb, tb_next):
+ tb.tb_next = tb_next
+ return tb
- # figure the real context out
- if tb is not None:
- locals = get_jinja_locals(tb.tb_frame.f_locals)
- # if there is a local called __jinja_exception__, we get
- # rid of it to not break the debug functionality.
- locals.pop('__jinja_exception__', None)
- else:
- locals = {}
-
- # assamble fake globals we need
- globals = {
- '__name__': filename,
- '__file__': filename,
- '__jinja_exception__': exc_info[:2],
-
- # we don't want to keep the reference to the template around
- # to not cause circular dependencies, but we mark it as Jinja
- # frame for the ProcessedTraceback
- '__jinja_template__': None
- }
-
- # and fake the exception
- code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
-
- # if it's possible, change the name of the code. This won't work
- # on some python environments such as google appengine
+elif PYPY:
+ # PyPy might have special support, and won't work with ctypes.
try:
- if tb is None:
- location = 'template'
- else:
- function = tb.tb_frame.f_code.co_name
- if function == 'root':
- location = 'top-level template code'
- elif function.startswith('block_'):
- location = 'block "%s"' % function[6:]
- else:
- location = 'template'
-
- if PY2:
- code = CodeType(0, code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- else:
- code = CodeType(0, code.co_kwonlyargcount,
- code.co_nlocals, code.co_stacksize,
- code.co_flags, code.co_code, code.co_consts,
- code.co_names, code.co_varnames, filename,
- location, code.co_firstlineno,
- code.co_lnotab, (), ())
- except Exception as e:
- pass
+ import tputil
+ except ImportError:
+ # Without tproxy support, use the original traceback.
+ def tb_set_next(tb, tb_next):
+ return tb
- # execute the code and catch the new traceback
- try:
- exec(code, globals, locals)
- except:
- exc_info = sys.exc_info()
- new_tb = exc_info[2].tb_next
+ else:
+ # With tproxy support, create a proxy around the traceback that
+ # returns the new tb_next.
+ def tb_set_next(tb, tb_next):
+ def controller(op):
+ if op.opname == "__getattribute__" and op.args[0] == "tb_next":
+ return tb_next
- # return without this frame
- return exc_info[:2] + (new_tb,)
+ return op.delegate()
+ return tputil.make_proxy(controller, obj=tb)
-def _init_ugly_crap():
- """This function implements a few ugly things so that we can patch the
- traceback objects. The function returned allows resetting `tb_next` on
- any python traceback object. Do not attempt to use this on non cpython
- interpreters
- """
- import ctypes
- from types import TracebackType
- if PY2:
- # figure out size of _Py_ssize_t for Python 2:
- if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
- _Py_ssize_t = ctypes.c_int64
- else:
- _Py_ssize_t = ctypes.c_int
- else:
- # platform ssize_t on Python 3
- _Py_ssize_t = ctypes.c_ssize_t
+else:
+ # Use ctypes to assign tb_next at the C level since it's read-only
+ # from Python.
+ import ctypes
- # regular python
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
- ]
-
- # python with trace
- if hasattr(sys, 'getobjects'):
- class _PyObject(ctypes.Structure):
- pass
- _PyObject._fields_ = [
- ('_ob_next', ctypes.POINTER(_PyObject)),
- ('_ob_prev', ctypes.POINTER(_PyObject)),
- ('ob_refcnt', _Py_ssize_t),
- ('ob_type', ctypes.POINTER(_PyObject))
+ class _CTraceback(ctypes.Structure):
+ _fields_ = [
+ # Extra PyObject slots when compiled with Py_TRACE_REFS.
+ ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
+ # Only care about tb_next as an object, not a traceback.
+ ("tb_next", ctypes.py_object),
]
- class _Traceback(_PyObject):
- pass
- _Traceback._fields_ = [
- ('tb_next', ctypes.POINTER(_Traceback)),
- ('tb_frame', ctypes.POINTER(_PyObject)),
- ('tb_lasti', ctypes.c_int),
- ('tb_lineno', ctypes.c_int)
- ]
-
- def tb_set_next(tb, next):
- """Set the tb_next attribute of a traceback object."""
- if not (isinstance(tb, TracebackType) and
- (next is None or isinstance(next, TracebackType))):
- raise TypeError('tb_set_next arguments must be traceback objects')
- obj = _Traceback.from_address(id(tb))
- if tb.tb_next is not None:
- old = _Traceback.from_address(id(tb.tb_next))
- old.ob_refcnt -= 1
- if next is None:
- obj.tb_next = ctypes.POINTER(_Traceback)()
- else:
- next = _Traceback.from_address(id(next))
- next.ob_refcnt += 1
- obj.tb_next = ctypes.pointer(next)
+ def tb_set_next(tb, tb_next):
+ c_tb = _CTraceback.from_address(id(tb))
- return tb_set_next
+ # Clear out the old tb_next.
+ if tb.tb_next is not None:
+ c_tb_next = ctypes.py_object(tb.tb_next)
+ c_tb.tb_next = ctypes.py_object()
+ ctypes.pythonapi.Py_DecRef(c_tb_next)
+ # Assign the new tb_next.
+ if tb_next is not None:
+ c_tb_next = ctypes.py_object(tb_next)
+ ctypes.pythonapi.Py_IncRef(c_tb_next)
+ c_tb.tb_next = c_tb_next
-# try to get a tb_set_next implementation if we don't have transparent
-# proxies.
-tb_set_next = None
-if tproxy is None:
- # traceback.tb_next can be modified since CPython 3.7
- if sys.version_info >= (3, 7):
- def tb_set_next(tb, next):
- tb.tb_next = next
- else:
- # On Python 3.6 and older, use ctypes
- try:
- tb_set_next = _init_ugly_crap()
- except Exception:
- pass
-del _init_ugly_crap
+ return tb
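
The rewritten debug module above replaces the old traceback-proxy machinery with rewrite_traceback_stack, fake_traceback, and a per-interpreter tb_set_next. As a rough, illustrative sketch (not part of this change; the template name and failing expression are invented), the observable effect can be checked from the public API: a failing template should yield traceback frames that point at the template source rather than at the compiled module.

    import traceback
    from jinja2 import DictLoader, Environment

    env = Environment(loader=DictLoader({"boom.html": "line one\n{{ 1 // 0 }}\n"}))

    try:
        env.get_template("boom.html").render()
    except ZeroDivisionError:
        # Expect a frame like: File "boom.html", line 2, in top-level template code
        traceback.print_exc()
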
diff --git a/external/python/jinja2/defaults.py b/external/python/jinja2/defaults.py
index 7c93dec0..8e0e7d77 100644
--- a/external/python/jinja2/defaults.py
+++ b/external/python/jinja2/defaults.py
@@ -1,56 +1,44 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.defaults
- ~~~~~~~~~~~~~~~
-
- Jinja default filters and tags.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import range_type
-from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner, Namespace
-
+from ._compat import range_type
+from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
+from .tests import TESTS as DEFAULT_TESTS # noqa: F401
+from .utils import Cycler
+from .utils import generate_lorem_ipsum
+from .utils import Joiner
+from .utils import Namespace
# defaults for the parser / lexer
-BLOCK_START_STRING = '{%'
-BLOCK_END_STRING = '%}'
-VARIABLE_START_STRING = '{{'
-VARIABLE_END_STRING = '}}'
-COMMENT_START_STRING = '{#'
-COMMENT_END_STRING = '#}'
+BLOCK_START_STRING = "{%"
+BLOCK_END_STRING = "%}"
+VARIABLE_START_STRING = "{{"
+VARIABLE_END_STRING = "}}"
+COMMENT_START_STRING = "{#"
+COMMENT_END_STRING = "#}"
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
-NEWLINE_SEQUENCE = '\n'
+NEWLINE_SEQUENCE = "\n"
KEEP_TRAILING_NEWLINE = False
-
# default filters, tests and namespace
-from jinja2.filters import FILTERS as DEFAULT_FILTERS
-from jinja2.tests import TESTS as DEFAULT_TESTS
+
DEFAULT_NAMESPACE = {
- 'range': range_type,
- 'dict': dict,
- 'lipsum': generate_lorem_ipsum,
- 'cycler': Cycler,
- 'joiner': Joiner,
- 'namespace': Namespace
+ "range": range_type,
+ "dict": dict,
+ "lipsum": generate_lorem_ipsum,
+ "cycler": Cycler,
+ "joiner": Joiner,
+ "namespace": Namespace,
}
-
# default policies
DEFAULT_POLICIES = {
- 'compiler.ascii_str': True,
- 'urlize.rel': 'noopener',
- 'urlize.target': None,
- 'truncate.leeway': 5,
- 'json.dumps_function': None,
- 'json.dumps_kwargs': {'sort_keys': True},
- 'ext.i18n.trimmed': False,
+ "compiler.ascii_str": True,
+ "urlize.rel": "noopener",
+ "urlize.target": None,
+ "truncate.leeway": 5,
+ "json.dumps_function": None,
+ "json.dumps_kwargs": {"sort_keys": True},
+ "ext.i18n.trimmed": False,
}
-
-
-# export all constants
-__all__ = tuple(x for x in locals().keys() if x.isupper())
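
The constants above are only fallbacks; callers override them per environment. A minimal sketch (delimiters chosen arbitrarily for illustration) of how these defaults are meant to be superseded:

    from jinja2 import Environment

    env = Environment(
        block_start_string="<%",
        block_end_string="%>",
        variable_start_string="${",
        variable_end_string="}",
    )
    # Renders "Hello Ann!" using the custom delimiters instead of the defaults.
    print(env.from_string("<% if user %>Hello ${ user }!<% endif %>").render(user="Ann"))
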
diff --git a/external/python/jinja2/environment.py b/external/python/jinja2/environment.py
index 549d9afa..8430390e 100644
--- a/external/python/jinja2/environment.py
+++ b/external/python/jinja2/environment.py
@@ -1,60 +1,83 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.environment
- ~~~~~~~~~~~~~~~~~~
-
- Provides a class that holds runtime and parsing time options.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""Classes for managing templates and their runtime and compile time
+options.
"""
import os
import sys
import weakref
-from functools import reduce, partial
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- DEFAULT_FILTERS, DEFAULT_TESTS, DEFAULT_NAMESPACE, \
- DEFAULT_POLICIES, KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.lexer import get_lexer, TokenStream
-from jinja2.parser import Parser
-from jinja2.nodes import EvalContext
-from jinja2.compiler import generate, CodeGenerator
-from jinja2.runtime import Undefined, new_context, Context
-from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
- TemplatesNotFound, TemplateRuntimeError
-from jinja2.utils import import_string, LRUCache, Markup, missing, \
- concat, consume, internalcode, have_async_gen
-from jinja2._compat import imap, ifilter, string_types, iteritems, \
- text_type, reraise, implements_iterator, implements_to_string, \
- encode_filename, PY2, PYPY
-
+from functools import partial
+from functools import reduce
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import encode_filename
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import PYPY
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import generate
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import DEFAULT_FILTERS
+from .defaults import DEFAULT_NAMESPACE
+from .defaults import DEFAULT_POLICIES
+from .defaults import DEFAULT_TESTS
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .lexer import get_lexer
+from .lexer import TokenStream
+from .nodes import EvalContext
+from .parser import Parser
+from .runtime import Context
+from .runtime import new_context
+from .runtime import Undefined
+from .utils import concat
+from .utils import consume
+from .utils import have_async_gen
+from .utils import import_string
+from .utils import internalcode
+from .utils import LRUCache
+from .utils import missing
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
-# the function to create jinja traceback objects. This is dynamically
-# imported on the first exception in the exception handler.
-_make_traceback = None
+def get_spontaneous_environment(cls, *args):
+ """Return a new spontaneous environment. A spontaneous environment
+ is used for templates created directly rather than through an
+ existing environment.
-def get_spontaneous_environment(*args):
- """Return a new spontaneous environment. A spontaneous environment is an
- unnamed and unaccessible (in theory) environment that is used for
- templates generated from a string and not from the file system.
+ :param cls: Environment class to create.
+ :param args: Positional arguments passed to environment.
"""
+ key = (cls, args)
+
try:
- env = _spontaneous_environments.get(args)
- except TypeError:
- return Environment(*args)
- if env is not None:
+ return _spontaneous_environments[key]
+ except KeyError:
+ _spontaneous_environments[key] = env = cls(*args)
+ env.shared = True
return env
- _spontaneous_environments[args] = env = Environment(*args)
- env.shared = True
- return env
def create_cache(size):
@@ -93,20 +116,25 @@ def fail_for_missing_callable(string, name):
try:
name._fail_with_undefined_error()
except Exception as e:
- msg = '%s (%s; did you forget to quote the callable name?)' % (msg, e)
+ msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
raise TemplateRuntimeError(msg)
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
- assert issubclass(environment.undefined, Undefined), 'undefined must ' \
- 'be a subclass of undefined because filters depend on it.'
- assert environment.block_start_string != \
- environment.variable_start_string != \
- environment.comment_start_string, 'block, variable and comment ' \
- 'start strings must be different'
- assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
- 'newline_sequence set to unknown line ending string.'
+ assert issubclass(
+ environment.undefined, Undefined
+ ), "undefined must be a subclass of undefined because filters depend on it."
+ assert (
+ environment.block_start_string
+ != environment.variable_start_string
+ != environment.comment_start_string
+ ), "block, variable and comment start strings must be different"
+ assert environment.newline_sequence in (
+ "\r",
+ "\r\n",
+ "\n",
+ ), "newline_sequence set to unknown line ending string."
return environment
@@ -191,7 +219,7 @@ class Environment(object):
`autoescape`
If set to ``True`` the XML/HTML autoescaping feature is enabled by
default. For more details about autoescaping see
- :class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
+ :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return ``True`` or ``False`` depending on autoescape should be
enabled by default.
@@ -249,10 +277,6 @@ class Environment(object):
#: must not be modified
shared = False
- #: these are currently EXPERIMENTAL undocumented features.
- exception_handler = None
- exception_formatter = None
-
#: the class that is used for code generation. See
#: :class:`~jinja2.compiler.CodeGenerator` for more information.
code_generator_class = CodeGenerator
@@ -261,29 +285,31 @@ class Environment(object):
#: :class:`~jinja2.runtime.Context` for more information.
context_class = Context
- def __init__(self,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- loader=None,
- cache_size=400,
- auto_reload=True,
- bytecode_cache=None,
- enable_async=False):
+ def __init__(
+ self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None,
+ enable_async=False,
+ ):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
@@ -334,6 +360,9 @@ def __init__(self,
self.enable_async = enable_async
self.is_async = self.enable_async and have_async_gen
+ if self.is_async:
+ # runs patch_all() to enable async support
+ from . import asyncsupport # noqa: F401
_environment_sanity_check(self)
@@ -353,15 +382,28 @@ def extend(self, **attributes):
if not hasattr(self, key):
setattr(self, key, value)
- def overlay(self, block_start_string=missing, block_end_string=missing,
- variable_start_string=missing, variable_end_string=missing,
- comment_start_string=missing, comment_end_string=missing,
- line_statement_prefix=missing, line_comment_prefix=missing,
- trim_blocks=missing, lstrip_blocks=missing,
- extensions=missing, optimized=missing,
- undefined=missing, finalize=missing, autoescape=missing,
- loader=missing, cache_size=missing, auto_reload=missing,
- bytecode_cache=missing):
+ def overlay(
+ self,
+ block_start_string=missing,
+ block_end_string=missing,
+ variable_start_string=missing,
+ variable_end_string=missing,
+ comment_start_string=missing,
+ comment_end_string=missing,
+ line_statement_prefix=missing,
+ line_comment_prefix=missing,
+ trim_blocks=missing,
+ lstrip_blocks=missing,
+ extensions=missing,
+ optimized=missing,
+ undefined=missing,
+ finalize=missing,
+ autoescape=missing,
+ loader=missing,
+ cache_size=missing,
+ auto_reload=missing,
+ bytecode_cache=missing,
+ ):
"""Create a new overlay environment that shares all the data with the
current environment except for cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
@@ -374,7 +416,7 @@ def overlay(self, block_start_string=missing, block_end_string=missing,
through.
"""
args = dict(locals())
- del args['self'], args['cache_size'], args['extensions']
+ del args["self"], args["cache_size"], args["extensions"]
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
@@ -402,8 +444,7 @@ def overlay(self, block_start_string=missing, block_end_string=missing,
def iter_extensions(self):
"""Iterates over the extensions by priority."""
- return iter(sorted(self.extensions.values(),
- key=lambda x: x.priority))
+ return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
@@ -435,8 +476,9 @@ def getattr(self, obj, attribute):
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
- def call_filter(self, name, value, args=None, kwargs=None,
- context=None, eval_ctx=None):
+ def call_filter(
+ self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
+ ):
"""Invokes a filter on a value the same way the compiler does it.
Note that on Python 3 this might return a coroutine in case the
@@ -448,21 +490,22 @@ def call_filter(self, name, value, args=None, kwargs=None,
"""
func = self.filters.get(name)
if func is None:
- fail_for_missing_callable('no filter named %r', name)
+ fail_for_missing_callable("no filter named %r", name)
args = [value] + list(args or ())
- if getattr(func, 'contextfilter', False):
+ if getattr(func, "contextfilter", False) is True:
if context is None:
- raise TemplateRuntimeError('Attempted to invoke context '
- 'filter without context')
+ raise TemplateRuntimeError(
+ "Attempted to invoke context filter without context"
+ )
args.insert(0, context)
- elif getattr(func, 'evalcontextfilter', False):
+ elif getattr(func, "evalcontextfilter", False) is True:
if eval_ctx is None:
if context is not None:
eval_ctx = context.eval_ctx
else:
eval_ctx = EvalContext(self)
args.insert(0, eval_ctx)
- elif getattr(func, 'environmentfilter', False):
+ elif getattr(func, "environmentfilter", False) is True:
args.insert(0, self)
return func(*args, **(kwargs or {}))
@@ -473,7 +516,7 @@ def call_test(self, name, value, args=None, kwargs=None):
"""
func = self.tests.get(name)
if func is None:
- fail_for_missing_callable('no test named %r', name)
+ fail_for_missing_callable("no test named %r", name)
return func(value, *(args or ()), **(kwargs or {}))
@internalcode
@@ -483,14 +526,13 @@ def parse(self, source, name=None, filename=None):
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
- If you are :ref:`developing Jinja2 extensions <writing-extensions>`
+ If you are :ref:`developing Jinja extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
+ self.handle_exception(source=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
@@ -510,16 +552,18 @@ def lex(self, source, name=None, filename=None):
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source)
+ self.handle_exception(source=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
- return reduce(lambda s, e: e.preprocess(s, name, filename),
- self.iter_extensions(), text_type(source))
+ return reduce(
+ lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(),
+ text_type(source),
+ )
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
@@ -539,8 +583,14 @@ def _generate(self, source, name, filename, defer_init=False):
.. versionadded:: 2.5
"""
- return generate(source, self, name, filename, defer_init=defer_init,
- optimized=self.optimized)
+ return generate(
+ source,
+ self,
+ name,
+ filename,
+ defer_init=defer_init,
+ optimized=self.optimized,
+ )
def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
@@ -548,11 +598,10 @@ def _compile(self, source, filename):
.. versionadded:: 2.5
"""
- return compile(source, filename, 'exec')
+ return compile(source, filename, "exec")
@internalcode
- def compile(self, source, name=None, filename=None, raw=False,
- defer_init=False):
+ def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
@@ -577,18 +626,16 @@ def compile(self, source, name=None, filename=None, raw=False,
if isinstance(source, string_types):
source_hint = source
source = self._parse(source, name, filename)
- source = self._generate(source, name, filename,
- defer_init=defer_init)
+ source = self._generate(source, name, filename, defer_init=defer_init)
if raw:
return source
if filename is None:
- filename = '<template>'
+ filename = "<template>"
else:
filename = encode_filename(filename)
return self._compile(source, filename)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- self.handle_exception(exc_info, source_hint=source_hint)
+ self.handle_exception(source=source_hint)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
@@ -618,26 +665,32 @@ def compile_expression(self, source, undefined_to_none=True):
.. versionadded:: 2.1
"""
- parser = Parser(self, source, state='variable')
- exc_info = None
+ parser = Parser(self, source, state="variable")
try:
expr = parser.parse_expression()
if not parser.stream.eos:
- raise TemplateSyntaxError('chunk after expression',
- parser.stream.current.lineno,
- None, None)
+ raise TemplateSyntaxError(
+ "chunk after expression", parser.stream.current.lineno, None, None
+ )
expr.set_environment(self)
except TemplateSyntaxError:
- exc_info = sys.exc_info()
- if exc_info is not None:
- self.handle_exception(exc_info, source_hint=source)
- body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
+ if sys.exc_info() is not None:
+ self.handle_exception(source=source)
+
+ body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
- def compile_templates(self, target, extensions=None, filter_func=None,
- zip='deflated', log_function=None,
- ignore_errors=True, py_compile=False):
+ def compile_templates(
+ self,
+ target,
+ extensions=None,
+ filter_func=None,
+ zip="deflated",
+ log_function=None,
+ ignore_errors=True,
+ py_compile=False,
+ ):
"""Finds all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
zipfile, the templates will be stored in a directory.
@@ -660,42 +713,52 @@ def compile_templates(self, target, extensions=None, filter_func=None,
.. versionadded:: 2.4
"""
- from jinja2.loaders import ModuleLoader
+ from .loaders import ModuleLoader
if log_function is None:
- log_function = lambda x: None
+
+ def log_function(x):
+ pass
if py_compile:
if not PY2 or PYPY:
- from warnings import warn
- warn(Warning('py_compile has no effect on pypy or Python 3'))
+ import warnings
+
+ warnings.warn(
+ "'py_compile=True' has no effect on PyPy or Python"
+ " 3 and will be removed in version 3.0",
+ DeprecationWarning,
+ stacklevel=2,
+ )
py_compile = False
else:
import imp
import marshal
- py_header = imp.get_magic() + \
- u'\xff\xff\xff\xff'.encode('iso-8859-15')
+
+ py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
# Python 3.3 added a source filesize to the header
if sys.version_info >= (3, 3):
- py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')
+ py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
- def write_file(filename, data, mode):
+ def write_file(filename, data):
if zip:
info = ZipInfo(filename)
info.external_attr = 0o755 << 16
zip_file.writestr(info, data)
else:
- f = open(os.path.join(target, filename), mode)
- try:
+ if isinstance(data, text_type):
+ data = data.encode("utf8")
+
+ with open(os.path.join(target, filename), "wb") as f:
f.write(data)
- finally:
- f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
- zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
- stored=ZIP_STORED)[zip])
+
+ zip_file = ZipFile(
+ target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
+ )
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
@@ -717,18 +780,16 @@ def write_file(filename, data, mode):
if py_compile:
c = self._compile(code, encode_filename(filename))
- write_file(filename + 'c', py_header +
- marshal.dumps(c), 'wb')
- log_function('Byte-compiled "%s" as %s' %
- (name, filename + 'c'))
+ write_file(filename + "c", py_header + marshal.dumps(c))
+ log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
else:
- write_file(filename, code, 'w')
+ write_file(filename, code)
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
- log_function('Finished compiling templates')
+ log_function("Finished compiling templates")
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
@@ -746,38 +807,29 @@ def list_templates(self, extensions=None, filter_func=None):
.. versionadded:: 2.4
"""
- x = self.loader.list_templates()
+ names = self.loader.list_templates()
+
if extensions is not None:
if filter_func is not None:
- raise TypeError('either extensions or filter_func '
- 'can be passed, but not both')
- filter_func = lambda x: '.' in x and \
- x.rsplit('.', 1)[1] in extensions
+ raise TypeError(
+ "either extensions or filter_func can be passed, but not both"
+ )
+
+ def filter_func(x):
+ return "." in x and x.rsplit(".", 1)[1] in extensions
+
if filter_func is not None:
- x = list(ifilter(filter_func, x))
- return x
+ names = [name for name in names if filter_func(name)]
+
+ return names
- def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
+ def handle_exception(self, source=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
- global _make_traceback
- if exc_info is None:
- exc_info = sys.exc_info()
-
- # the debugging module is imported when it's used for the first time.
- # we're doing a lot of stuff there and for applications that do not
- # get any exceptions in template rendering there is no need to load
- # all of that.
- if _make_traceback is None:
- from jinja2.debug import make_traceback as _make_traceback
- traceback = _make_traceback(exc_info, source_hint)
- if rendered and self.exception_formatter is not None:
- return self.exception_formatter(traceback)
- if self.exception_handler is not None:
- self.exception_handler(traceback)
- exc_type, exc_value, tb = traceback.standard_exc_info
- reraise(exc_type, exc_value, tb)
+ from .debug import rewrite_traceback_stack
+
+ reraise(*rewrite_traceback_stack(source=source))
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
@@ -794,12 +846,13 @@ def join_path(self, template, parent):
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
- raise TypeError('no loader for this environment specified')
+ raise TypeError("no loader for this environment specified")
cache_key = (weakref.ref(self.loader), name)
if self.cache is not None:
template = self.cache.get(cache_key)
- if template is not None and (not self.auto_reload or
- template.is_up_to_date):
+ if template is not None and (
+ not self.auto_reload or template.is_up_to_date
+ ):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
@@ -835,15 +888,24 @@ def select_template(self, names, parent=None, globals=None):
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
- .. versionadded:: 2.3
+ .. versionchanged:: 2.11
+ If names is :class:`Undefined`, an :exc:`UndefinedError` is
+ raised instead. If no templates were found and names
+ contains :class:`Undefined`, the message is more helpful.
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
+
+ .. versionadded:: 2.3
"""
+ if isinstance(names, Undefined):
+ names._fail_with_undefined_error()
+
if not names:
- raise TemplatesNotFound(message=u'Tried to select from an empty list '
- u'of templates.')
+ raise TemplatesNotFound(
+ message=u"Tried to select from an empty list " u"of templates."
+ )
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
@@ -852,20 +914,19 @@ def select_template(self, names, parent=None, globals=None):
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
- except TemplateNotFound:
+ except (TemplateNotFound, UndefinedError):
pass
raise TemplatesNotFound(names)
@internalcode
- def get_or_select_template(self, template_name_or_list,
- parent=None, globals=None):
+ def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
- if isinstance(template_name_or_list, string_types):
+ if isinstance(template_name_or_list, (string_types, Undefined)):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
@@ -916,32 +977,57 @@ class Template(object):
StopIteration
"""
- def __new__(cls, source,
- block_start_string=BLOCK_START_STRING,
- block_end_string=BLOCK_END_STRING,
- variable_start_string=VARIABLE_START_STRING,
- variable_end_string=VARIABLE_END_STRING,
- comment_start_string=COMMENT_START_STRING,
- comment_end_string=COMMENT_END_STRING,
- line_statement_prefix=LINE_STATEMENT_PREFIX,
- line_comment_prefix=LINE_COMMENT_PREFIX,
- trim_blocks=TRIM_BLOCKS,
- lstrip_blocks=LSTRIP_BLOCKS,
- newline_sequence=NEWLINE_SEQUENCE,
- keep_trailing_newline=KEEP_TRAILING_NEWLINE,
- extensions=(),
- optimized=True,
- undefined=Undefined,
- finalize=None,
- autoescape=False,
- enable_async=False):
+ #: Type of environment to create when creating a template directly
+ #: rather than through an existing environment.
+ environment_class = Environment
+
+ def __new__(
+ cls,
+ source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ enable_async=False,
+ ):
env = get_spontaneous_environment(
- block_start_string, block_end_string, variable_start_string,
- variable_end_string, comment_start_string, comment_end_string,
- line_statement_prefix, line_comment_prefix, trim_blocks,
- lstrip_blocks, newline_sequence, keep_trailing_newline,
- frozenset(extensions), optimized, undefined, finalize, autoescape,
- None, 0, False, None, enable_async)
+ cls.environment_class,
+ block_start_string,
+ block_end_string,
+ variable_start_string,
+ variable_end_string,
+ comment_start_string,
+ comment_end_string,
+ line_statement_prefix,
+ line_comment_prefix,
+ trim_blocks,
+ lstrip_blocks,
+ newline_sequence,
+ keep_trailing_newline,
+ frozenset(extensions),
+ optimized,
+ undefined,
+ finalize,
+ autoescape,
+ None,
+ 0,
+ False,
+ None,
+ enable_async,
+ )
return env.from_string(source, template_class=cls)
@classmethod
@@ -949,10 +1035,7 @@ def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
- namespace = {
- 'environment': environment,
- '__file__': code.co_filename
- }
+ namespace = {"environment": environment, "__file__": code.co_filename}
exec(code, namespace)
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
@@ -972,21 +1055,21 @@ def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
- t.name = namespace['name']
- t.filename = namespace['__file__']
- t.blocks = namespace['blocks']
+ t.name = namespace["name"]
+ t.filename = namespace["__file__"]
+ t.blocks = namespace["blocks"]
# render function and module
- t.root_render_func = namespace['root']
+ t.root_render_func = namespace["root"]
t._module = None
# debug and loader helpers
- t._debug_info = namespace['debug_info']
+ t._debug_info = namespace["debug_info"]
t._uptodate = None
# store the reference
- namespace['environment'] = environment
- namespace['__jinja_template__'] = t
+ namespace["environment"] = environment
+ namespace["__jinja_template__"] = t
return t
@@ -1004,8 +1087,7 @@ def render(self, *args, **kwargs):
try:
return concat(self.root_render_func(self.new_context(vars)))
except Exception:
- exc_info = sys.exc_info()
- return self.environment.handle_exception(exc_info, True)
+ self.environment.handle_exception()
def render_async(self, *args, **kwargs):
"""This works similar to :meth:`render` but returns a coroutine
@@ -1017,8 +1099,9 @@ def render_async(self, *args, **kwargs):
await template.render_async(knights='that say nih; asynchronously')
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
@@ -1039,29 +1122,28 @@ def generate(self, *args, **kwargs):
for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
- exc_info = sys.exc_info()
- else:
- return
- yield self.environment.handle_exception(exc_info, True)
+ yield self.environment.handle_exception()
def generate_async(self, *args, **kwargs):
"""An async version of :meth:`generate`. Works very similarly but
returns an async iterator instead.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. Per default the globals
are added to the context. If shared is set to `True` the data
- is passed as it to the context without adding the globals.
+ is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
- return new_context(self.environment, self.name, self.blocks,
- vars, shared, self.globals, locals)
+ return new_context(
+ self.environment, self.name, self.blocks, vars, shared, self.globals, locals
+ )
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
@@ -1074,13 +1156,14 @@ def make_module(self, vars=None, shared=False, locals=None):
def make_module_async(self, vars=None, shared=False, locals=None):
"""As template module creation can invoke template code for
- asynchronous exections this method must be used instead of the
+ asynchronous executions this method must be used instead of the
normal :meth:`make_module` one. Likewise the module attribute
becomes unavailable in async mode.
"""
# see asyncsupport for the actual implementation
- raise NotImplementedError('This feature is not available for this '
- 'version of Python')
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
@internalcode
def _get_default_module(self):
@@ -1124,15 +1207,16 @@ def is_up_to_date(self):
@property
def debug_info(self):
"""The debug info mapping."""
- return [tuple(imap(int, x.split('='))) for x in
- self._debug_info.split('&')]
+ if self._debug_info:
+ return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
+ return []
def __repr__(self):
if self.name is None:
- name = 'memory:%x' % id(self)
+ name = "memory:%x" % id(self)
else:
name = repr(self.name)
- return '<%s %s>' % (self.__class__.__name__, name)
+ return "<%s %s>" % (self.__class__.__name__, name)
@implements_to_string
@@ -1145,10 +1229,12 @@ class TemplateModule(object):
def __init__(self, template, context, body_stream=None):
if body_stream is None:
if context.environment.is_async:
- raise RuntimeError('Async mode requires a body stream '
- 'to be passed to a template module. Use '
- 'the async methods of the API you are '
- 'using.')
+ raise RuntimeError(
+ "Async mode requires a body stream "
+ "to be passed to a template module. Use "
+ "the async methods of the API you are "
+ "using."
+ )
body_stream = list(template.root_render_func(context))
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
@@ -1162,10 +1248,10 @@ def __str__(self):
def __repr__(self):
if self.__name__ is None:
- name = 'memory:%x' % id(self)
+ name = "memory:%x" % id(self)
else:
name = repr(self.__name__)
- return '<%s %s>' % (self.__class__.__name__, name)
+ return "<%s %s>" % (self.__class__.__name__, name)
class TemplateExpression(object):
@@ -1181,7 +1267,7 @@ def __init__(self, template, undefined_to_none):
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
- rv = context.vars['result']
+ rv = context.vars["result"]
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
@@ -1203,7 +1289,7 @@ def __init__(self, gen):
self._gen = gen
self.disable_buffering()
- def dump(self, fp, encoding=None, errors='strict'):
+ def dump(self, fp, encoding=None, errors="strict"):
"""Dump the complete stream into a file or file-like object.
Per default unicode strings are written, if you want to encode
before writing specify an `encoding`.
@@ -1215,15 +1301,15 @@ def dump(self, fp, encoding=None, errors='strict'):
close = False
if isinstance(fp, string_types):
if encoding is None:
- encoding = 'utf-8'
- fp = open(fp, 'wb')
+ encoding = "utf-8"
+ fp = open(fp, "wb")
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
- if hasattr(fp, 'writelines'):
+ if hasattr(fp, "writelines"):
fp.writelines(iterable)
else:
for item in iterable:
@@ -1259,7 +1345,7 @@ def _buffered_generator(self, size):
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
- raise ValueError('buffer size too small')
+ raise ValueError("buffer size too small")
self.buffered = True
self._next = partial(next, self._buffered_generator(size))
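
Among the environment changes above, Template gains an environment_class hook and the spontaneous-environment cache is now keyed on that class. A small sketch of the intended use (class names are invented for illustration):

    from jinja2 import Environment, Template

    class CustomEnvironment(Environment):
        """Stand-in subclass; any Environment subclass works here."""

    class CustomTemplate(Template):
        environment_class = CustomEnvironment

    # Templates created directly from a string now get the chosen environment class.
    t = CustomTemplate("Hello {{ name }}!")
    assert isinstance(t.environment, CustomEnvironment)
    print(t.render(name="world"))
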
diff --git a/external/python/jinja2/exceptions.py b/external/python/jinja2/exceptions.py
index c018a33e..0bf2003e 100644
--- a/external/python/jinja2/exceptions.py
+++ b/external/python/jinja2/exceptions.py
@@ -1,23 +1,18 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.exceptions
- ~~~~~~~~~~~~~~~~~
-
- Jinja exceptions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2._compat import imap, text_type, PY2, implements_to_string
+from ._compat import imap
+from ._compat import implements_to_string
+from ._compat import PY2
+from ._compat import text_type
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
+
def __init__(self, message=None):
if message is not None:
- message = text_type(message).encode('utf-8')
+ message = text_type(message).encode("utf-8")
Exception.__init__(self, message)
@property
@@ -25,11 +20,13 @@ def message(self):
if self.args:
message = self.args[0]
if message is not None:
- return message.decode('utf-8', 'replace')
+ return message.decode("utf-8", "replace")
def __unicode__(self):
- return self.message or u''
+ return self.message or u""
+
else:
+
def __init__(self, message=None):
Exception.__init__(self, message)
@@ -43,16 +40,28 @@ def message(self):
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
- """Raised if a template does not exist."""
+ """Raised if a template does not exist.
+
+ .. versionchanged:: 2.11
+ If the given name is :class:`Undefined` and no message was
+ provided, an :exc:`UndefinedError` is raised.
+ """
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
- IOError.__init__(self)
+ IOError.__init__(self, name)
+
if message is None:
+ from .runtime import Undefined
+
+ if isinstance(name, Undefined):
+ name._fail_with_undefined_error()
+
message = name
+
self.message = message
self.name = name
self.templates = [name]
@@ -66,13 +75,28 @@ class TemplatesNotFound(TemplateNotFound):
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
+ .. versionchanged:: 2.11
+ If a name in the list of names is :class:`Undefined`, a message
+ about it being undefined is shown rather than the empty string.
+
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
- message = u'none of the templates given were found: ' + \
- u', '.join(imap(text_type, names))
+ from .runtime import Undefined
+
+ parts = []
+
+ for name in names:
+ if isinstance(name, Undefined):
+ parts.append(name._undefined_message)
+ else:
+ parts.append(name)
+
+ message = u"none of the templates given were found: " + u", ".join(
+ imap(text_type, parts)
+ )
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
@@ -98,11 +122,11 @@ def __str__(self):
return self.message
# otherwise attach some stuff
- location = 'line %d' % self.lineno
+ location = "line %d" % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
- lines = [self.message, ' ' + location]
+ lines = [self.message, " " + location]
# if the source is set, add the line to the output
if self.source is not None:
@@ -111,9 +135,16 @@ def __str__(self):
except IndexError:
line = None
if line:
- lines.append(' ' + line.strip())
+ lines.append(" " + line.strip())
+
+ return u"\n".join(lines)
- return u'\n'.join(lines)
+ def __reduce__(self):
+ # https://bugs.python.org/issue1692335 Exceptions that take
+ # multiple required arguments have problems with pickling.
+ # Without this, raises TypeError: __init__() missing 1 required
+ # positional argument: 'lineno'
+ return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
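
The __reduce__ added to TemplateSyntaxError above restores pickling, which previously failed because the exception requires a lineno argument. A quick sketch of the round-trip it enables (argument values are arbitrary):

    import pickle
    from jinja2 import TemplateSyntaxError

    err = TemplateSyntaxError("unexpected end of template", 3, "page", "page.html")
    copy = pickle.loads(pickle.dumps(err))
    # The reconstructed exception carries the same location information.
    assert (copy.message, copy.lineno, copy.name, copy.filename) == (
        err.message, err.lineno, err.name, err.filename,
    )
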
diff --git a/external/python/jinja2/ext.py b/external/python/jinja2/ext.py
index 0734a84f..9141be4d 100644
--- a/external/python/jinja2/ext.py
+++ b/external/python/jinja2/ext.py
@@ -1,42 +1,49 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.ext
- ~~~~~~~~~~
-
- Jinja extensions allow to add custom tags similar to the way django custom
- tags work. By default two example extensions exist: an i18n and a cache
- extension.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
+"""Extension API for adding custom tags and behavior."""
+import pprint
import re
-
-from jinja2 import nodes
-from jinja2.defaults import BLOCK_START_STRING, \
- BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
- COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
- LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
- KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
-from jinja2.environment import Environment
-from jinja2.runtime import concat
-from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
-from jinja2.utils import contextfunction, import_string, Markup
-from jinja2._compat import with_metaclass, string_types, iteritems
-
+from sys import version_info
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import with_metaclass
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .nodes import ContextReference
+from .runtime import concat
+from .utils import contextfunction
+from .utils import import_string
# the only real useful gettext functions for a Jinja template. Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
-GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
+GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
+
+_ws_re = re.compile(r"\s*\n\s*")
class ExtensionRegistry(type):
"""Gives the extension an unique identifier."""
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
- rv.identifier = rv.__module__ + '.' + rv.__name__
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
+ rv.identifier = rv.__module__ + "." + rv.__name__
return rv
@@ -91,10 +98,6 @@ def filter_stream(self, stream):
to filter tokens returned. This method has to return an iterable of
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
:class:`~jinja2.lexer.TokenStream`.
-
- In the `ext` folder of the Jinja2 source distribution there is a file
- called `inlinegettext.py` which implements a filter that utilizes this
- method.
"""
return stream
@@ -116,8 +119,9 @@ def attr(self, name, lineno=None):
"""
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
- def call_method(self, name, args=None, kwargs=None, dyn_args=None,
- dyn_kwargs=None, lineno=None):
+ def call_method(
+ self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
+ ):
"""Call a method of the extension. This is a shortcut for
:meth:`attr` + :class:`jinja2.nodes.Call`.
"""
@@ -125,13 +129,19 @@ def call_method(self, name, args=None, kwargs=None, dyn_args=None,
args = []
if kwargs is None:
kwargs = []
- return nodes.Call(self.attr(name, lineno=lineno), args, kwargs,
- dyn_args, dyn_kwargs, lineno=lineno)
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
@contextfunction
def _gettext_alias(__context, *args, **kwargs):
- return __context.call(__context.resolve('gettext'), *args, **kwargs)
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
def _make_new_gettext(func):
@@ -140,24 +150,31 @@ def gettext(__context, __string, **variables):
rv = __context.call(func, __string)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping
return rv % variables
+
return gettext
def _make_new_ngettext(func):
@contextfunction
def ngettext(__context, __singular, __plural, __num, **variables):
- variables.setdefault('num', __num)
+ variables.setdefault("num", __num)
rv = __context.call(func, __singular, __plural, __num)
if __context.eval_ctx.autoescape:
rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
return rv % variables
+
return ngettext
class InternationalizationExtension(Extension):
- """This extension adds gettext support to Jinja2."""
- tags = set(['trans'])
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
# TODO: the i18n extension is currently reevaluating values in a few
# situations. Take this example:
@@ -168,30 +185,28 @@ class InternationalizationExtension(Extension):
def __init__(self, environment):
Extension.__init__(self, environment)
- environment.globals['_'] = _gettext_alias
+ environment.globals["_"] = _gettext_alias
environment.extend(
install_gettext_translations=self._install,
install_null_translations=self._install_null,
install_gettext_callables=self._install_callables,
uninstall_gettext_translations=self._uninstall,
extract_translations=self._extract,
- newstyle_gettext=False
+ newstyle_gettext=False,
)
def _install(self, translations, newstyle=None):
- gettext = getattr(translations, 'ugettext', None)
+ gettext = getattr(translations, "ugettext", None)
if gettext is None:
gettext = translations.gettext
- ngettext = getattr(translations, 'ungettext', None)
+ ngettext = getattr(translations, "ungettext", None)
if ngettext is None:
ngettext = translations.ngettext
self._install_callables(gettext, ngettext, newstyle)
def _install_null(self, newstyle=None):
self._install_callables(
- lambda x: x,
- lambda s, p, n: (n != 1 and (p,) or (s,))[0],
- newstyle
+ lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
)
def _install_callables(self, gettext, ngettext, newstyle=None):
@@ -200,13 +215,10 @@ def _install_callables(self, gettext, ngettext, newstyle=None):
if self.environment.newstyle_gettext:
gettext = _make_new_gettext(gettext)
ngettext = _make_new_ngettext(ngettext)
- self.environment.globals.update(
- gettext=gettext,
- ngettext=ngettext
- )
+ self.environment.globals.update(gettext=gettext, ngettext=ngettext)
def _uninstall(self, translations):
- for key in 'gettext', 'ngettext':
+ for key in "gettext", "ngettext":
self.environment.globals.pop(key, None)
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
@@ -226,41 +238,44 @@ def parse(self, parser):
plural_expr_assignment = None
variables = {}
trimmed = None
- while parser.stream.current.type != 'block_end':
+ while parser.stream.current.type != "block_end":
if variables:
- parser.stream.expect('comma')
+ parser.stream.expect("comma")
# skip colon for python compatibility
- if parser.stream.skip_if('colon'):
+ if parser.stream.skip_if("colon"):
break
- name = parser.stream.expect('name')
+ name = parser.stream.expect("name")
if name.value in variables:
- parser.fail('translatable variable %r defined twice.' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "translatable variable %r defined twice." % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
# expressions
- if parser.stream.current.type == 'assign':
+ if parser.stream.current.type == "assign":
next(parser.stream)
variables[name.value] = var = parser.parse_expression()
- elif trimmed is None and name.value in ('trimmed', 'notrimmed'):
- trimmed = name.value == 'trimmed'
+ elif trimmed is None and name.value in ("trimmed", "notrimmed"):
+ trimmed = name.value == "trimmed"
continue
else:
- variables[name.value] = var = nodes.Name(name.value, 'load')
+ variables[name.value] = var = nodes.Name(name.value, "load")
if plural_expr is None:
if isinstance(var, nodes.Call):
- plural_expr = nodes.Name('_trans', 'load')
+ plural_expr = nodes.Name("_trans", "load")
variables[name.value] = plural_expr
plural_expr_assignment = nodes.Assign(
- nodes.Name('_trans', 'store'), var)
+ nodes.Name("_trans", "store"), var
+ )
else:
plural_expr = var
- num_called_num = name.value == 'num'
+ num_called_num = name.value == "num"
- parser.stream.expect('block_end')
+ parser.stream.expect("block_end")
plural = None
have_plural = False
@@ -271,22 +286,24 @@ def parse(self, parser):
if singular_names:
referenced.update(singular_names)
if plural_expr is None:
- plural_expr = nodes.Name(singular_names[0], 'load')
- num_called_num = singular_names[0] == 'num'
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
# if we have a pluralize block, we parse that too
- if parser.stream.current.test('name:pluralize'):
+ if parser.stream.current.test("name:pluralize"):
have_plural = True
next(parser.stream)
- if parser.stream.current.type != 'block_end':
- name = parser.stream.expect('name')
+ if parser.stream.current.type != "block_end":
+ name = parser.stream.expect("name")
if name.value not in variables:
- parser.fail('unknown variable %r for pluralization' %
- name.value, name.lineno,
- exc=TemplateAssertionError)
+ parser.fail(
+ "unknown variable %r for pluralization" % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
plural_expr = variables[name.value]
- num_called_num = name.value == 'num'
- parser.stream.expect('block_end')
+ num_called_num = name.value == "num"
+ parser.stream.expect("block_end")
plural_names, plural = self._parse_block(parser, False)
next(parser.stream)
referenced.update(plural_names)
@@ -296,88 +313,97 @@ def parse(self, parser):
# register free names as simple name expressions
for var in referenced:
if var not in variables:
- variables[var] = nodes.Name(var, 'load')
+ variables[var] = nodes.Name(var, "load")
if not have_plural:
plural_expr = None
elif plural_expr is None:
- parser.fail('pluralize without variables', lineno)
+ parser.fail("pluralize without variables", lineno)
if trimmed is None:
- trimmed = self.environment.policies['ext.i18n.trimmed']
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
if trimmed:
singular = self._trim_whitespace(singular)
if plural:
plural = self._trim_whitespace(plural)
- node = self._make_node(singular, plural, variables, plural_expr,
- bool(referenced),
- num_called_num and have_plural)
+ node = self._make_node(
+ singular,
+ plural,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
node.set_lineno(lineno)
if plural_expr_assignment is not None:
return [plural_expr_assignment, node]
else:
return node
- def _trim_whitespace(self, string, _ws_re=re.compile(r'\s*\n\s*')):
- return _ws_re.sub(' ', string.strip())
+ def _trim_whitespace(self, string, _ws_re=_ws_re):
+ return _ws_re.sub(" ", string.strip())
def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
- if parser.stream.current.type == 'data':
- buf.append(parser.stream.current.value.replace('%', '%%'))
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
next(parser.stream)
- elif parser.stream.current.type == 'variable_begin':
+ elif parser.stream.current.type == "variable_begin":
next(parser.stream)
- name = parser.stream.expect('name').value
+ name = parser.stream.expect("name").value
referenced.append(name)
- buf.append('%%(%s)s' % name)
- parser.stream.expect('variable_end')
- elif parser.stream.current.type == 'block_begin':
+ buf.append("%%(%s)s" % name)
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
next(parser.stream)
- if parser.stream.current.test('name:endtrans'):
+ if parser.stream.current.test("name:endtrans"):
break
- elif parser.stream.current.test('name:pluralize'):
+ elif parser.stream.current.test("name:pluralize"):
if allow_pluralize:
break
- parser.fail('a translatable section can have only one '
- 'pluralize section')
- parser.fail('control structures in translatable sections are '
- 'not allowed')
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ parser.fail(
+ "control structures in translatable sections are not allowed"
+ )
elif parser.stream.eos:
- parser.fail('unclosed translation block')
+ parser.fail("unclosed translation block")
else:
- assert False, 'internal parser error'
+ raise RuntimeError("internal parser error")
return referenced, concat(buf)
- def _make_node(self, singular, plural, variables, plural_expr,
- vars_referenced, num_called_num):
+ def _make_node(
+ self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
+ ):
"""Generates a useful node from the data provided."""
# no variables referenced? no need to escape for old style
# gettext invocations only if there are vars.
if not vars_referenced and not self.environment.newstyle_gettext:
- singular = singular.replace('%%', '%')
+ singular = singular.replace("%%", "%")
if plural:
- plural = plural.replace('%%', '%')
+ plural = plural.replace("%%", "%")
# singular only:
if plural_expr is None:
- gettext = nodes.Name('gettext', 'load')
- node = nodes.Call(gettext, [nodes.Const(singular)],
- [], None, None)
+ gettext = nodes.Name("gettext", "load")
+ node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
# singular and plural
else:
- ngettext = nodes.Name('ngettext', 'load')
- node = nodes.Call(ngettext, [
- nodes.Const(singular),
- nodes.Const(plural),
- plural_expr
- ], [], None, None)
+ ngettext = nodes.Name("ngettext", "load")
+ node = nodes.Call(
+ ngettext,
+ [nodes.Const(singular), nodes.Const(plural), plural_expr],
+ [],
+ None,
+ None,
+ )
# in case newstyle gettext is used, the method is powerful
# enough to handle the variable expansion and autoescape
@@ -386,7 +412,7 @@ def _make_node(self, singular, plural, variables, plural_expr,
for key, value in iteritems(variables):
# the function adds that later anyways in case num was
# called num, so just skip it.
- if num_called_num and key == 'num':
+ if num_called_num and key == "num":
continue
node.kwargs.append(nodes.Keyword(key, value))
@@ -396,18 +422,24 @@ def _make_node(self, singular, plural, variables, plural_expr,
# environment with autoescaping turned on
node = nodes.MarkSafeIfAutoescape(node)
if variables:
- node = nodes.Mod(node, nodes.Dict([
- nodes.Pair(nodes.Const(key), value)
- for key, value in variables.items()
- ]))
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
return nodes.Output([node])
class ExprStmtExtension(Extension):
- """Adds a `do` tag to Jinja2 that works like the print statement just
+ """Adds a `do` tag to Jinja that works like the print statement just
that it doesn't print the return value.
"""
- tags = set(['do'])
+
+ tags = set(["do"])
def parse(self, parser):
node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
@@ -417,11 +449,12 @@ def parse(self, parser):
class LoopControlExtension(Extension):
"""Adds break and continue to the template engine."""
- tags = set(['break', 'continue'])
+
+ tags = set(["break", "continue"])
def parse(self, parser):
token = next(parser.stream)
- if token.value == 'break':
+ if token.value == "break":
return nodes.Break(lineno=token.lineno)
return nodes.Continue(lineno=token.lineno)
@@ -434,8 +467,50 @@ class AutoEscapeExtension(Extension):
pass
-def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
- babel_style=True):
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+ {% debug %}
+
+ .. code-block:: text
+
+    {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
+ ...,
+    'namespace': <class 'jinja2.utils.Namespace'>},
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser):
+ lineno = parser.stream.expect("name:debug").lineno
+ context = ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context):
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ if version_info[:2] >= (3, 4):
+ return pprint.pformat(result, depth=3, compact=True)
+ else:
+ return pprint.pformat(result, depth=3)
+
+
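A minimal usage sketch (not part of the vendored file; template string and variable are invented): the new extension loads through the documented "jinja2.ext.debug" path and renders a pprint dump of the current context, filters, and tests.

    from jinja2 import Environment

    # "jinja2.ext.debug" resolves to the DebugExtension defined above.
    env = Environment(extensions=["jinja2.ext.debug"])

    # Renders the pprint dump of context variables, filter names, and test names.
    print(env.from_string("{% debug %}").render(answer=42))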
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
"""Extract localizable strings from the given template node. Per
default this function returns matches in babel style that means non string
parameters as well as keyword arguments are returned as `None`. This
@@ -471,19 +546,20 @@ def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS,
extraction interface or extract comments yourself.
"""
for node in node.find_all(nodes.Call):
- if not isinstance(node.node, nodes.Name) or \
- node.node.name not in gettext_functions:
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
continue
strings = []
for arg in node.args:
- if isinstance(arg, nodes.Const) and \
- isinstance(arg.value, string_types):
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
strings.append(arg.value)
else:
strings.append(None)
- for arg in node.kwargs:
+ for _ in node.kwargs:
strings.append(None)
if node.dyn_args is not None:
strings.append(None)
@@ -517,9 +593,10 @@ def __init__(self, tokens, comment_tags):
def find_backwards(self, offset):
try:
- for _, token_type, token_value in \
- reversed(self.tokens[self.offset:offset]):
- if token_type in ('comment', 'linecomment'):
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
try:
prefix, comment = token_value.split(None, 1)
except ValueError:
@@ -533,7 +610,7 @@ def find_backwards(self, offset):
def find_comments(self, lineno):
if not self.comment_tags or self.last_lineno > lineno:
return []
- for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]):
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
if token_lineno > lineno:
return self.find_backwards(self.offset + idx)
return self.find_backwards(len(self.tokens))
@@ -545,7 +622,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
.. versionchanged:: 2.3
Basic support for translation comments was added. If `comment_tags`
is now set to a list of keywords for extraction, the extractor will
- try to find the best preceeding comment that begins with one of the
+ try to find the best preceding comment that begins with one of the
keywords. For best results, make sure to not have more than one
gettext call in one line of code and the matching comment in the
same line or the line before.
@@ -568,7 +645,7 @@ def babel_extract(fileobj, keywords, comment_tags, options):
(comments will be empty currently)
"""
extensions = set()
- for extension in options.get('extensions', '').split(','):
+ for extension in options.get("extensions", "").split(","):
extension = extension.strip()
if not extension:
continue
@@ -577,38 +654,37 @@ def babel_extract(fileobj, keywords, comment_tags, options):
extensions.add(InternationalizationExtension)
def getbool(options, key, default=False):
- return options.get(key, str(default)).lower() in \
- ('1', 'on', 'yes', 'true')
+ return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
- silent = getbool(options, 'silent', True)
+ silent = getbool(options, "silent", True)
environment = Environment(
- options.get('block_start_string', BLOCK_START_STRING),
- options.get('block_end_string', BLOCK_END_STRING),
- options.get('variable_start_string', VARIABLE_START_STRING),
- options.get('variable_end_string', VARIABLE_END_STRING),
- options.get('comment_start_string', COMMENT_START_STRING),
- options.get('comment_end_string', COMMENT_END_STRING),
- options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX,
- options.get('line_comment_prefix') or LINE_COMMENT_PREFIX,
- getbool(options, 'trim_blocks', TRIM_BLOCKS),
- getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS),
+ options.get("block_start_string", BLOCK_START_STRING),
+ options.get("block_end_string", BLOCK_END_STRING),
+ options.get("variable_start_string", VARIABLE_START_STRING),
+ options.get("variable_end_string", VARIABLE_END_STRING),
+ options.get("comment_start_string", COMMENT_START_STRING),
+ options.get("comment_end_string", COMMENT_END_STRING),
+ options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
NEWLINE_SEQUENCE,
- getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE),
+ getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
frozenset(extensions),
cache_size=0,
- auto_reload=False
+ auto_reload=False,
)
- if getbool(options, 'trimmed'):
- environment.policies['ext.i18n.trimmed'] = True
- if getbool(options, 'newstyle_gettext'):
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
environment.newstyle_gettext = True
- source = fileobj.read().decode(options.get('encoding', 'utf-8'))
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
try:
node = environment.parse(source)
tokens = list(environment.lex(environment.preprocess(source)))
- except TemplateSyntaxError as e:
+ except TemplateSyntaxError:
if not silent:
raise
# skip templates with syntax errors
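For orientation, a small sketch of calling this extraction entry point directly; the template bytes and the options dict are invented for the example.

    from io import BytesIO
    from jinja2.ext import babel_extract

    source = BytesIO(b"{% trans %}Hello {{ user }}!{% endtrans %}")

    # babel_extract yields (lineno, funcname, message, comments) tuples for Babel.
    for lineno, funcname, message, comments in babel_extract(
        source, ("gettext", "ngettext", "_"), [], {"trimmed": "true"}
    ):
        print(lineno, funcname, message)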
@@ -625,3 +701,4 @@ def getbool(options, key, default=False):
loopcontrols = LoopControlExtension
with_ = WithExtension
autoescape = AutoEscapeExtension
+debug = DebugExtension
diff --git a/external/python/jinja2/filters.py b/external/python/jinja2/filters.py
index 267dddda..74b108dc 100644
--- a/external/python/jinja2/filters.py
+++ b/external/python/jinja2/filters.py
@@ -1,29 +1,31 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.filters
- ~~~~~~~~~~~~~~
-
- Bundled jinja filters.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
+"""Built-in template filters used with the ``|`` operator."""
import math
import random
+import re
import warnings
-
-from itertools import groupby, chain
from collections import namedtuple
-from jinja2.utils import Markup, escape, pformat, urlize, soft_unicode, \
- unicode_urlencode, htmlsafe_json_dumps
-from jinja2.runtime import Undefined
-from jinja2.exceptions import FilterArgumentError
-from jinja2._compat import imap, string_types, text_type, iteritems, PY2
+from itertools import chain
+from itertools import groupby
+
+from markupsafe import escape
+from markupsafe import Markup
+from markupsafe import soft_unicode
+from ._compat import abc
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import FilterArgumentError
+from .runtime import Undefined
+from .utils import htmlsafe_json_dumps
+from .utils import pformat
+from .utils import unicode_urlencode
+from .utils import urlize
-_word_re = re.compile(r'\w+', re.UNICODE)
-_word_beginning_split_re = re.compile(r'([-\s\(\{\[\<]+)', re.UNICODE)
+_word_re = re.compile(r"\w+", re.UNICODE)
+_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
def contextfilter(f):
@@ -59,23 +61,21 @@ def ignore_case(value):
return value.lower() if isinstance(value, string_types) else value
-def make_attrgetter(environment, attribute, postprocess=None):
+def make_attrgetter(environment, attribute, postprocess=None, default=None):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
- if attribute is None:
- attribute = []
- elif isinstance(attribute, string_types):
- attribute = [int(x) if x.isdigit() else x for x in attribute.split('.')]
- else:
- attribute = [attribute]
+ attribute = _prepare_attribute_parts(attribute)
def attrgetter(item):
for part in attribute:
item = environment.getitem(item, part)
+ if default and isinstance(item, Undefined):
+ item = default
+
if postprocess is not None:
item = postprocess(item)
@@ -84,32 +84,84 @@ def attrgetter(item):
return attrgetter
+def make_multi_attrgetter(environment, attribute, postprocess=None):
+ """Returns a callable that looks up the given comma separated
+ attributes from a passed object with the rules of the environment.
+ Dots are allowed to access attributes of each attribute. Integer
+ parts in paths are looked up as integers.
+
+ The value returned by the returned callable is a list of extracted
+ attribute values.
+
+ Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
+ """
+ attribute_parts = (
+ attribute.split(",") if isinstance(attribute, string_types) else [attribute]
+ )
+ attribute = [
+ _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
+ ]
+
+ def attrgetter(item):
+ items = [None] * len(attribute)
+ for i, attribute_part in enumerate(attribute):
+ item_i = item
+ for part in attribute_part:
+ item_i = environment.getitem(item_i, part)
+
+ if postprocess is not None:
+ item_i = postprocess(item_i)
+
+ items[i] = item_i
+ return items
+
+ return attrgetter
+
+
+def _prepare_attribute_parts(attr):
+ if attr is None:
+ return []
+ elif isinstance(attr, string_types):
+ return [int(x) if x.isdigit() else x for x in attr.split(".")]
+ else:
+ return [attr]
+
+
def do_forceescape(value):
"""Enforce HTML escaping. This will probably double escape variables."""
- if hasattr(value, '__html__'):
+ if hasattr(value, "__html__"):
value = value.__html__()
return escape(text_type(value))
def do_urlencode(value):
- """Escape strings for use in URLs (uses UTF-8 encoding). It accepts both
- dictionaries and regular strings as well as pairwise iterables.
+ """Quote data for use in a URL path or query using UTF-8.
+
+ Basic wrapper around :func:`urllib.parse.quote` when given a
+ string, or :func:`urllib.parse.urlencode` for a dict or iterable.
+
+ :param value: Data to quote. A string will be quoted directly. A
+ dict or iterable of ``(key, value)`` pairs will be joined as a
+ query string.
+
+ When given a string, "/" is not quoted. HTTP servers treat "/" and
+ "%2F" equivalently in paths. If you need quoted slashes, use the
+ ``|replace("/", "%2F")`` filter.
.. versionadded:: 2.7
"""
- itemiter = None
- if isinstance(value, dict):
- itemiter = iteritems(value)
- elif not isinstance(value, string_types):
- try:
- itemiter = iter(value)
- except TypeError:
- pass
- if itemiter is None:
+ if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
return unicode_urlencode(value)
- return u'&'.join(unicode_urlencode(k) + '=' +
- unicode_urlencode(v, for_qs=True)
- for k, v in itemiter)
+
+ if isinstance(value, dict):
+ items = iteritems(value)
+ else:
+ items = iter(value)
+
+ return u"&".join(
+ "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
+ for k, v in items
+ )
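A quick sketch exercising both branches of the rewritten filter, one with a plain string and one with a dict; the values are invented.

    from jinja2 import Environment

    env = Environment()
    # Strings are quoted directly; "/" is left unquoted.
    print(env.from_string("{{ 'path with spaces/x'|urlencode }}").render())
    # Dicts (or iterables of pairs) are joined as a query string.
    print(env.from_string("{{ {'q': 'a b', 'page': 2}|urlencode }}").render())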
@evalcontextfilter
@@ -132,8 +184,11 @@ def do_replace(eval_ctx, s, old, new, count=None):
count = -1
if not eval_ctx.autoescape:
return text_type(s).replace(text_type(old), text_type(new), count)
- if hasattr(old, '__html__') or hasattr(new, '__html__') and \
- not hasattr(s, '__html__'):
+ if (
+ hasattr(old, "__html__")
+ or hasattr(new, "__html__")
+ and not hasattr(s, "__html__")
+ ):
s = escape(s)
else:
s = soft_unicode(s)
@@ -174,13 +229,13 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
As you can see it automatically prepends a space in front of the item
if the filter returned something unless the second parameter is false.
"""
- rv = u' '.join(
+ rv = u" ".join(
u'%s="%s"' % (escape(key), escape(value))
for key, value in iteritems(d)
if value is not None and not isinstance(value, Undefined)
)
if autospace and rv:
- rv = u' ' + rv
+ rv = u" " + rv
if _eval_ctx.autoescape:
rv = Markup(rv)
return rv
@@ -197,39 +252,40 @@ def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
- return ''.join(
- [item[0].upper() + item[1:].lower()
- for item in _word_beginning_split_re.split(soft_unicode(s))
- if item])
+ return "".join(
+ [
+ item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_unicode(s))
+ if item
+ ]
+ )
-def do_dictsort(value, case_sensitive=False, by='key', reverse=False):
+def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
- {% for item in mydict|dictsort %}
+ {% for key, value in mydict|dictsort %}
sort the dict by key, case insensitive
- {% for item in mydict|dictsort(reverse=true) %}
+ {% for key, value in mydict|dictsort(reverse=true) %}
sort the dict by key, case insensitive, reverse order
- {% for item in mydict|dictsort(true) %}
+ {% for key, value in mydict|dictsort(true) %}
sort the dict by key, case sensitive
- {% for item in mydict|dictsort(false, 'value') %}
+ {% for key, value in mydict|dictsort(false, 'value') %}
sort the dict by value, case insensitive
"""
- if by == 'key':
+ if by == "key":
pos = 0
- elif by == 'value':
+ elif by == "value":
pos = 1
else:
- raise FilterArgumentError(
- 'You can only sort by either "key" or "value"'
- )
+ raise FilterArgumentError('You can only sort by either "key" or "value"')
def sort_func(item):
value = item[pos]
@@ -243,48 +299,62 @@ def sort_func(item):
@environmentfilter
-def do_sort(
- environment, value, reverse=False, case_sensitive=False, attribute=None
-):
- """Sort an iterable. Per default it sorts ascending, if you pass it
- true as first argument it will reverse the sorting.
+def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
+ """Sort an iterable using Python's :func:`sorted`.
+
+ .. sourcecode:: jinja
+
+ {% for city in cities|sort %}
+ ...
+ {% endfor %}
- If the iterable is made of strings the third parameter can be used to
- control the case sensitiveness of the comparison which is disabled by
- default.
+ :param reverse: Sort descending instead of ascending.
+ :param case_sensitive: When sorting strings, sort upper and lower
+ case separately.
+ :param attribute: When sorting objects or dicts, an attribute or
+ key to sort by. Can use dot notation like ``"address.city"``.
+ Can be a list of attributes like ``"age,name"``.
+
+ The sort is stable, it does not change the relative order of
+    elements that compare equal. This makes it possible to chain
+    sorts on different attributes and orderings.
.. sourcecode:: jinja
- {% for item in iterable|sort %}
+ {% for user in users|sort(attribute="name")
+ |sort(reverse=true, attribute="age") %}
...
{% endfor %}
- It is also possible to sort by an attribute (for example to sort
- by the date of an object) by specifying the `attribute` parameter:
+ As a shortcut to chaining when the direction is the same for all
+    attributes, pass a comma separated list of attributes.
.. sourcecode:: jinja
- {% for item in iterable|sort(attribute='date') %}
+        {% for user in users|sort(attribute="age,name") %}
...
{% endfor %}
+ .. versionchanged:: 2.11.0
+ The ``attribute`` parameter can be a comma separated list of
+ attributes, e.g. ``"age,name"``.
+
.. versionchanged:: 2.6
- The `attribute` parameter was added.
+ The ``attribute`` parameter was added.
"""
- key_func = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
+ key_func = make_multi_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return sorted(value, key=key_func, reverse=reverse)
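A quick sketch of the new comma-separated attribute form; the sample data is invented.

    from collections import namedtuple
    from jinja2 import Environment

    User = namedtuple("User", ["name", "age"])
    users = [User("bob", 30), User("alice", 30), User("eve", 25)]

    # Sorted first by age, then by name for equal ages.
    tmpl = Environment().from_string(
        "{% for u in users|sort(attribute='age,name') %}{{ u.name }} {% endfor %}"
    )
    print(tmpl.render(users=users))  # eve alice bob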
@environmentfilter
def do_unique(environment, value, case_sensitive=False, attribute=None):
- """Returns a list of unique items from the the given iterable.
+ """Returns a list of unique items from the given iterable.
.. sourcecode:: jinja
- {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
-> ['foo', 'bar', 'foobar']
The unique items are yielded in the same order as their first occurrence in
@@ -294,8 +364,7 @@ def do_unique(environment, value, case_sensitive=False, attribute=None):
:param attribute: Filter objects with unique values for this attribute.
"""
getter = make_attrgetter(
- environment, attribute,
- postprocess=ignore_case if not case_sensitive else None
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
seen = set()
@@ -313,11 +382,10 @@ def _min_or_max(environment, value, func, case_sensitive, attribute):
try:
first = next(it)
except StopIteration:
- return environment.undefined('No aggregated item, sequence was empty.')
+ return environment.undefined("No aggregated item, sequence was empty.")
key_func = make_attrgetter(
- environment, attribute,
- ignore_case if not case_sensitive else None
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
)
return func(chain([first], it), key=key_func)
@@ -332,7 +400,7 @@ def do_min(environment, value, case_sensitive=False, attribute=None):
-> 1
:param case_sensitive: Treat upper and lower case strings as distinct.
- :param attribute: Get the object with the max value of this attribute.
+ :param attribute: Get the object with the min value of this attribute.
"""
return _min_or_max(environment, value, min, case_sensitive, attribute)
@@ -352,7 +420,7 @@ def do_max(environment, value, case_sensitive=False, attribute=None):
return _min_or_max(environment, value, max, case_sensitive, attribute)
-def do_default(value, default_value=u'', boolean=False):
+def do_default(value, default_value=u"", boolean=False):
"""If the value is undefined it will return the passed default value,
otherwise the value of the variable:
@@ -368,6 +436,12 @@ def do_default(value, default_value=u'', boolean=False):
.. sourcecode:: jinja
{{ ''|default('the string was empty', true) }}
+
+ .. versionchanged:: 2.11
+ It's now possible to configure the :class:`~jinja2.Environment` with
+ :class:`~jinja2.ChainableUndefined` to make the `default` filter work
+ on nested elements and attributes that may contain undefined values
+ in the chain without getting an :exc:`~jinja2.UndefinedError`.
"""
if isinstance(value, Undefined) or (boolean and not value):
return default_value
@@ -375,7 +449,7 @@ def do_default(value, default_value=u'', boolean=False):
@evalcontextfilter
-def do_join(eval_ctx, value, d=u'', attribute=None):
+def do_join(eval_ctx, value, d=u"", attribute=None):
"""Return a string which is the concatenation of the strings in the
sequence. The separator between elements is an empty string per
default, you can define it with the optional parameter:
@@ -400,17 +474,17 @@ def do_join(eval_ctx, value, d=u'', attribute=None):
if attribute is not None:
value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
- # no automatic escaping? joining is a lot eaiser then
+ # no automatic escaping? joining is a lot easier then
if not eval_ctx.autoescape:
return text_type(d).join(imap(text_type, value))
# if the delimiter doesn't have an html representation we check
# if any of the items has. If yes we do a coercion to Markup
- if not hasattr(d, '__html__'):
+ if not hasattr(d, "__html__"):
value = list(value)
do_escape = False
for idx, item in enumerate(value):
- if hasattr(item, '__html__'):
+ if hasattr(item, "__html__"):
do_escape = True
else:
value[idx] = text_type(item)
@@ -435,16 +509,25 @@ def do_first(environment, seq):
try:
return next(iter(seq))
except StopIteration:
- return environment.undefined('No first item, sequence was empty.')
+ return environment.undefined("No first item, sequence was empty.")
@environmentfilter
def do_last(environment, seq):
- """Return the last item of a sequence."""
+ """
+ Return the last item of a sequence.
+
+ Note: Does not work with generators. You may want to explicitly
+ convert it to a list:
+
+ .. sourcecode:: jinja
+
+ {{ data | selectattr('name', '==', 'Jinja') | list | last }}
+ """
try:
return next(iter(reversed(seq)))
except StopIteration:
- return environment.undefined('No last item, sequence was empty.')
+ return environment.undefined("No last item, sequence was empty.")
@contextfilter
@@ -453,7 +536,7 @@ def do_random(context, seq):
try:
return random.choice(seq)
except IndexError:
- return context.environment.undefined('No random item, sequence was empty.')
+ return context.environment.undefined("No random item, sequence was empty.")
def do_filesizeformat(value, binary=False):
@@ -465,25 +548,25 @@ def do_filesizeformat(value, binary=False):
bytes = float(value)
base = binary and 1024 or 1000
prefixes = [
- (binary and 'KiB' or 'kB'),
- (binary and 'MiB' or 'MB'),
- (binary and 'GiB' or 'GB'),
- (binary and 'TiB' or 'TB'),
- (binary and 'PiB' or 'PB'),
- (binary and 'EiB' or 'EB'),
- (binary and 'ZiB' or 'ZB'),
- (binary and 'YiB' or 'YB')
+ (binary and "KiB" or "kB"),
+ (binary and "MiB" or "MB"),
+ (binary and "GiB" or "GB"),
+ (binary and "TiB" or "TB"),
+ (binary and "PiB" or "PB"),
+ (binary and "EiB" or "EB"),
+ (binary and "ZiB" or "ZB"),
+ (binary and "YiB" or "YB"),
]
if bytes == 1:
- return '1 Byte'
+ return "1 Byte"
elif bytes < base:
- return '%d Bytes' % bytes
+ return "%d Bytes" % bytes
else:
for i, prefix in enumerate(prefixes):
unit = base ** (i + 2)
if bytes < unit:
- return '%.1f %s' % ((base * bytes / unit), prefix)
- return '%.1f %s' % ((base * bytes / unit), prefix)
+ return "%.1f %s" % ((base * bytes / unit), prefix)
+ return "%.1f %s" % ((base * bytes / unit), prefix)
def do_pprint(value, verbose=False):
@@ -496,8 +579,9 @@ def do_pprint(value, verbose=False):
@evalcontextfilter
-def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
- target=None, rel=None):
+def do_urlize(
+ eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
+):
"""Converts URLs in plain text into clickable links.
If you pass the filter an additional integer it will shorten the urls
@@ -520,22 +604,20 @@ def do_urlize(eval_ctx, value, trim_url_limit=None, nofollow=False,
The *target* parameter was added.
"""
policies = eval_ctx.environment.policies
- rel = set((rel or '').split() or [])
+ rel = set((rel or "").split() or [])
if nofollow:
- rel.add('nofollow')
- rel.update((policies['urlize.rel'] or '').split())
+ rel.add("nofollow")
+ rel.update((policies["urlize.rel"] or "").split())
if target is None:
- target = policies['urlize.target']
- rel = ' '.join(sorted(rel)) or None
+ target = policies["urlize.target"]
+ rel = " ".join(sorted(rel)) or None
rv = urlize(value, trim_url_limit, rel=rel, target=target)
if eval_ctx.autoescape:
rv = Markup(rv)
return rv
-def do_indent(
- s, width=4, first=False, blank=False, indentfirst=None
-):
+def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
"""Return a copy of the string with each line indented by 4 spaces. The
first line and blank lines are not indented by default.
@@ -549,22 +631,31 @@ def do_indent(
Rename the ``indentfirst`` argument to ``first``.
"""
if indentfirst is not None:
- warnings.warn(DeprecationWarning(
- 'The "indentfirst" argument is renamed to "first".'
- ), stacklevel=2)
+ warnings.warn(
+ "The 'indentfirst' argument is renamed to 'first' and will"
+ " be removed in version 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
first = indentfirst
- s += u'\n' # this quirk is necessary for splitlines method
- indention = u' ' * width
+ indention = u" " * width
+ newline = u"\n"
+
+ if isinstance(s, Markup):
+ indention = Markup(indention)
+ newline = Markup(newline)
+
+ s += newline # this quirk is necessary for splitlines method
if blank:
- rv = (u'\n' + indention).join(s.splitlines())
+ rv = (newline + indention).join(s.splitlines())
else:
lines = s.splitlines()
rv = lines.pop(0)
if lines:
- rv += u'\n' + u'\n'.join(
+ rv += newline + newline.join(
indention + line if line else line for line in lines
)
@@ -575,7 +666,7 @@ def do_indent(
@environmentfilter
-def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
+def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
"""Return a truncated copy of the string. The length is specified
with the first parameter which defaults to ``255``. If the second
parameter is ``true`` the filter will cut the text at length. Otherwise
@@ -596,46 +687,81 @@ def do_truncate(env, s, length=255, killwords=False, end='...', leeway=None):
{{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
-> "foo bar..."
- The default leeway on newer Jinja2 versions is 5 and was 0 before but
+ The default leeway on newer Jinja versions is 5 and was 0 before but
can be reconfigured globally.
"""
if leeway is None:
- leeway = env.policies['truncate.leeway']
- assert length >= len(end), 'expected length >= %s, got %s' % (len(end), length)
- assert leeway >= 0, 'expected leeway >= 0, got %s' % leeway
+ leeway = env.policies["truncate.leeway"]
+ assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
+ assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
if len(s) <= length + leeway:
return s
if killwords:
- return s[:length - len(end)] + end
- result = s[:length - len(end)].rsplit(' ', 1)[0]
+ return s[: length - len(end)] + end
+ result = s[: length - len(end)].rsplit(" ", 1)[0]
return result + end
@environmentfilter
-def do_wordwrap(environment, s, width=79, break_long_words=True,
- wrapstring=None):
+def do_wordwrap(
+ environment,
+ s,
+ width=79,
+ break_long_words=True,
+ wrapstring=None,
+ break_on_hyphens=True,
+):
+ """Wrap a string to the given width. Existing newlines are treated
+ as paragraphs to be wrapped separately.
+
+ :param s: Original text to wrap.
+ :param width: Maximum length of wrapped lines.
+ :param break_long_words: If a word is longer than ``width``, break
+ it across lines.
+ :param break_on_hyphens: If a word contains hyphens, it may be split
+ across lines.
+ :param wrapstring: String to join each wrapped line. Defaults to
+ :attr:`Environment.newline_sequence`.
+
+ .. versionchanged:: 2.11
+ Existing newlines are treated as paragraphs wrapped separately.
+
+ .. versionchanged:: 2.11
+ Added the ``break_on_hyphens`` parameter.
+
+ .. versionchanged:: 2.7
+ Added the ``wrapstring`` parameter.
"""
- Return a copy of the string passed to the filter wrapped after
- ``79`` characters. You can override this default using the first
- parameter. If you set the second parameter to `false` Jinja will not
- split words apart if they are longer than `width`. By default, the newlines
- will be the default newlines for the environment, but this can be changed
- using the wrapstring keyword argument.
- .. versionadded:: 2.7
- Added support for the `wrapstring` parameter.
- """
+ import textwrap
+
if not wrapstring:
wrapstring = environment.newline_sequence
- import textwrap
- return wrapstring.join(textwrap.wrap(s, width=width, expand_tabs=False,
- replace_whitespace=False,
- break_long_words=break_long_words))
+
+ # textwrap.wrap doesn't consider existing newlines when wrapping.
+ # If the string has a newline before width, wrap will still insert
+ # a newline at width, resulting in a short line. Instead, split and
+ # wrap each paragraph individually.
+ return wrapstring.join(
+ [
+ wrapstring.join(
+ textwrap.wrap(
+ line,
+ width=width,
+ expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words,
+ break_on_hyphens=break_on_hyphens,
+ )
+ )
+ for line in s.splitlines()
+ ]
+ )
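An illustrative check of the paragraph-aware wrapping described in the comment above; the text is invented.

    from jinja2 import Environment

    env = Environment()
    text = "a short first line\n" + "word " * 10
    # Each original line is wrapped on its own, so the short first line is
    # not merged into the paragraph that follows it.
    print(env.from_string("{{ text|wordwrap(20) }}").render(text=text))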
def do_wordcount(s):
"""Count the words in that string."""
- return len(_word_re.findall(s))
+ return len(_word_re.findall(soft_unicode(s)))
def do_int(value, default=0, base=10):
@@ -671,29 +797,40 @@ def do_float(value, default=0.0):
def do_format(value, *args, **kwargs):
- """
- Apply python string formatting on an object:
+ """Apply the given values to a `printf-style`_ format string, like
+ ``string % values``.
.. sourcecode:: jinja
- {{ "%s - %s"|format("Hello?", "Foo!") }}
- -> Hello? - Foo!
+ {{ "%s, %s!"|format(greeting, name) }}
+ Hello, World!
+
+ In most cases it should be more convenient and efficient to use the
+ ``%`` operator or :meth:`str.format`.
+
+ .. code-block:: text
+
+ {{ "%s, %s!" % (greeting, name) }}
+ {{ "{}, {}!".format(greeting, name) }}
+
+ .. _printf-style: https://docs.python.org/library/stdtypes.html
+ #printf-style-string-formatting
"""
if args and kwargs:
- raise FilterArgumentError('can\'t handle positional and keyword '
- 'arguments at the same time')
+ raise FilterArgumentError(
+ "can't handle positional and keyword arguments at the same time"
+ )
return soft_unicode(value) % (kwargs or args)
-def do_trim(value):
- """Strip leading and trailing whitespace."""
- return soft_unicode(value).strip()
+def do_trim(value, chars=None):
+ """Strip leading and trailing characters, by default whitespace."""
+ return soft_unicode(value).strip(chars)
def do_striptags(value):
- """Strip SGML/XML tags and replace adjacent whitespace by one space.
- """
- if hasattr(value, '__html__'):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space."""
+ if hasattr(value, "__html__"):
value = value.__html__()
return Markup(text_type(value)).striptags()
@@ -705,7 +842,7 @@ def do_slice(value, slices, fill_with=None):
.. sourcecode:: html+jinja
-        <div class="columwrapper">
+        <div class="columnwrapper">
{%- for column in items|slice(3) %}
{%- for item in column %}
@@ -765,7 +902,7 @@ def do_batch(value, linecount, fill_with=None):
yield tmp
-def do_round(value, precision=0, method='common'):
+def do_round(value, precision=0, method="common"):
"""Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
@@ -791,9 +928,9 @@ def do_round(value, precision=0, method='common'):
{{ 42.55|round|int }}
-> 43
"""
- if not method in ('common', 'ceil', 'floor'):
- raise FilterArgumentError('method must be common, ceil or floor')
- if method == 'common':
+ if method not in {"common", "ceil", "floor"}:
+ raise FilterArgumentError("method must be common, ceil or floor")
+ if method == "common":
return round(value, precision)
func = getattr(math, method)
return func(value * (10 ** precision)) / (10 ** precision)
@@ -804,52 +941,51 @@ def do_round(value, precision=0, method='common'):
# we do not want to accidentally expose an auto generated repr in case
# people start to print this out in comments or something similar for
# debugging.
-_GroupTuple = namedtuple('_GroupTuple', ['grouper', 'list'])
+_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
_GroupTuple.__repr__ = tuple.__repr__
_GroupTuple.__str__ = tuple.__str__
+
@environmentfilter
def do_groupby(environment, value, attribute):
- """Group a sequence of objects by a common attribute.
+ """Group a sequence of objects by an attribute using Python's
+ :func:`itertools.groupby`. The attribute can use dot notation for
+ nested access, like ``"address.city"``. Unlike Python's ``groupby``,
+ the values are sorted first so only one group is returned for each
+ unique value.
- If you for example have a list of dicts or objects that represent persons
- with `gender`, `first_name` and `last_name` attributes and you want to
- group all users by genders you can do something like the following
- snippet:
+ For example, a list of ``User`` objects with a ``city`` attribute
+ can be rendered in groups. In this example, ``grouper`` refers to
+ the ``city`` value of the group.
.. sourcecode:: html+jinja
-
- {% for group in persons|groupby('gender') %}
- - {{ group.grouper }}
- {% for person in group.list %}
- - {{ person.first_name }} {{ person.last_name }}
- {% endfor %}
- {% endfor %}
-
+ {% for city, items in users|groupby("city") %}
+ - {{ city }}
+
+          {% for user in items %}
+ - {{ user.name }}
+ {% endfor %}
+
+ {% endfor %}
- Additionally it's possible to use tuple unpacking for the grouper and
- list:
+ ``groupby`` yields namedtuples of ``(grouper, list)``, which
+ can be used instead of the tuple unpacking above. ``grouper`` is the
+ value of the attribute, and ``list`` is the items with that value.
.. sourcecode:: html+jinja
-
- {% for grouper, list in persons|groupby('gender') %}
- ...
- {% endfor %}
-
-
- As you can see the item we're grouping by is stored in the `grouper`
- attribute and the `list` contains all the objects that have this grouper
- in common.
+ {% for group in users|groupby("city") %}
+ - {{ group.grouper }}: {{ group.list|join(", ") }}
+ {% endfor %}
.. versionchanged:: 2.6
- It's now possible to use dotted notation to group by the child
- attribute of another attribute.
+ The attribute supports dot notation for nested access.
"""
expr = make_attrgetter(environment, attribute)
- return [_GroupTuple(key, list(values)) for key, values
- in groupby(sorted(value, key=expr), expr)]
+ return [
+ _GroupTuple(key, list(values))
+ for key, values in groupby(sorted(value, key=expr), expr)
+ ]
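A compact sketch of the tuple-unpacking form documented above; the sample data is invented.

    from jinja2 import Environment

    people = [
        {"name": "ada", "city": "London"},
        {"name": "grace", "city": "New York"},
        {"name": "alan", "city": "London"},
    ]
    # One group per unique city, sorted before grouping.
    tmpl = Environment().from_string(
        "{% for city, members in people|groupby('city') %}"
        "{{ city }}: {{ members|map(attribute='name')|join(', ') }}\n"
        "{% endfor %}"
    )
    print(tmpl.render(people=people))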
@environmentfilter
@@ -906,7 +1042,7 @@ def do_reverse(value):
rv.reverse()
return rv
except TypeError:
- raise FilterArgumentError('argument must be iterable')
+ raise FilterArgumentError("argument must be iterable")
@environmentfilter
@@ -927,8 +1063,9 @@ def do_attr(environment, obj, name):
except AttributeError:
pass
else:
- if environment.sandboxed and not \
- environment.is_safe_attribute(obj, name, value):
+ if environment.sandboxed and not environment.is_safe_attribute(
+ obj, name, value
+ ):
return environment.unsafe_undefined(obj, name)
return value
return environment.undefined(obj=obj, name=name)
@@ -947,6 +1084,13 @@ def do_map(*args, **kwargs):
Users on this page: {{ users|map(attribute='username')|join(', ') }}
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
+
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
@@ -955,6 +1099,17 @@ def do_map(*args, **kwargs):
Users on this page: {{ titles|map('lower')|join(', ') }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u.username for u in users)
+ (u.username or "Anonymous" for u in users)
+ (do_lower(x) for x in titles)
+
+ .. versionchanged:: 2.11.0
+ Added the ``default`` parameter.
+
.. versionadded:: 2.7
"""
seq, func = prepare_map(args, kwargs)
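And a sketch of the new default parameter, which prepare_map (further below) forwards to make_attrgetter; the data is invented.

    from jinja2 import Environment

    users = [{"username": "ada"}, {}]
    # Objects missing the attribute fall back to the given default.
    tmpl = Environment().from_string(
        '{{ users|map(attribute="username", default="Anonymous")|join(", ") }}'
    )
    print(tmpl.render(users=users))  # ada, Anonymous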
@@ -980,6 +1135,13 @@ def do_select(*args, **kwargs):
{{ numbers|select("lessthan", 42) }}
{{ strings|select("equalto", "mystring") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if test_odd(n))
+ (n for n in numbers if test_divisibleby(n, 3))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, False)
@@ -998,6 +1160,12 @@ def do_reject(*args, **kwargs):
{{ numbers|reject("odd") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if not test_odd(n))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, False)
@@ -1019,6 +1187,13 @@ def do_selectattr(*args, **kwargs):
{{ users|selectattr("is_active") }}
{{ users|selectattr("email", "none") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+        (u for u in users if u.is_active)
+        (u for u in users if test_none(u.email))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: x, True)
@@ -1038,6 +1213,13 @@ def do_rejectattr(*args, **kwargs):
{{ users|rejectattr("is_active") }}
{{ users|rejectattr("email", "none") }}
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+        (u for u in users if not u.is_active)
+        (u for u in users if not test_none(u.email))
+
.. versionadded:: 2.7
"""
return select_or_reject(args, kwargs, lambda x: not x, True)
@@ -1070,32 +1252,38 @@ def do_tojson(eval_ctx, value, indent=None):
.. versionadded:: 2.9
"""
policies = eval_ctx.environment.policies
- dumper = policies['json.dumps_function']
- options = policies['json.dumps_kwargs']
+ dumper = policies["json.dumps_function"]
+ options = policies["json.dumps_kwargs"]
if indent is not None:
options = dict(options)
- options['indent'] = indent
+ options["indent"] = indent
return htmlsafe_json_dumps(value, dumper=dumper, **options)
def prepare_map(args, kwargs):
context = args[0]
seq = args[1]
+ default = None
- if len(args) == 2 and 'attribute' in kwargs:
- attribute = kwargs.pop('attribute')
+ if len(args) == 2 and "attribute" in kwargs:
+ attribute = kwargs.pop("attribute")
+ default = kwargs.pop("default", None)
if kwargs:
- raise FilterArgumentError('Unexpected keyword argument %r' %
- next(iter(kwargs)))
- func = make_attrgetter(context.environment, attribute)
+ raise FilterArgumentError(
+ "Unexpected keyword argument %r" % next(iter(kwargs))
+ )
+ func = make_attrgetter(context.environment, attribute, default=default)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
- raise FilterArgumentError('map requires a filter argument')
- func = lambda item: context.environment.call_filter(
- name, item, args, kwargs, context=context)
+ raise FilterArgumentError("map requires a filter argument")
+
+ def func(item):
+ return context.environment.call_filter(
+ name, item, args, kwargs, context=context
+ )
return seq, func
@@ -1107,18 +1295,22 @@ def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
try:
attr = args[2]
except LookupError:
- raise FilterArgumentError('Missing parameter for attribute name')
+ raise FilterArgumentError("Missing parameter for attribute name")
transfunc = make_attrgetter(context.environment, attr)
off = 1
else:
off = 0
- transfunc = lambda x: x
+
+ def transfunc(x):
+ return x
try:
name = args[2 + off]
- args = args[3 + off:]
- func = lambda item: context.environment.call_test(
- name, item, args, kwargs)
+ args = args[3 + off :]
+
+ def func(item):
+ return context.environment.call_test(name, item, args, kwargs)
+
except LookupError:
func = bool
@@ -1134,57 +1326,57 @@ def select_or_reject(args, kwargs, modfunc, lookup_attr):
FILTERS = {
- 'abs': abs,
- 'attr': do_attr,
- 'batch': do_batch,
- 'capitalize': do_capitalize,
- 'center': do_center,
- 'count': len,
- 'd': do_default,
- 'default': do_default,
- 'dictsort': do_dictsort,
- 'e': escape,
- 'escape': escape,
- 'filesizeformat': do_filesizeformat,
- 'first': do_first,
- 'float': do_float,
- 'forceescape': do_forceescape,
- 'format': do_format,
- 'groupby': do_groupby,
- 'indent': do_indent,
- 'int': do_int,
- 'join': do_join,
- 'last': do_last,
- 'length': len,
- 'list': do_list,
- 'lower': do_lower,
- 'map': do_map,
- 'min': do_min,
- 'max': do_max,
- 'pprint': do_pprint,
- 'random': do_random,
- 'reject': do_reject,
- 'rejectattr': do_rejectattr,
- 'replace': do_replace,
- 'reverse': do_reverse,
- 'round': do_round,
- 'safe': do_mark_safe,
- 'select': do_select,
- 'selectattr': do_selectattr,
- 'slice': do_slice,
- 'sort': do_sort,
- 'string': soft_unicode,
- 'striptags': do_striptags,
- 'sum': do_sum,
- 'title': do_title,
- 'trim': do_trim,
- 'truncate': do_truncate,
- 'unique': do_unique,
- 'upper': do_upper,
- 'urlencode': do_urlencode,
- 'urlize': do_urlize,
- 'wordcount': do_wordcount,
- 'wordwrap': do_wordwrap,
- 'xmlattr': do_xmlattr,
- 'tojson': do_tojson,
+ "abs": abs,
+ "attr": do_attr,
+ "batch": do_batch,
+ "capitalize": do_capitalize,
+ "center": do_center,
+ "count": len,
+ "d": do_default,
+ "default": do_default,
+ "dictsort": do_dictsort,
+ "e": escape,
+ "escape": escape,
+ "filesizeformat": do_filesizeformat,
+ "first": do_first,
+ "float": do_float,
+ "forceescape": do_forceescape,
+ "format": do_format,
+ "groupby": do_groupby,
+ "indent": do_indent,
+ "int": do_int,
+ "join": do_join,
+ "last": do_last,
+ "length": len,
+ "list": do_list,
+ "lower": do_lower,
+ "map": do_map,
+ "min": do_min,
+ "max": do_max,
+ "pprint": do_pprint,
+ "random": do_random,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "replace": do_replace,
+ "reverse": do_reverse,
+ "round": do_round,
+ "safe": do_mark_safe,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "slice": do_slice,
+ "sort": do_sort,
+ "string": soft_unicode,
+ "striptags": do_striptags,
+ "sum": do_sum,
+ "title": do_title,
+ "trim": do_trim,
+ "truncate": do_truncate,
+ "unique": do_unique,
+ "upper": do_upper,
+ "urlencode": do_urlencode,
+ "urlize": do_urlize,
+ "wordcount": do_wordcount,
+ "wordwrap": do_wordwrap,
+ "xmlattr": do_xmlattr,
+ "tojson": do_tojson,
}
diff --git a/external/python/jinja2/idtracking.py b/external/python/jinja2/idtracking.py
index 491bfe08..9a0d8380 100644
--- a/external/python/jinja2/idtracking.py
+++ b/external/python/jinja2/idtracking.py
@@ -1,11 +1,10 @@
-from jinja2.visitor import NodeVisitor
-from jinja2._compat import iteritems
+from ._compat import iteritems
+from .visitor import NodeVisitor
-
-VAR_LOAD_PARAMETER = 'param'
-VAR_LOAD_RESOLVE = 'resolve'
-VAR_LOAD_ALIAS = 'alias'
-VAR_LOAD_UNDEFINED = 'undefined'
+VAR_LOAD_PARAMETER = "param"
+VAR_LOAD_RESOLVE = "resolve"
+VAR_LOAD_ALIAS = "alias"
+VAR_LOAD_UNDEFINED = "undefined"
def find_symbols(nodes, parent_symbols=None):
@@ -23,7 +22,6 @@ def symbols_for_node(node, parent_symbols=None):
class Symbols(object):
-
def __init__(self, parent=None, level=None):
if level is None:
if parent is None:
@@ -41,7 +39,7 @@ def analyze_node(self, node, **kwargs):
visitor.visit(node, **kwargs)
def _define_ref(self, name, load=None):
- ident = 'l_%d_%s' % (self.level, name)
+ ident = "l_%d_%s" % (self.level, name)
self.refs[name] = ident
if load is not None:
self.loads[ident] = load
@@ -62,8 +60,10 @@ def find_ref(self, name):
def ref(self, name):
rv = self.find_ref(name)
if rv is None:
- raise AssertionError('Tried to resolve a name to a reference that '
- 'was unknown to the frame (%r)' % name)
+ raise AssertionError(
+ "Tried to resolve a name to a reference that "
+ "was unknown to the frame (%r)" % name
+ )
return rv
def copy(self):
@@ -118,7 +118,7 @@ def branch_update(self, branch_symbols):
if branch_count == len(branch_symbols):
continue
target = self.find_ref(name)
- assert target is not None, 'should not happen'
+ assert target is not None, "should not happen"
if self.parent is not None:
outer_target = self.parent.find_ref(name)
@@ -149,7 +149,6 @@ def dump_param_targets(self):
class RootVisitor(NodeVisitor):
-
def __init__(self, symbols):
self.sym_visitor = FrameSymbolVisitor(symbols)
@@ -157,35 +156,39 @@ def _simple_visit(self, node, **kwargs):
for child in node.iter_child_nodes():
self.sym_visitor.visit(child)
- visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \
- visit_Scope = visit_If = visit_ScopedEvalContextModifier = \
- _simple_visit
+ visit_Template = (
+ visit_Block
+ ) = (
+ visit_Macro
+ ) = (
+ visit_FilterBlock
+ ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
def visit_AssignBlock(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
def visit_CallBlock(self, node, **kwargs):
- for child in node.iter_child_nodes(exclude=('call',)):
+ for child in node.iter_child_nodes(exclude=("call",)):
self.sym_visitor.visit(child)
def visit_OverlayScope(self, node, **kwargs):
for child in node.body:
self.sym_visitor.visit(child)
- def visit_For(self, node, for_branch='body', **kwargs):
- if for_branch == 'body':
+ def visit_For(self, node, for_branch="body", **kwargs):
+ if for_branch == "body":
self.sym_visitor.visit(node.target, store_as_param=True)
branch = node.body
- elif for_branch == 'else':
+ elif for_branch == "else":
branch = node.else_
- elif for_branch == 'test':
+ elif for_branch == "test":
self.sym_visitor.visit(node.target, store_as_param=True)
if node.test is not None:
self.sym_visitor.visit(node.test)
return
else:
- raise RuntimeError('Unknown for branch')
+ raise RuntimeError("Unknown for branch")
for item in branch or ():
self.sym_visitor.visit(item)
@@ -196,8 +199,9 @@ def visit_With(self, node, **kwargs):
self.sym_visitor.visit(child)
def generic_visit(self, node, *args, **kwargs):
- raise NotImplementedError('Cannot find symbols for %r' %
- node.__class__.__name__)
+ raise NotImplementedError(
+ "Cannot find symbols for %r" % node.__class__.__name__
+ )
class FrameSymbolVisitor(NodeVisitor):
@@ -208,11 +212,11 @@ def __init__(self, symbols):
def visit_Name(self, node, store_as_param=False, **kwargs):
"""All assignments to names go through this function."""
- if store_as_param or node.ctx == 'param':
+ if store_as_param or node.ctx == "param":
self.symbols.declare_parameter(node.name)
- elif node.ctx == 'store':
+ elif node.ctx == "store":
self.symbols.store(node.name)
- elif node.ctx == 'load':
+ elif node.ctx == "load":
self.symbols.load(node.name)
def visit_NSRef(self, node, **kwargs):
diff --git a/external/python/jinja2/lexer.py b/external/python/jinja2/lexer.py
index 6fd135dd..552356a1 100644
--- a/external/python/jinja2/lexer.py
+++ b/external/python/jinja2/lexer.py
@@ -1,185 +1,194 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.lexer
- ~~~~~~~~~~~~
-
- This module implements a Jinja / Python combination lexer. The
- `Lexer` class provided by this module is used to do some preprocessing
- for Jinja.
-
- On the one hand it filters out invalid operators like the bitshift
- operators we don't allow in templates. On the other hand it separates
- template code and python code in expressions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
+is used to do some preprocessing. It filters out invalid operators like
+the bitshift operators we don't allow in templates. It separates
+template code and python code in expressions.
"""
import re
+from ast import literal_eval
from collections import deque
from operator import itemgetter
-from jinja2._compat import implements_iterator, intern, iteritems, text_type
-from jinja2.exceptions import TemplateSyntaxError
-from jinja2.utils import LRUCache
+from ._compat import implements_iterator
+from ._compat import intern
+from ._compat import iteritems
+from ._compat import text_type
+from .exceptions import TemplateSyntaxError
+from .utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)
# static regular expressions
-whitespace_re = re.compile(r'\s+', re.U)
-string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
- r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
-integer_re = re.compile(r'\d+')
+whitespace_re = re.compile(r"\s+", re.U)
+newline_re = re.compile(r"(\r\n|\r|\n)")
+string_re = re.compile(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
+)
+integer_re = re.compile(r"(\d+_)*\d+")
+float_re = re.compile(
+ r"""
+ (?<!\.)  # doesn't start with a .
+ (\d+_)*\d+  # digits, possibly _ separated
+ (
+ (\.(\d+_)*\d+)?  # decimal part
+ e[+\-]?(\d+_)*\d+  # exponent part
+ |
+ \.(\d+_)*\d+  # decimal part
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
try:
# check if this Python supports Unicode identifiers
- compile('föö', '', 'eval')
+ compile("föö", "", "eval")
except SyntaxError:
- # no Unicode support, use ASCII identifiers
- name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
+ # Python 2, no Unicode support, use ASCII identifiers
+ name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
check_ident = False
else:
- # Unicode support, build a pattern to match valid characters, and set flag
- # to use str.isidentifier to validate during lexing
- from jinja2 import _identifier
- name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
- check_ident = True
- # remove the pattern from memory after building the regex
- import sys
- del sys.modules['jinja2._identifier']
- import jinja2
- del jinja2._identifier
- del _identifier
+ # Unicode support, import generated re pattern and set flag to use
+ # str.isidentifier to validate during lexing.
+ from ._identifier import pattern as name_re
+ check_ident = True
-float_re = re.compile(r'(?<!\.)\d+\.\d+')
-newline_re = re.compile(r'(\r\n|\r|\n)')
operators = {
- '+': TOKEN_ADD,
- '-': TOKEN_SUB,
- '/': TOKEN_DIV,
- '//': TOKEN_FLOORDIV,
- '*': TOKEN_MUL,
- '%': TOKEN_MOD,
- '**': TOKEN_POW,
- '~': TOKEN_TILDE,
- '[': TOKEN_LBRACKET,
- ']': TOKEN_RBRACKET,
- '(': TOKEN_LPAREN,
- ')': TOKEN_RPAREN,
- '{': TOKEN_LBRACE,
- '}': TOKEN_RBRACE,
- '==': TOKEN_EQ,
- '!=': TOKEN_NE,
- '>': TOKEN_GT,
- '>=': TOKEN_GTEQ,
- '<': TOKEN_LT,
- '<=': TOKEN_LTEQ,
- '=': TOKEN_ASSIGN,
- '.': TOKEN_DOT,
- ':': TOKEN_COLON,
- '|': TOKEN_PIPE,
- ',': TOKEN_COMMA,
- ';': TOKEN_SEMICOLON
+ "+": TOKEN_ADD,
+ "-": TOKEN_SUB,
+ "/": TOKEN_DIV,
+ "//": TOKEN_FLOORDIV,
+ "*": TOKEN_MUL,
+ "%": TOKEN_MOD,
+ "**": TOKEN_POW,
+ "~": TOKEN_TILDE,
+ "[": TOKEN_LBRACKET,
+ "]": TOKEN_RBRACKET,
+ "(": TOKEN_LPAREN,
+ ")": TOKEN_RPAREN,
+ "{": TOKEN_LBRACE,
+ "}": TOKEN_RBRACE,
+ "==": TOKEN_EQ,
+ "!=": TOKEN_NE,
+ ">": TOKEN_GT,
+ ">=": TOKEN_GTEQ,
+ "<": TOKEN_LT,
+ "<=": TOKEN_LTEQ,
+ "=": TOKEN_ASSIGN,
+ ".": TOKEN_DOT,
+ ":": TOKEN_COLON,
+ "|": TOKEN_PIPE,
+ ",": TOKEN_COMMA,
+ ";": TOKEN_SEMICOLON,
}
reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
-assert len(operators) == len(reverse_operators), 'operators dropped'
-operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
- sorted(operators, key=lambda x: -len(x))))
-
-ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
- TOKEN_COMMENT_END, TOKEN_WHITESPACE,
- TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
- TOKEN_LINECOMMENT])
-ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
- TOKEN_COMMENT, TOKEN_LINECOMMENT])
+assert len(operators) == len(reverse_operators), "operators dropped"
+operator_re = re.compile(
+ "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
+)
+
+ignored_tokens = frozenset(
+ [
+ TOKEN_COMMENT_BEGIN,
+ TOKEN_COMMENT,
+ TOKEN_COMMENT_END,
+ TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN,
+ TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT,
+ ]
+)
+ignore_if_empty = frozenset(
+ [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
+)
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {
- TOKEN_COMMENT_BEGIN: 'begin of comment',
- TOKEN_COMMENT_END: 'end of comment',
- TOKEN_COMMENT: 'comment',
- TOKEN_LINECOMMENT: 'comment',
- TOKEN_BLOCK_BEGIN: 'begin of statement block',
- TOKEN_BLOCK_END: 'end of statement block',
- TOKEN_VARIABLE_BEGIN: 'begin of print statement',
- TOKEN_VARIABLE_END: 'end of print statement',
- TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
- TOKEN_LINESTATEMENT_END: 'end of line statement',
- TOKEN_DATA: 'template data / text',
- TOKEN_EOF: 'end of template'
+ TOKEN_COMMENT_BEGIN: "begin of comment",
+ TOKEN_COMMENT_END: "end of comment",
+ TOKEN_COMMENT: "comment",
+ TOKEN_LINECOMMENT: "comment",
+ TOKEN_BLOCK_BEGIN: "begin of statement block",
+ TOKEN_BLOCK_END: "end of statement block",
+ TOKEN_VARIABLE_BEGIN: "begin of print statement",
+ TOKEN_VARIABLE_END: "end of print statement",
+ TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
+ TOKEN_LINESTATEMENT_END: "end of line statement",
+ TOKEN_DATA: "template data / text",
+ TOKEN_EOF: "end of template",
}.get(token_type, token_type)
def describe_token(token):
"""Returns a description of the token."""
- if token.type == 'name':
+ if token.type == TOKEN_NAME:
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
- if ':' in expr:
- type, value = expr.split(':', 1)
- if type == 'name':
+ if ":" in expr:
+ type, value = expr.split(":", 1)
+ if type == TOKEN_NAME:
return value
else:
type = expr
@@ -197,21 +206,39 @@ def compile_rules(environment):
"""Compiles all the rules from the environment into a list of rules."""
e = re.escape
rules = [
- (len(environment.comment_start_string), 'comment',
- e(environment.comment_start_string)),
- (len(environment.block_start_string), 'block',
- e(environment.block_start_string)),
- (len(environment.variable_start_string), 'variable',
- e(environment.variable_start_string))
+ (
+ len(environment.comment_start_string),
+ TOKEN_COMMENT_BEGIN,
+ e(environment.comment_start_string),
+ ),
+ (
+ len(environment.block_start_string),
+ TOKEN_BLOCK_BEGIN,
+ e(environment.block_start_string),
+ ),
+ (
+ len(environment.variable_start_string),
+ TOKEN_VARIABLE_BEGIN,
+ e(environment.variable_start_string),
+ ),
]
if environment.line_statement_prefix is not None:
- rules.append((len(environment.line_statement_prefix), 'linestatement',
- r'^[ \t\v]*' + e(environment.line_statement_prefix)))
+ rules.append(
+ (
+ len(environment.line_statement_prefix),
+ TOKEN_LINESTATEMENT_BEGIN,
+ r"^[ \t\v]*" + e(environment.line_statement_prefix),
+ )
+ )
if environment.line_comment_prefix is not None:
- rules.append((len(environment.line_comment_prefix), 'linecomment',
- r'(?:^|(?<=\S))[^\S\r\n]*' +
- e(environment.line_comment_prefix)))
+ rules.append(
+ (
+ len(environment.line_comment_prefix),
+ TOKEN_LINECOMMENT_BEGIN,
+ r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
+ )
+ )
return [x[1:] for x in sorted(rules, reverse=True)]
@@ -231,6 +258,7 @@ def __call__(self, lineno, filename):
class Token(tuple):
"""Token class."""
+
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
@@ -240,7 +268,7 @@ def __new__(cls, lineno, type, value):
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
- elif self.type == 'name':
+ elif self.type == "name":
return self.value
return self.type
@@ -253,8 +281,8 @@ def test(self, expr):
# passed an iterable of not interned strings.
if self.type == expr:
return True
- elif ':' in expr:
- return expr.split(':', 1) == [self.type, self.value]
+ elif ":" in expr:
+ return expr.split(":", 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
@@ -265,11 +293,7 @@ def test_any(self, *iterable):
return False
def __repr__(self):
- return 'Token(%r, %r, %r)' % (
- self.lineno,
- self.type,
- self.value
- )
+ return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
@implements_iterator
@@ -306,7 +330,7 @@ def __init__(self, generator, name, filename):
self.name = name
self.filename = filename
self.closed = False
- self.current = Token(1, TOKEN_INITIAL, '')
+ self.current = Token(1, TOKEN_INITIAL, "")
next(self)
def __iter__(self):
@@ -314,9 +338,13 @@ def __iter__(self):
def __bool__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
+
__nonzero__ = __bool__ # py2
- eos = property(lambda x: not x, doc="Are we at the end of the stream?")
+ @property
+ def eos(self):
+ """Are we at the end of the stream?"""
+ return not self
def push(self, token):
"""Push a token back to the stream."""
@@ -332,7 +360,7 @@ def look(self):
def skip(self, n=1):
"""Got n tokens ahead."""
- for x in range(n):
+ for _ in range(n):
next(self)
def next_if(self, expr):
@@ -363,7 +391,7 @@ def __next__(self):
def close(self):
"""Close the stream."""
- self.current = Token(self.current.lineno, TOKEN_EOF, '')
+ self.current = Token(self.current.lineno, TOKEN_EOF, "")
self._iter = None
self.closed = True
@@ -374,14 +402,18 @@ def expect(self, expr):
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
- raise TemplateSyntaxError('unexpected end of template, '
- 'expected %r.' % expr,
- self.current.lineno,
- self.name, self.filename)
- raise TemplateSyntaxError("expected token %r, got %r" %
- (expr, describe_token(self.current)),
- self.current.lineno,
- self.name, self.filename)
+ raise TemplateSyntaxError(
+ "unexpected end of template, expected %r." % expr,
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+ raise TemplateSyntaxError(
+ "expected token %r, got %r" % (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
try:
return self.current
finally:
@@ -390,18 +422,20 @@ def expect(self, expr):
def get_lexer(environment):
"""Return a lexer which is probably cached."""
- key = (environment.block_start_string,
- environment.block_end_string,
- environment.variable_start_string,
- environment.variable_end_string,
- environment.comment_start_string,
- environment.comment_end_string,
- environment.line_statement_prefix,
- environment.line_comment_prefix,
- environment.trim_blocks,
- environment.lstrip_blocks,
- environment.newline_sequence,
- environment.keep_trailing_newline)
+ key = (
+ environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline,
+ )
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
@@ -409,6 +443,19 @@ def get_lexer(environment):
return lexer
+class OptionalLStrip(tuple):
+ """A special tuple for marking a point in the state that can have
+ lstrip applied.
+ """
+
+ __slots__ = ()
+
+ # Even though it looks like a no-op, creating instances fails
+ # without this.
+ def __new__(cls, *members, **kwargs):
+ return super(OptionalLStrip, cls).__new__(cls, members)
+
+
class Lexer(object):
"""Class that implements a lexer for a given environment. Automatically
created by the environment class, usually you don't have to do that.
@@ -419,9 +466,11 @@ class Lexer(object):
def __init__(self, environment):
# shortcuts
- c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
+ def c(x):
+ return re.compile(x, re.M | re.S)
+
# lexing rules for tags
tag_rules = [
(whitespace_re, TOKEN_WHITESPACE, None),
@@ -429,7 +478,7 @@ def __init__(self, environment):
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
- (operator_re, TOKEN_OPERATOR, None)
+ (operator_re, TOKEN_OPERATOR, None),
]
# assemble the root lexing rule. because "|" is ungreedy
@@ -441,108 +490,120 @@ def __init__(self, environment):
root_tag_rules = compile_rules(environment)
# block suffix if trimming is enabled
- block_suffix_re = environment.trim_blocks and '\\n?' or ''
-
- # strip leading spaces if lstrip_blocks is enabled
- prefix_re = {}
- if environment.lstrip_blocks:
- # use '{%+' to manually disable lstrip_blocks behavior
- no_lstrip_re = e('+')
- # detect overlap between block and variable or comment strings
- block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
- # make sure we don't mistake a block for a variable or a comment
- m = block_diff.match(environment.comment_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
- m = block_diff.match(environment.variable_start_string)
- no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
-
- # detect overlap between comment and variable strings
- comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
- m = comment_diff.match(environment.variable_start_string)
- no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''
-
- lstrip_re = r'^[ \t]*'
- block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
- lstrip_re,
- e(environment.block_start_string),
- no_lstrip_re,
- e(environment.block_start_string),
- )
- comment_prefix_re = r'%s%s%s|%s\+?' % (
- lstrip_re,
- e(environment.comment_start_string),
- no_variable_re,
- e(environment.comment_start_string),
- )
- prefix_re['block'] = block_prefix_re
- prefix_re['comment'] = comment_prefix_re
- else:
- block_prefix_re = '%s' % e(environment.block_start_string)
+ block_suffix_re = environment.trim_blocks and "\\n?" or ""
+
+ # If lstrip is enabled, it should not be applied if there is any
+ # non-whitespace between the newline and block.
+ self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
self.newline_sequence = environment.newline_sequence
self.keep_trailing_newline = environment.keep_trailing_newline
# global lexing rules
self.rules = {
- 'root': [
+ "root": [
# directives
- (c('(.*?)(?:%s)' % '|'.join(
- [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string)
- )] + [
- r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r))
- for n, r in root_tag_rules
- ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
+ (
+ c(
+ "(.*?)(?:%s)"
+ % "|".join(
+ [
+ r"(?P%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ )
+ ]
+ + [
+ r"(?P<%s>%s(\-|\+|))" % (n, r)
+ for n, r in root_tag_rules
+ ]
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, "#bygroup"),
+ "#bygroup",
+ ),
# data
- (c('.+'), TOKEN_DATA, None)
+ (c(".+"), TOKEN_DATA, None),
],
# comments
TOKEN_COMMENT_BEGIN: [
- (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
- e(environment.comment_end_string),
- e(environment.comment_end_string),
- block_suffix_re
- )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
- (c('(.)'), (Failure('Missing end of comment tag'),), None)
+ (
+ c(
+ r"(.*?)((?:\-%s\s*|%s)%s)"
+ % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re,
+ )
+ ),
+ (TOKEN_COMMENT, TOKEN_COMMENT_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of comment tag"),), None),
],
# blocks
TOKEN_BLOCK_BEGIN: [
- (c(r'(?:\-%s\s*|%s)%s' % (
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), TOKEN_BLOCK_END, '#pop'),
- ] + tag_rules,
+ (
+ c(
+ r"(?:\-%s\s*|%s)%s"
+ % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ TOKEN_BLOCK_END,
+ "#pop",
+ ),
+ ]
+ + tag_rules,
# variables
TOKEN_VARIABLE_BEGIN: [
- (c(r'\-%s\s*|%s' % (
- e(environment.variable_end_string),
- e(environment.variable_end_string)
- )), TOKEN_VARIABLE_END, '#pop')
- ] + tag_rules,
+ (
+ c(
+ r"\-%s\s*|%s"
+ % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string),
+ )
+ ),
+ TOKEN_VARIABLE_END,
+ "#pop",
+ )
+ ]
+ + tag_rules,
# raw block
TOKEN_RAW_BEGIN: [
- (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
- e(environment.block_start_string),
- block_prefix_re,
- e(environment.block_end_string),
- e(environment.block_end_string),
- block_suffix_re
- )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
- (c('(.)'), (Failure('Missing end of raw directive'),), None)
+ (
+ c(
+ r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of raw directive"),), None),
],
# line statements
TOKEN_LINESTATEMENT_BEGIN: [
- (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
- ] + tag_rules,
+ (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
+ ]
+ + tag_rules,
# line comments
TOKEN_LINECOMMENT_BEGIN: [
- (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
- TOKEN_LINECOMMENT_END), '#pop')
- ]
+ (
+ c(r"(.*?)()(?=\n|$)"),
+ (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
+ "#pop",
+ )
+ ],
}
def _normalize_newlines(self, value):
@@ -550,8 +611,7 @@ def _normalize_newlines(self, value):
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name=None, filename=None, state=None):
- """Calls tokeniter + tokenize and wraps it in a token stream.
- """
+ """Calls tokeniter + tokenize and wraps it in a token stream."""
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
@@ -562,37 +622,40 @@ def wrap(self, stream, name=None, filename=None):
for lineno, token, value in stream:
if token in ignored_tokens:
continue
- elif token == 'linestatement_begin':
- token = 'block_begin'
- elif token == 'linestatement_end':
- token = 'block_end'
+ elif token == TOKEN_LINESTATEMENT_BEGIN:
+ token = TOKEN_BLOCK_BEGIN
+ elif token == TOKEN_LINESTATEMENT_END:
+ token = TOKEN_BLOCK_END
# we are not interested in those tokens in the parser
- elif token in ('raw_begin', 'raw_end'):
+ elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
continue
- elif token == 'data':
+ elif token == TOKEN_DATA:
value = self._normalize_newlines(value)
- elif token == 'keyword':
+ elif token == "keyword":
token = value
- elif token == 'name':
+ elif token == TOKEN_NAME:
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
- 'Invalid character in identifier',
- lineno, name, filename)
- elif token == 'string':
+ "Invalid character in identifier", lineno, name, filename
+ )
+ elif token == TOKEN_STRING:
# try to unescape string
try:
- value = self._normalize_newlines(value[1:-1]) \
- .encode('ascii', 'backslashreplace') \
- .decode('unicode-escape')
+ value = (
+ self._normalize_newlines(value[1:-1])
+ .encode("ascii", "backslashreplace")
+ .decode("unicode-escape")
+ )
except Exception as e:
- msg = str(e).split(':')[-1].strip()
+ msg = str(e).split(":")[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
- elif token == 'integer':
- value = int(value)
- elif token == 'float':
- value = float(value)
- elif token == 'operator':
+ elif token == TOKEN_INTEGER:
+ value = int(value.replace("_", ""))
+ elif token == TOKEN_FLOAT:
+ # remove all "_" first to support more Python versions
+ value = literal_eval(value.replace("_", ""))
+ elif token == TOKEN_OPERATOR:
token = operators[value]
yield Token(lineno, token, value)
@@ -603,23 +666,23 @@ def tokeniter(self, source, name, filename=None, state=None):
source = text_type(source)
lines = source.splitlines()
if self.keep_trailing_newline and source:
- for newline in ('\r\n', '\r', '\n'):
+ for newline in ("\r\n", "\r", "\n"):
if source.endswith(newline):
- lines.append('')
+ lines.append("")
break
- source = '\n'.join(lines)
+ source = "\n".join(lines)
pos = 0
lineno = 1
- stack = ['root']
- if state is not None and state != 'root':
- assert state in ('variable', 'block'), 'invalid state'
- stack.append(state + '_begin')
- else:
- state = 'root'
+ stack = ["root"]
+ if state is not None and state != "root":
+ assert state in ("variable", "block"), "invalid state"
+ stack.append(state + "_begin")
statetokens = self.rules[stack[-1]]
source_length = len(source)
-
balancing_stack = []
+ lstrip_unless_re = self.lstrip_unless_re
+ newlines_stripped = 0
+ line_starting = True
while 1:
# tokenizer loop
@@ -633,13 +696,48 @@ def tokeniter(self, source, name, filename=None, state=None):
# are balanced. continue parsing with the lower rule which
# is the operator rule. do this only if the end tags look
# like operators
- if balancing_stack and \
- tokens in ('variable_end', 'block_end',
- 'linestatement_end'):
+ if balancing_stack and tokens in (
+ TOKEN_VARIABLE_END,
+ TOKEN_BLOCK_END,
+ TOKEN_LINESTATEMENT_END,
+ ):
continue
# tuples support more options
if isinstance(tokens, tuple):
+ groups = m.groups()
+
+ if isinstance(tokens, OptionalLStrip):
+ # Rule supports lstrip. Match will look like
+ # text, block type, whitespace control, type, control, ...
+ text = groups[0]
+
+ # Skipping the text and first type, every other group is the
+ # whitespace control for each type. One of the groups will be
+ # -, +, or empty string instead of None.
+ strip_sign = next(g for g in groups[2::2] if g is not None)
+
+ if strip_sign == "-":
+ # Strip all whitespace between the text and the tag.
+ stripped = text.rstrip()
+ newlines_stripped = text[len(stripped) :].count("\n")
+ groups = (stripped,) + groups[1:]
+ elif (
+ # Not marked for preserving whitespace.
+ strip_sign != "+"
+ # lstrip is enabled.
+ and lstrip_unless_re is not None
+ # Not a variable expression.
+ and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
+ ):
+ # The start of text between the last newline and the tag.
+ l_pos = text.rfind("\n") + 1
+ if l_pos > 0 or line_starting:
+ # If there's only whitespace between the newline and the
+ # tag, strip it.
+ if not lstrip_unless_re.search(text, l_pos):
+ groups = (text[:l_pos],) + groups[1:]
+
for idx, token in enumerate(tokens):
# failure group
if token.__class__ is Failure:
@@ -647,51 +745,57 @@ def tokeniter(self, source, name, filename=None, state=None):
# bygroup is a bit more complex, in that case we
# yield for the current token the first named
# group that matched
- elif token == '#bygroup':
+ elif token == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
yield lineno, key, value
- lineno += value.count('\n')
+ lineno += value.count("\n")
break
else:
- raise RuntimeError('%r wanted to resolve '
- 'the token dynamically'
- ' but no group matched'
- % regex)
+ raise RuntimeError(
+ "%r wanted to resolve "
+ "the token dynamically"
+ " but no group matched" % regex
+ )
# normal group
else:
- data = m.group(idx + 1)
+ data = groups[idx]
if data or token not in ignore_if_empty:
yield lineno, token, data
- lineno += data.count('\n')
+ lineno += data.count("\n") + newlines_stripped
+ newlines_stripped = 0
# strings as token just are yielded as it.
else:
data = m.group()
# update brace/parentheses balance
- if tokens == 'operator':
- if data == '{':
- balancing_stack.append('}')
- elif data == '(':
- balancing_stack.append(')')
- elif data == '[':
- balancing_stack.append(']')
- elif data in ('}', ')', ']'):
+ if tokens == TOKEN_OPERATOR:
+ if data == "{":
+ balancing_stack.append("}")
+ elif data == "(":
+ balancing_stack.append(")")
+ elif data == "[":
+ balancing_stack.append("]")
+ elif data in ("}", ")", "]"):
if not balancing_stack:
- raise TemplateSyntaxError('unexpected \'%s\'' %
- data, lineno, name,
- filename)
+ raise TemplateSyntaxError(
+ "unexpected '%s'" % data, lineno, name, filename
+ )
expected_op = balancing_stack.pop()
if expected_op != data:
- raise TemplateSyntaxError('unexpected \'%s\', '
- 'expected \'%s\'' %
- (data, expected_op),
- lineno, name,
- filename)
+ raise TemplateSyntaxError(
+ "unexpected '%s', "
+ "expected '%s'" % (data, expected_op),
+ lineno,
+ name,
+ filename,
+ )
# yield items
if data or tokens not in ignore_if_empty:
yield lineno, tokens, data
- lineno += data.count('\n')
+ lineno += data.count("\n")
+
+ line_starting = m.group()[-1:] == "\n"
# fetch new position into new variable so that we can check
# if there is a internal parsing error which would result
@@ -701,19 +805,20 @@ def tokeniter(self, source, name, filename=None, state=None):
# handle state changes
if new_state is not None:
# remove the uppermost state
- if new_state == '#pop':
+ if new_state == "#pop":
stack.pop()
# resolve the new state by group checking
- elif new_state == '#bygroup':
+ elif new_state == "#bygroup":
for key, value in iteritems(m.groupdict()):
if value is not None:
stack.append(key)
break
else:
- raise RuntimeError('%r wanted to resolve the '
- 'new state dynamically but'
- ' no group matched' %
- regex)
+ raise RuntimeError(
+ "%r wanted to resolve the "
+ "new state dynamically but"
+ " no group matched" % regex
+ )
# direct state name given
else:
stack.append(new_state)
@@ -722,8 +827,9 @@ def tokeniter(self, source, name, filename=None, state=None):
# this means a loop without break condition, avoid that and
# raise error
elif pos2 == pos:
- raise RuntimeError('%r yielded empty string without '
- 'stack change' % regex)
+ raise RuntimeError(
+ "%r yielded empty string without stack change" % regex
+ )
# publish new function and start again
pos = pos2
break
@@ -734,6 +840,9 @@ def tokeniter(self, source, name, filename=None, state=None):
if pos >= source_length:
return
# something went wrong
- raise TemplateSyntaxError('unexpected char %r at %d' %
- (source[pos], pos), lineno,
- name, filename)
+ raise TemplateSyntaxError(
+ "unexpected char %r at %d" % (source[pos], pos),
+ lineno,
+ name,
+ filename,
+ )
diff --git a/external/python/jinja2/loaders.py b/external/python/jinja2/loaders.py
index 4c797937..457c4b59 100644
--- a/external/python/jinja2/loaders.py
+++ b/external/python/jinja2/loaders.py
@@ -1,22 +1,21 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.loaders
- ~~~~~~~~~~~~~~
-
- Jinja loader classes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
+"""API and implementations for loading templates from different data
+sources.
"""
import os
import sys
import weakref
-from types import ModuleType
-from os import path
from hashlib import sha1
-from jinja2.exceptions import TemplateNotFound
-from jinja2.utils import open_if_exists, internalcode
-from jinja2._compat import string_types, iteritems
+from os import path
+from types import ModuleType
+
+from ._compat import abc
+from ._compat import fspath
+from ._compat import iteritems
+from ._compat import string_types
+from .exceptions import TemplateNotFound
+from .utils import internalcode
+from .utils import open_if_exists
def split_template_path(template):
@@ -24,12 +23,14 @@ def split_template_path(template):
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
- for piece in template.split('/'):
- if path.sep in piece \
- or (path.altsep and path.altsep in piece) or \
- piece == path.pardir:
+ for piece in template.split("/"):
+ if (
+ path.sep in piece
+ or (path.altsep and path.altsep in piece)
+ or piece == path.pardir
+ ):
raise TemplateNotFound(template)
- elif piece and piece != '.':
+ elif piece and piece != ".":
pieces.append(piece)
return pieces
@@ -86,15 +87,16 @@ def get_source(self, environment, template):
the template will be reloaded.
"""
if not self.has_source_access:
- raise RuntimeError('%s cannot provide access to the source' %
- self.__class__.__name__)
+ raise RuntimeError(
+ "%s cannot provide access to the source" % self.__class__.__name__
+ )
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
- raise TypeError('this loader cannot iterate over all templates')
+ raise TypeError("this loader cannot iterate over all templates")
@internalcode
def load(self, environment, name, globals=None):
@@ -131,8 +133,9 @@ def load(self, environment, name, globals=None):
bucket.code = code
bcc.set_bucket(bucket)
- return environment.template_class.from_code(environment, code,
- globals, uptodate)
+ return environment.template_class.from_code(
+ environment, code, globals, uptodate
+ )
class FileSystemLoader(BaseLoader):
@@ -153,14 +156,20 @@ class FileSystemLoader(BaseLoader):
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
- .. versionchanged:: 2.8+
- The *followlinks* parameter was added.
+ .. versionchanged:: 2.8
+ The ``followlinks`` parameter was added.
"""
- def __init__(self, searchpath, encoding='utf-8', followlinks=False):
- if isinstance(searchpath, string_types):
+ def __init__(self, searchpath, encoding="utf-8", followlinks=False):
+ if not isinstance(searchpath, abc.Iterable) or isinstance(
+ searchpath, string_types
+ ):
searchpath = [searchpath]
- self.searchpath = list(searchpath)
+
+ # In Python 3.5, os.path.join doesn't support Path. This can be
+ # simplified to list(searchpath) when Python 3.5 is dropped.
+ self.searchpath = [fspath(p) for p in searchpath]
+
self.encoding = encoding
self.followlinks = followlinks
@@ -183,6 +192,7 @@ def uptodate():
return path.getmtime(filename) == mtime
except OSError:
return False
+
return contents, filename, uptodate
raise TemplateNotFound(template)
@@ -190,12 +200,14 @@ def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
- for dirpath, dirnames, filenames in walk_dir:
+ for dirpath, _, filenames in walk_dir:
for filename in filenames:
- template = os.path.join(dirpath, filename) \
- [len(searchpath):].strip(os.path.sep) \
- .replace(os.path.sep, '/')
- if template[:2] == './':
+ template = (
+ os.path.join(dirpath, filename)[len(searchpath) :]
+ .strip(os.path.sep)
+ .replace(os.path.sep, "/")
+ )
+ if template[:2] == "./":
template = template[2:]
if template not in found:
found.add(template)
@@ -217,10 +229,11 @@ class PackageLoader(BaseLoader):
from the file system and not a zip file.
"""
- def __init__(self, package_name, package_path='templates',
- encoding='utf-8'):
- from pkg_resources import DefaultProvider, ResourceManager, \
- get_provider
+ def __init__(self, package_name, package_path="templates", encoding="utf-8"):
+ from pkg_resources import DefaultProvider
+ from pkg_resources import get_provider
+ from pkg_resources import ResourceManager
+
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
@@ -230,14 +243,17 @@ def __init__(self, package_name, package_path='templates',
def get_source(self, environment, template):
pieces = split_template_path(template)
- p = '/'.join((self.package_path,) + tuple(pieces))
+ p = "/".join((self.package_path,) + tuple(pieces))
+
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
+
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
+
def uptodate():
try:
return path.getmtime(filename) == mtime
@@ -249,19 +265,24 @@ def uptodate():
def list_templates(self):
path = self.package_path
- if path[:2] == './':
+
+ if path[:2] == "./":
path = path[2:]
- elif path == '.':
- path = ''
+ elif path == ".":
+ path = ""
+
offset = len(path)
results = []
+
def _walk(path):
for filename in self.provider.resource_listdir(path):
- fullname = path + '/' + filename
+ fullname = path + "/" + filename
+
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
- results.append(fullname[offset:].lstrip('/'))
+ results.append(fullname[offset:].lstrip("/"))
+
_walk(path)
results.sort()
return results
@@ -334,7 +355,7 @@ class PrefixLoader(BaseLoader):
by loading ``'app2/index.html'`` the file from the second.
"""
- def __init__(self, mapping, delimiter='/'):
+ def __init__(self, mapping, delimiter="/"):
self.mapping = mapping
self.delimiter = delimiter
@@ -434,19 +455,20 @@ class ModuleLoader(BaseLoader):
has_source_access = False
def __init__(self, path):
- package_name = '_jinja2_module_templates_%x' % id(self)
+ package_name = "_jinja2_module_templates_%x" % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
- if isinstance(path, string_types):
+
+ if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
path = [path]
- else:
- path = list(path)
- mod.__path__ = path
- sys.modules[package_name] = weakref.proxy(mod,
- lambda x: sys.modules.pop(package_name, None))
+ mod.__path__ = [fspath(p) for p in path]
+
+ sys.modules[package_name] = weakref.proxy(
+ mod, lambda x: sys.modules.pop(package_name, None)
+ )
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
@@ -456,20 +478,20 @@ def __init__(self, path):
@staticmethod
def get_template_key(name):
- return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
+ return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
@staticmethod
def get_module_filename(name):
- return ModuleLoader.get_template_key(name) + '.py'
+ return ModuleLoader.get_template_key(name) + ".py"
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
- module = '%s.%s' % (self.package_name, key)
+ module = "%s.%s" % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
- mod = __import__(module, None, None, ['root'])
+ mod = __import__(module, None, None, ["root"])
except ImportError:
raise TemplateNotFound(name)
@@ -478,4 +500,5 @@ def load(self, environment, name, globals=None):
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
- environment, mod.__dict__, globals)
+ environment, mod.__dict__, globals
+ )
diff --git a/external/python/jinja2/meta.py b/external/python/jinja2/meta.py
index 7421914f..3795aace 100644
--- a/external/python/jinja2/meta.py
+++ b/external/python/jinja2/meta.py
@@ -1,25 +1,18 @@
# -*- coding: utf-8 -*-
+"""Functions that expose information about templates that might be
+interesting for introspection.
"""
- jinja2.meta
- ~~~~~~~~~~~
-
- This module implements various functions that exposes information about
- templates that might be interesting for various kinds of applications.
-
- :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.compiler import CodeGenerator
-from jinja2._compat import string_types, iteritems
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from .compiler import CodeGenerator
class TrackingCodeGenerator(CodeGenerator):
"""We abuse the code generator for introspection."""
def __init__(self, environment):
- CodeGenerator.__init__(self, environment, '<introspection>',
- '<introspection>')
+ CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
self.undeclared_identifiers = set()
def write(self, x):
@@ -29,7 +22,7 @@ def enter_frame(self, frame):
"""Remember all undeclared identifiers."""
CodeGenerator.enter_frame(self, frame)
for _, (action, param) in iteritems(frame.symbols.loads):
- if action == 'resolve':
+ if action == "resolve" and param not in self.environment.globals:
self.undeclared_identifiers.add(param)
@@ -72,8 +65,9 @@ def find_referenced_templates(ast):
This function is useful for dependency tracking. For example if you want
to rebuild parts of the website after a layout template has changed.
"""
- for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
- nodes.Include)):
+ for node in ast.find_all(
+ (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
+ ):
if not isinstance(node.template, nodes.Const):
# a tuple with some non consts in there
if isinstance(node.template, (nodes.Tuple, nodes.List)):
@@ -96,8 +90,9 @@ def find_referenced_templates(ast):
# a tuple or list (latter *should* not happen) made of consts,
# yield the consts that are strings. We could warn here for
# non string values
- elif isinstance(node, nodes.Include) and \
- isinstance(node.template.value, (tuple, list)):
+ elif isinstance(node, nodes.Include) and isinstance(
+ node.template.value, (tuple, list)
+ ):
for template_name in node.template.value:
if isinstance(template_name, string_types):
yield template_name
diff --git a/external/python/jinja2/nativetypes.py b/external/python/jinja2/nativetypes.py
index fe17e413..a9ead4e2 100644
--- a/external/python/jinja2/nativetypes.py
+++ b/external/python/jinja2/nativetypes.py
@@ -1,19 +1,23 @@
-import sys
from ast import literal_eval
-from itertools import islice, chain
-from jinja2 import nodes
-from jinja2._compat import text_type
-from jinja2.compiler import CodeGenerator, has_safe_repr
-from jinja2.environment import Environment, Template
-from jinja2.utils import concat, escape
+from itertools import chain
+from itertools import islice
+
+from . import nodes
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import has_safe_repr
+from .environment import Environment
+from .environment import Template
def native_concat(nodes):
- """Return a native Python type from the list of compiled nodes. If the
- result is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
+ """Return a native Python type from the list of compiled nodes. If
+ the result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
+ the string is returned.
+
+ :param nodes: Iterable of nodes to concatenate.
"""
head = list(islice(nodes, 2))
@@ -21,200 +25,70 @@ def native_concat(nodes):
return None
if len(head) == 1:
- out = head[0]
+ raw = head[0]
else:
- out = u''.join([text_type(v) for v in chain(head, nodes)])
+ raw = u"".join([text_type(v) for v in chain(head, nodes)])
try:
- return literal_eval(out)
+ return literal_eval(raw)
except (ValueError, SyntaxError, MemoryError):
- return out
+ return raw
class NativeCodeGenerator(CodeGenerator):
- """A code generator which avoids injecting ``to_string()`` calls around the
- internal code Jinja uses to render templates.
+ """A code generator which renders Python types by not adding
+ ``to_string()`` around output nodes.
"""
- def visit_Output(self, node, frame):
- """Same as :meth:`CodeGenerator.visit_Output`, but do not call
- ``to_string`` on output nodes in generated code.
- """
- if self.has_known_extends and frame.require_output_check:
- return
-
- finalize = self.environment.finalize
- finalize_context = getattr(finalize, 'contextfunction', False)
- finalize_eval = getattr(finalize, 'evalcontextfunction', False)
- finalize_env = getattr(finalize, 'environmentfunction', False)
-
- if finalize is not None:
- if finalize_context or finalize_eval:
- const_finalize = None
- elif finalize_env:
- def const_finalize(x):
- return finalize(self.environment, x)
- else:
- const_finalize = finalize
- else:
- def const_finalize(x):
- return x
-
- # If we are inside a frame that requires output checking, we do so.
- outdent_later = False
-
- if frame.require_output_check:
- self.writeline('if parent_template is None:')
- self.indent()
- outdent_later = True
-
- # Try to evaluate as many chunks as possible into a static string at
- # compile time.
- body = []
-
- for child in node.nodes:
- try:
- if const_finalize is None:
- raise nodes.Impossible()
-
- const = child.as_const(frame.eval_ctx)
- if not has_safe_repr(const):
- raise nodes.Impossible()
- except nodes.Impossible:
- body.append(child)
- continue
-
- # the frame can't be volatile here, because otherwise the as_const
- # function would raise an Impossible exception at that point
- try:
- if frame.eval_ctx.autoescape:
- if hasattr(const, '__html__'):
- const = const.__html__()
- else:
- const = escape(const)
-
- const = const_finalize(const)
- except Exception:
- # if something goes wrong here we evaluate the node at runtime
- # for easier debugging
- body.append(child)
- continue
-
- if body and isinstance(body[-1], list):
- body[-1].append(const)
- else:
- body.append([const])
-
- # if we have less than 3 nodes or a buffer we yield or extend/append
- if len(body) < 3 or frame.buffer is not None:
- if frame.buffer is not None:
- # for one item we append, for more we extend
- if len(body) == 1:
- self.writeline('%s.append(' % frame.buffer)
- else:
- self.writeline('%s.extend((' % frame.buffer)
-
- self.indent()
-
- for item in body:
- if isinstance(item, list):
- val = repr(native_concat(item))
-
- if frame.buffer is None:
- self.writeline('yield ' + val)
- else:
- self.writeline(val + ',')
- else:
- if frame.buffer is None:
- self.writeline('yield ', item)
- else:
- self.newline(item)
-
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
-
- close += 1
-
- self.visit(item, frame)
-
- if close > 0:
- self.write(')' * close)
-
- if frame.buffer is not None:
- self.write(',')
-
- if frame.buffer is not None:
- # close the open parentheses
- self.outdent()
- self.writeline(len(body) == 1 and ')' or '))')
-
- # otherwise we create a format string as this is faster in that case
- else:
- format = []
- arguments = []
-
- for item in body:
- if isinstance(item, list):
- format.append(native_concat(item).replace('%', '%%'))
- else:
- format.append('%s')
- arguments.append(item)
-
- self.writeline('yield ')
- self.write(repr(concat(format)) + ' % (')
- self.indent()
-
- for argument in arguments:
- self.newline(argument)
- close = 0
-
- if finalize is not None:
- self.write('environment.finalize(')
-
- if finalize_context:
- self.write('context, ')
- elif finalize_eval:
- self.write('context.eval_ctx, ')
- elif finalize_env:
- self.write('environment, ')
-
- close += 1
-
- self.visit(argument, frame)
- self.write(')' * close + ', ')
-
- self.outdent()
- self.writeline(')')
+ @staticmethod
+ def _default_finalize(value):
+ return value
+
+ def _output_const_repr(self, group):
+ return repr(u"".join([text_type(v) for v in group]))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ const = node.as_const(frame.eval_ctx)
+
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
- if outdent_later:
- self.outdent()
+ if isinstance(node, nodes.TemplateData):
+ return const
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(")")
+
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
class NativeTemplate(Template):
+ environment_class = NativeEnvironment
+
def render(self, *args, **kwargs):
- """Render the template to produce a native Python type. If the result
- is a single node, its value is returned. Otherwise, the nodes are
- concatenated as strings. If the result can be parsed with
- :func:`ast.literal_eval`, the parsed value is returned. Otherwise, the
- string is returned.
+ """Render the template to produce a native Python type. If the
+ result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed
+ with :func:`ast.literal_eval`, the parsed value is returned.
+ Otherwise, the string is returned.
"""
vars = dict(*args, **kwargs)
try:
return native_concat(self.root_render_func(self.new_context(vars)))
except Exception:
- exc_info = sys.exc_info()
+ return self.environment.handle_exception()
- return self.environment.handle_exception(exc_info, True)
-
-class NativeEnvironment(Environment):
- """An environment that renders templates to native Python types."""
-
- code_generator_class = NativeCodeGenerator
- template_class = NativeTemplate
+NativeEnvironment.template_class = NativeTemplate
diff --git a/external/python/jinja2/nodes.py b/external/python/jinja2/nodes.py
index 4d9a01ad..95bd614a 100644
--- a/external/python/jinja2/nodes.py
+++ b/external/python/jinja2/nodes.py
@@ -1,54 +1,39 @@
# -*- coding: utf-8 -*-
+"""AST nodes generated by the parser for the compiler. Also provides
+some node tree helper functions used by the parser and compiler in order
+to normalize nodes.
"""
- jinja2.nodes
- ~~~~~~~~~~~~
-
- This module implements additional nodes derived from the ast base node.
-
- It also provides some node tree helper functions like `in_lineno` and
- `get_nodes` used by the parser and translator in order to normalize
- python and jinja nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import types
import operator
-
from collections import deque
-from jinja2.utils import Markup
-from jinja2._compat import izip, with_metaclass, text_type, PY2
-
-#: the types we support for context functions
-_context_function_types = (types.FunctionType, types.MethodType)
+from markupsafe import Markup
+from ._compat import izip
+from ._compat import PY2
+from ._compat import text_type
+from ._compat import with_metaclass
_binop_to_func = {
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod,
- '+': operator.add,
- '-': operator.sub
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ "+": operator.add,
+ "-": operator.sub,
}
-_uaop_to_func = {
- 'not': operator.not_,
- '+': operator.pos,
- '-': operator.neg
-}
+_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
_cmpop_to_func = {
- 'eq': operator.eq,
- 'ne': operator.ne,
- 'gt': operator.gt,
- 'gteq': operator.ge,
- 'lt': operator.lt,
- 'lteq': operator.le,
- 'in': lambda a, b: a in b,
- 'notin': lambda a, b: a not in b
+ "eq": operator.eq,
+ "ne": operator.ne,
+ "gt": operator.gt,
+ "gteq": operator.ge,
+ "lt": operator.lt,
+ "lteq": operator.le,
+ "in": lambda a, b: a in b,
+ "notin": lambda a, b: a not in b,
}
@@ -61,16 +46,16 @@ class NodeType(type):
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
- def __new__(cls, name, bases, d):
- for attr in 'fields', 'attributes':
+ def __new__(mcs, name, bases, d):
+ for attr in "fields", "attributes":
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
- assert len(bases) == 1, 'multiple inheritance not allowed'
- assert len(storage) == len(set(storage)), 'layout conflict'
+ assert len(bases) == 1, "multiple inheritance not allowed"
+ assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
- d.setdefault('abstract', False)
- return type.__new__(cls, name, bases, d)
+ d.setdefault("abstract", False)
+ return type.__new__(mcs, name, bases, d)
class EvalContext(object):
@@ -97,15 +82,17 @@ def revert(self, old):
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
- raise RuntimeError('if no eval context is passed, the '
- 'node must have an attached '
- 'environment.')
+ raise RuntimeError(
+ "if no eval context is passed, the "
+ "node must have an attached "
+ "environment."
+ )
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
- """Baseclass for all Jinja2 nodes. There are a number of nodes available
+ """Baseclass for all Jinja nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
@@ -120,30 +107,32 @@ class Node(with_metaclass(NodeType, object)):
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
+
fields = ()
- attributes = ('lineno', 'environment')
+ attributes = ("lineno", "environment")
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
- raise TypeError('abstract nodes are not instanciable')
+ raise TypeError("abstract nodes are not instantiable")
if fields:
if len(fields) != len(self.fields):
if not self.fields:
- raise TypeError('%r takes 0 arguments' %
- self.__class__.__name__)
- raise TypeError('%r takes 0 or %d argument%s' % (
- self.__class__.__name__,
- len(self.fields),
- len(self.fields) != 1 and 's' or ''
- ))
+ raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
+ raise TypeError(
+ "%r takes 0 or %d argument%s"
+ % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and "s" or "",
+ )
+ )
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
- raise TypeError('unknown attribute %r' %
- next(iter(attributes)))
+ raise TypeError("unknown attribute %r" % next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
@@ -153,9 +142,11 @@ def iter_fields(self, exclude=None, only=None):
should be sets or tuples of field names.
"""
for name in self.fields:
- if (exclude is only is None) or \
- (exclude is not None and name not in exclude) or \
- (only is not None and name in only):
+ if (
+ (exclude is only is None)
+ or (exclude is not None and name not in exclude)
+ or (only is not None and name in only)
+ ):
try:
yield name, getattr(self, name)
except AttributeError:
@@ -166,7 +157,7 @@ def iter_child_nodes(self, exclude=None, only=None):
over all fields and yields the values of they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
- for field, item in self.iter_fields(exclude, only):
+ for _, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
@@ -200,7 +191,7 @@ def set_ctx(self, ctx):
todo = deque([self])
while todo:
node = todo.popleft()
- if 'ctx' in node.fields:
+ if "ctx" in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
@@ -210,7 +201,7 @@ def set_lineno(self, lineno, override=False):
todo = deque([self])
while todo:
node = todo.popleft()
- if 'lineno' in node.attributes:
+ if "lineno" in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
@@ -226,8 +217,9 @@ def set_environment(self, environment):
return self
def __eq__(self, other):
- return type(self) is type(other) and \
- tuple(self.iter_fields()) == tuple(other.iter_fields())
+ return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
+ other.iter_fields()
+ )
def __ne__(self, other):
return not self.__eq__(other)
@@ -236,10 +228,9 @@ def __ne__(self, other):
__hash__ = object.__hash__
def __repr__(self):
- return '%s(%s)' % (
+ return "%s(%s)" % (
self.__class__.__name__,
- ', '.join('%s=%r' % (arg, getattr(self, arg, None)) for
- arg in self.fields)
+ ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
)
def dump(self):
@@ -248,37 +239,39 @@ def _dump(node):
buf.append(repr(node))
return
- buf.append('nodes.%s(' % node.__class__.__name__)
+ buf.append("nodes.%s(" % node.__class__.__name__)
if not node.fields:
- buf.append(')')
+ buf.append(")")
return
for idx, field in enumerate(node.fields):
if idx:
- buf.append(', ')
+ buf.append(", ")
value = getattr(node, field)
if isinstance(value, list):
- buf.append('[')
+ buf.append("[")
for idx, item in enumerate(value):
if idx:
- buf.append(', ')
+ buf.append(", ")
_dump(item)
- buf.append(']')
+ buf.append("]")
else:
_dump(value)
- buf.append(')')
+ buf.append(")")
+
buf = []
_dump(self)
- return ''.join(buf)
-
+ return "".join(buf)
class Stmt(Node):
"""Base node for all statements."""
+
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
+
abstract = True
@@ -286,19 +279,22 @@ class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
- fields = ('body',)
+
+ fields = ("body",)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
- fields = ('nodes',)
+
+ fields = ("nodes",)
class Extends(Stmt):
"""Represents an extends statement."""
- fields = ('template',)
+
+ fields = ("template",)
class For(Stmt):
@@ -309,12 +305,14 @@ class For(Stmt):
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
- fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive')
+
+ fields = ("target", "iter", "body", "else_", "test", "recursive")
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
- fields = ('test', 'body', 'elif_', 'else_')
+
+ fields = ("test", "body", "elif_", "else_")
class Macro(Stmt):
@@ -322,19 +320,22 @@ class Macro(Stmt):
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
- fields = ('name', 'args', 'defaults', 'body')
+
+ fields = ("name", "args", "defaults", "body")
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
- fields = ('call', 'args', 'defaults', 'body')
+
+ fields = ("call", "args", "defaults", "body")
class FilterBlock(Stmt):
"""Node for filter sections."""
- fields = ('body', 'filter')
+
+ fields = ("body", "filter")
class With(Stmt):
@@ -343,22 +344,26 @@ class With(Stmt):
.. versionadded:: 2.9.3
"""
- fields = ('targets', 'values', 'body')
+
+ fields = ("targets", "values", "body")
class Block(Stmt):
"""A node that represents a block."""
- fields = ('name', 'body', 'scoped')
+
+ fields = ("name", "body", "scoped")
class Include(Stmt):
"""A node that represents the include tag."""
- fields = ('template', 'with_context', 'ignore_missing')
+
+ fields = ("template", "with_context", "ignore_missing")
class Import(Stmt):
"""A node that represents the import tag."""
- fields = ('template', 'target', 'with_context')
+
+ fields = ("template", "target", "with_context")
class FromImport(Stmt):
@@ -372,26 +377,31 @@ class FromImport(Stmt):
The list of names may contain tuples if aliases are wanted.
"""
- fields = ('template', 'names', 'with_context')
+
+ fields = ("template", "names", "with_context")
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
- fields = ('node',)
+
+ fields = ("node",)
class Assign(Stmt):
"""Assigns an expression to a target."""
- fields = ('target', 'node')
+
+ fields = ("target", "node")
class AssignBlock(Stmt):
"""Assigns a block to a target."""
- fields = ('target', 'filter', 'body')
+
+ fields = ("target", "filter", "body")
class Expr(Node):
"""Baseclass for all expressions."""
+
abstract = True
def as_const(self, eval_ctx=None):
@@ -414,15 +424,18 @@ def can_assign(self):
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
- fields = ('left', 'right')
+
+ fields = ("left", "right")
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_binops:
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_binops
+ ):
raise Impossible()
f = _binop_to_func[self.operator]
try:
@@ -433,15 +446,18 @@ def as_const(self, eval_ctx=None):
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
- fields = ('node',)
+
+ fields = ("node",)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
- if self.environment.sandboxed and \
- self.operator in self.environment.intercepted_unops:
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_unops
+ ):
raise Impossible()
f = _uaop_to_func[self.operator]
try:
@@ -458,16 +474,17 @@ class Name(Expr):
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
- fields = ('name', 'ctx')
+
+ fields = ("name", "ctx")
def can_assign(self):
- return self.name not in ('true', 'false', 'none',
- 'True', 'False', 'None')
+ return self.name not in ("true", "false", "none", "True", "False", "None")
class NSRef(Expr):
"""Reference to a namespace value assignment"""
- fields = ('name', 'attr')
+
+ fields = ("name", "attr")
def can_assign(self):
# We don't need any special checks here; NSRef assignments have a
@@ -479,6 +496,7 @@ def can_assign(self):
class Literal(Expr):
"""Baseclass for literals."""
+
abstract = True
@@ -488,14 +506,18 @@ class Const(Literal):
    complex values such as lists too.  Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) are allowed.
"""
- fields = ('value',)
+
+ fields = ("value",)
def as_const(self, eval_ctx=None):
rv = self.value
- if PY2 and type(rv) is text_type and \
- self.environment.policies['compiler.ascii_str']:
+ if (
+ PY2
+ and type(rv) is text_type
+ and self.environment.policies["compiler.ascii_str"]
+ ):
try:
- rv = rv.encode('ascii')
+ rv = rv.encode("ascii")
except UnicodeError:
pass
return rv
@@ -507,6 +529,7 @@ def from_untrusted(cls, value, lineno=None, environment=None):
an `Impossible` exception.
"""
from .compiler import has_safe_repr
+
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
@@ -514,7 +537,8 @@ def from_untrusted(cls, value, lineno=None, environment=None):
class TemplateData(Literal):
"""A constant template string."""
- fields = ('data',)
+
+ fields = ("data",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -530,7 +554,8 @@ class Tuple(Literal):
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
- fields = ('items', 'ctx')
+
+ fields = ("items", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -545,7 +570,8 @@ def can_assign(self):
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
- fields = ('items',)
+
+ fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -556,7 +582,8 @@ class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
- fields = ('items',)
+
+ fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -565,7 +592,8 @@ def as_const(self, eval_ctx=None):
class Pair(Helper):
"""A key, value pair for dicts."""
- fields = ('key', 'value')
+
+ fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -574,7 +602,8 @@ def as_const(self, eval_ctx=None):
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
- fields = ('key', 'value')
+
+ fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -585,7 +614,8 @@ class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
- fields = ('test', 'expr1', 'expr2')
+
+ fields = ("test", "expr1", "expr2")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -626,7 +656,7 @@ class Filter(Expr):
filtered. Buffers are created by macros and filter blocks.
"""
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -636,28 +666,27 @@ def as_const(self, eval_ctx=None):
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
- # call in a list beause it is assuming we are talking about the
+ # call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
- if filter_ is None or getattr(filter_, 'contextfilter', False):
+ if filter_ is None or getattr(filter_, "contextfilter", False) is True:
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
- if (
- eval_ctx.environment.is_async
- and getattr(filter_, 'asyncfiltervariant', False)
+ if eval_ctx.environment.is_async and getattr(
+ filter_, "asyncfiltervariant", False
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
- if getattr(filter_, 'evalcontextfilter', False):
+ if getattr(filter_, "evalcontextfilter", False) is True:
args.insert(0, eval_ctx)
- elif getattr(filter_, 'environmentfilter', False):
+ elif getattr(filter_, "environmentfilter", False) is True:
args.insert(0, self.environment)
try:
@@ -671,7 +700,7 @@ class Test(Expr):
rest of the fields are the same as for :class:`Call`.
"""
- fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
test = self.environment.tests.get(self.name)
@@ -696,20 +725,23 @@ class Call(Expr):
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
- fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs')
+
+ fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
- fields = ('node', 'arg', 'ctx')
+
+ fields = ("node", "arg", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- if self.ctx != 'load':
+ if self.ctx != "load":
raise Impossible()
try:
- return self.environment.getitem(self.node.as_const(eval_ctx),
- self.arg.as_const(eval_ctx))
+ return self.environment.getitem(
+ self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
+ )
except Exception:
raise Impossible()
@@ -721,15 +753,15 @@ class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
- fields = ('node', 'attr', 'ctx')
+
+ fields = ("node", "attr", "ctx")
def as_const(self, eval_ctx=None):
- if self.ctx != 'load':
+ if self.ctx != "load":
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
- return self.environment.getattr(self.node.as_const(eval_ctx),
- self.attr)
+ return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
except Exception:
raise Impossible()
@@ -741,14 +773,17 @@ class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
- fields = ('start', 'stop', 'step')
+
+ fields = ("start", "stop", "step")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
+
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
+
return slice(const(self.start), const(self.stop), const(self.step))
@@ -756,82 +791,103 @@ class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
- fields = ('nodes',)
+
+ fields = ("nodes",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
- return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+ return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
- fields = ('expr', 'ops')
+
+ fields = ("expr", "ops")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
+
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
+
+ if not result:
+ return False
+
value = new_value
except Exception:
raise Impossible()
+
return result
class Operand(Helper):
"""Holds an operator and an expression."""
- fields = ('op', 'expr')
+
+ fields = ("op", "expr")
+
if __debug__:
- Operand.__doc__ += '\nThe following operators are available: ' + \
- ', '.join(sorted('``%s``' % x for x in set(_binop_to_func) |
- set(_uaop_to_func) | set(_cmpop_to_func)))
+ Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
+ sorted(
+ "``%s``" % x
+ for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
+ )
+ )
class Mul(BinExpr):
"""Multiplies the left with the right node."""
- operator = '*'
+
+ operator = "*"
class Div(BinExpr):
"""Divides the left by the right node."""
- operator = '/'
+
+ operator = "/"
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
- operator = '//'
+
+ operator = "//"
class Add(BinExpr):
"""Add the left to the right node."""
- operator = '+'
+
+ operator = "+"
class Sub(BinExpr):
"""Subtract the right from the left node."""
- operator = '-'
+
+ operator = "-"
class Mod(BinExpr):
"""Left modulo right."""
- operator = '%'
+
+ operator = "%"
class Pow(BinExpr):
"""Left to the power of right."""
- operator = '**'
+
+ operator = "**"
class And(BinExpr):
"""Short circuited AND."""
- operator = 'and'
+
+ operator = "and"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -840,7 +896,8 @@ def as_const(self, eval_ctx=None):
class Or(BinExpr):
"""Short circuited OR."""
- operator = 'or'
+
+ operator = "or"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -849,17 +906,20 @@ def as_const(self, eval_ctx=None):
class Not(UnaryExpr):
"""Negate the expression."""
- operator = 'not'
+
+ operator = "not"
class Neg(UnaryExpr):
"""Make the expression negative."""
- operator = '-'
+
+ operator = "-"
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
- operator = '+'
+
+ operator = "+"
# Helpers for extensions
@@ -869,7 +929,8 @@ class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
- fields = ('name',)
+
+ fields = ("name",)
class ExtensionAttribute(Expr):
@@ -879,7 +940,8 @@ class ExtensionAttribute(Expr):
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
- fields = ('identifier', 'name')
+
+ fields = ("identifier", "name")
class ImportedName(Expr):
@@ -888,7 +950,8 @@ class ImportedName(Expr):
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
- fields = ('importname',)
+
+ fields = ("importname",)
class InternalName(Expr):
@@ -898,16 +961,20 @@ class InternalName(Expr):
a new identifier for you. This identifier is not available from the
     template and is not treated specially by the compiler.
"""
- fields = ('name',)
+
+ fields = ("name",)
def __init__(self):
- raise TypeError('Can\'t create internal names. Use the '
- '`free_identifier` method on a parser.')
+ raise TypeError(
+ "Can't create internal names. Use the "
+ "`free_identifier` method on a parser."
+ )
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
- fields = ('expr',)
+
+ fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -920,7 +987,8 @@ class MarkSafeIfAutoescape(Expr):
.. versionadded:: 2.5
"""
- fields = ('expr',)
+
+ fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
@@ -942,6 +1010,20 @@ class ContextReference(Expr):
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
+
+ This is basically equivalent to using the
+ :func:`~jinja2.contextfunction` decorator when using the
+ high-level API, which causes a reference to the context to be passed
+ as the first argument to a function.
+ """
+
+
+class DerivedContextReference(Expr):
+ """Return the current template context including locals. Behaves
+ exactly like :class:`ContextReference`, but includes local
+ variables, such as from a ``for`` loop.
+
+ .. versionadded:: 2.11
"""
@@ -955,7 +1037,8 @@ class Break(Stmt):
class Scope(Stmt):
"""An artificial scope."""
- fields = ('body',)
+
+ fields = ("body",)
class OverlayScope(Stmt):
@@ -971,7 +1054,8 @@ class OverlayScope(Stmt):
.. versionadded:: 2.10
"""
- fields = ('context', 'body')
+
+ fields = ("context", "body")
class EvalContextModifier(Stmt):
@@ -982,7 +1066,8 @@ class EvalContextModifier(Stmt):
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
- fields = ('options',)
+
+ fields = ("options",)
class ScopedEvalContextModifier(EvalContextModifier):
@@ -990,10 +1075,14 @@ class ScopedEvalContextModifier(EvalContextModifier):
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
- fields = ('body',)
+
+ fields = ("body",)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
- raise TypeError('can\'t create custom node types')
-NodeType.__new__ = staticmethod(_failing_new); del _failing_new
+ raise TypeError("can't create custom node types")
+
+
+NodeType.__new__ = staticmethod(_failing_new)
+del _failing_new
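
As an illustrative aside (not part of the vendored diff), the node classes reworked above are normally produced by parsing template source; a minimal sketch, assuming the vendored `jinja2` package is importable as usual:

```python
from jinja2 import Environment

env = Environment()
# Parsing returns a nodes.Template whose body is built from the Stmt and
# Expr subclasses above (Output, CondExpr, Add, Const, Concat, Name, ...).
ast = env.parse("{{ 1 + 2 if flag else 'a' ~ 'b' }}")
print(ast)
```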
diff --git a/external/python/jinja2/optimizer.py b/external/python/jinja2/optimizer.py
index 65ab3ceb..7bc78c45 100644
--- a/external/python/jinja2/optimizer.py
+++ b/external/python/jinja2/optimizer.py
@@ -1,23 +1,15 @@
# -*- coding: utf-8 -*-
+"""The optimizer tries to constant fold expressions and modify the AST
+in place so that it should be faster to evaluate.
+
+Because the AST does not contain all the scoping information and the
+compiler has to find that out, we cannot do all the optimizations we
+want. For example, loop unrolling doesn't work because unrolled loops
+would have a different scope. The solution would be a second syntax tree
+that stored the scoping rules.
"""
- jinja2.optimizer
- ~~~~~~~~~~~~~~~~
-
- The jinja optimizer is currently trying to constant fold a few expressions
- and modify the AST in place so that it should be easier to evaluate it.
-
- Because the AST does not contain all the scoping information and the
- compiler has to find that out, we cannot do all the optimizations we
- want. For example loop unrolling doesn't work because unrolled loops would
- have a different scoping.
-
- The solution would be a second syntax tree that has the scoping rules stored.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2 import nodes
-from jinja2.visitor import NodeTransformer
+from . import nodes
+from .visitor import NodeTransformer
def optimize(node, environment):
@@ -28,22 +20,22 @@ def optimize(node, environment):
class Optimizer(NodeTransformer):
-
def __init__(self, environment):
self.environment = environment
- def fold(self, node, eval_ctx=None):
- """Do constant folding."""
- node = self.generic_visit(node)
- try:
- return nodes.Const.from_untrusted(node.as_const(eval_ctx),
- lineno=node.lineno,
- environment=self.environment)
- except nodes.Impossible:
- return node
-
- visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \
- visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \
- visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \
- visit_Filter = visit_Test = visit_CondExpr = fold
- del fold
+ def generic_visit(self, node, *args, **kwargs):
+ node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
+
+ # Do constant folding. Some other nodes besides Expr have
+ # as_const, but folding them causes errors later on.
+ if isinstance(node, nodes.Expr):
+ try:
+ return nodes.Const.from_untrusted(
+ node.as_const(args[0] if args else None),
+ lineno=node.lineno,
+ environment=self.environment,
+ )
+ except nodes.Impossible:
+ pass
+
+ return node
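
The reworked `generic_visit` above folds any `Expr` node whose `as_const` succeeds into a `Const`; names and other non-constant nodes raise `Impossible` and are left alone. A minimal sketch of that behaviour, assuming the vendored module layout:

```python
from jinja2 import Environment
from jinja2.optimizer import optimize

env = Environment()
ast = env.parse("{{ 1 + 2 * 3 }}")
# The constant arithmetic sub-tree collapses into a single Const node;
# an expression such as {{ user.name }} would be left untouched.
print(optimize(ast, env))
```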
diff --git a/external/python/jinja2/parser.py b/external/python/jinja2/parser.py
index ed00d970..d5881066 100644
--- a/external/python/jinja2/parser.py
+++ b/external/python/jinja2/parser.py
@@ -1,41 +1,46 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.parser
- ~~~~~~~~~~~~~
-
- Implements the template parser.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-from jinja2 import nodes
-from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
-from jinja2.lexer import describe_token, describe_token_expr
-from jinja2._compat import imap
-
-
-_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
- 'macro', 'include', 'from', 'import',
- 'set', 'with', 'autoescape'])
-_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
+"""Parse tokens from the lexer into nodes for the compiler."""
+from . import nodes
+from ._compat import imap
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .lexer import describe_token
+from .lexer import describe_token_expr
+
+_statement_keywords = frozenset(
+ [
+ "for",
+ "if",
+ "block",
+ "extends",
+ "print",
+ "macro",
+ "include",
+ "from",
+ "import",
+ "set",
+ "with",
+ "autoescape",
+ ]
+)
+_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
_math_nodes = {
- 'add': nodes.Add,
- 'sub': nodes.Sub,
- 'mul': nodes.Mul,
- 'div': nodes.Div,
- 'floordiv': nodes.FloorDiv,
- 'mod': nodes.Mod,
+ "add": nodes.Add,
+ "sub": nodes.Sub,
+ "mul": nodes.Mul,
+ "div": nodes.Div,
+ "floordiv": nodes.FloorDiv,
+ "mod": nodes.Mod,
}
class Parser(object):
- """This is the central parsing class Jinja2 uses. It's passed to
+ """This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
- def __init__(self, environment, source, name=None, filename=None,
- state=None):
+ def __init__(self, environment, source, name=None, filename=None, state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
@@ -63,31 +68,37 @@ def _fail_ut_eof(self, name, end_token_stack, lineno):
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
- currently_looking = ' or '.join(
- "'%s'" % describe_token_expr(expr)
- for expr in end_token_stack[-1])
+ currently_looking = " or ".join(
+ "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
+ )
else:
currently_looking = None
if name is None:
- message = ['Unexpected end of template.']
+ message = ["Unexpected end of template."]
else:
- message = ['Encountered unknown tag \'%s\'.' % name]
+ message = ["Encountered unknown tag '%s'." % name]
if currently_looking:
if name is not None and name in expected:
- message.append('You probably made a nesting mistake. Jinja '
- 'is expecting this tag, but currently looking '
- 'for %s.' % currently_looking)
+ message.append(
+ "You probably made a nesting mistake. Jinja "
+ "is expecting this tag, but currently looking "
+ "for %s." % currently_looking
+ )
else:
- message.append('Jinja was looking for the following tags: '
- '%s.' % currently_looking)
+ message.append(
+ "Jinja was looking for the following tags: "
+ "%s." % currently_looking
+ )
if self._tag_stack:
- message.append('The innermost block that needs to be '
- 'closed is \'%s\'.' % self._tag_stack[-1])
+ message.append(
+ "The innermost block that needs to be "
+ "closed is '%s'." % self._tag_stack[-1]
+ )
- self.fail(' '.join(message), lineno)
+ self.fail(" ".join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
@@ -105,7 +116,7 @@ def fail_eof(self, end_tokens=None, lineno=None):
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
- if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
+ if self.stream.current.type in ("variable_end", "block_end", "rparen"):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
@@ -115,22 +126,22 @@ def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
- nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
+ nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
- if token.type != 'name':
- self.fail('tag name expected', token.lineno)
+ if token.type != "name":
+ self.fail("tag name expected", token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
- return getattr(self, 'parse_' + self.stream.current.value)()
- if token.value == 'call':
+ return getattr(self, "parse_" + self.stream.current.value)()
+ if token.value == "call":
return self.parse_call_block()
- if token.value == 'filter':
+ if token.value == "filter":
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
@@ -157,16 +168,16 @@ def parse_statements(self, end_tokens, drop_needle=False):
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
- self.stream.skip_if('colon')
+ self.stream.skip_if("colon")
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
- self.stream.expect('block_end')
+ self.stream.expect("block_end")
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
- if self.stream.current.type == 'eof':
+ if self.stream.current.type == "eof":
self.fail_eof(end_tokens)
if drop_needle:
@@ -177,50 +188,47 @@ def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
- if self.stream.skip_if('assign'):
+ if self.stream.skip_if("assign"):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
- body = self.parse_statements(('name:endset',),
- drop_needle=True)
+ body = self.parse_statements(("name:endset",), drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
- lineno = self.stream.expect('name:for').lineno
- target = self.parse_assign_target(extra_end_rules=('name:in',))
- self.stream.expect('name:in')
- iter = self.parse_tuple(with_condexpr=False,
- extra_end_rules=('name:recursive',))
+ lineno = self.stream.expect("name:for").lineno
+ target = self.parse_assign_target(extra_end_rules=("name:in",))
+ self.stream.expect("name:in")
+ iter = self.parse_tuple(
+ with_condexpr=False, extra_end_rules=("name:recursive",)
+ )
test = None
- if self.stream.skip_if('name:if'):
+ if self.stream.skip_if("name:if"):
test = self.parse_expression()
- recursive = self.stream.skip_if('name:recursive')
- body = self.parse_statements(('name:endfor', 'name:else'))
- if next(self.stream).value == 'endfor':
+ recursive = self.stream.skip_if("name:recursive")
+ body = self.parse_statements(("name:endfor", "name:else"))
+ if next(self.stream).value == "endfor":
else_ = []
else:
- else_ = self.parse_statements(('name:endfor',), drop_needle=True)
- return nodes.For(target, iter, body, else_, test,
- recursive, lineno=lineno)
+ else_ = self.parse_statements(("name:endfor",), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
- node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
+ node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
- node.body = self.parse_statements(('name:elif', 'name:else',
- 'name:endif'))
+ node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
node.elif_ = []
node.else_ = []
token = next(self.stream)
- if token.test('name:elif'):
+ if token.test("name:elif"):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
- elif token.test('name:else'):
- result.else_ = self.parse_statements(('name:endif',),
- drop_needle=True)
+ elif token.test("name:else"):
+ result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
break
return result
@@ -228,45 +236,42 @@ def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
- while self.stream.current.type != 'block_end':
- lineno = self.stream.current.lineno
+ while self.stream.current.type != "block_end":
if targets:
- self.stream.expect('comma')
+ self.stream.expect("comma")
target = self.parse_assign_target()
- target.set_ctx('param')
+ target.set_ctx("param")
targets.append(target)
- self.stream.expect('assign')
+ self.stream.expect("assign")
values.append(self.parse_expression())
node.targets = targets
node.values = values
- node.body = self.parse_statements(('name:endwith',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endwith",), drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
- node.options = [
- nodes.Keyword('autoescape', self.parse_expression())
- ]
- node.body = self.parse_statements(('name:endautoescape',),
- drop_needle=True)
+ node.options = [nodes.Keyword("autoescape", self.parse_expression())]
+ node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
- node.name = self.stream.expect('name').value
- node.scoped = self.stream.skip_if('name:scoped')
+ node.name = self.stream.expect("name").value
+ node.scoped = self.stream.skip_if("name:scoped")
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
- if self.stream.current.type == 'sub':
- self.fail('Block names in Jinja have to be valid Python '
- 'identifiers and may not contain hyphens, use an '
- 'underscore instead.')
-
- node.body = self.parse_statements(('name:endblock',), drop_needle=True)
- self.stream.skip_if('name:' + node.name)
+ if self.stream.current.type == "sub":
+ self.fail(
+ "Block names in Jinja have to be valid Python "
+ "identifiers and may not contain hyphens, use an "
+ "underscore instead."
+ )
+
+ node.body = self.parse_statements(("name:endblock",), drop_needle=True)
+ self.stream.skip_if("name:" + node.name)
return node
def parse_extends(self):
@@ -275,9 +280,10 @@ def parse_extends(self):
return node
def parse_import_context(self, node, default):
- if self.stream.current.test_any('name:with', 'name:without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
+ if self.stream.current.test_any(
+ "name:with", "name:without"
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
self.stream.skip()
else:
node.with_context = default
@@ -286,8 +292,9 @@ def parse_import_context(self, node, default):
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- if self.stream.current.test('name:ignore') and \
- self.stream.look().test('name:missing'):
+ if self.stream.current.test("name:ignore") and self.stream.look().test(
+ "name:missing"
+ ):
node.ignore_missing = True
self.stream.skip(2)
else:
@@ -297,67 +304,71 @@ def parse_include(self):
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect('name:as')
+ self.stream.expect("name:as")
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
- self.stream.expect('name:import')
+ self.stream.expect("name:import")
node.names = []
def parse_context():
- if self.stream.current.value in ('with', 'without') and \
- self.stream.look().test('name:context'):
- node.with_context = next(self.stream).value == 'with'
+ if self.stream.current.value in (
+ "with",
+ "without",
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
self.stream.skip()
return True
return False
while 1:
if node.names:
- self.stream.expect('comma')
- if self.stream.current.type == 'name':
+ self.stream.expect("comma")
+ if self.stream.current.type == "name":
if parse_context():
break
target = self.parse_assign_target(name_only=True)
- if target.name.startswith('_'):
- self.fail('names starting with an underline can not '
- 'be imported', target.lineno,
- exc=TemplateAssertionError)
- if self.stream.skip_if('name:as'):
+ if target.name.startswith("_"):
+ self.fail(
+ "names starting with an underline can not be imported",
+ target.lineno,
+ exc=TemplateAssertionError,
+ )
+ if self.stream.skip_if("name:as"):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
- if parse_context() or self.stream.current.type != 'comma':
+ if parse_context() or self.stream.current.type != "comma":
break
else:
- self.stream.expect('name')
- if not hasattr(node, 'with_context'):
+ self.stream.expect("name")
+ if not hasattr(node, "with_context"):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
- self.stream.expect('lparen')
- while self.stream.current.type != 'rparen':
+ self.stream.expect("lparen")
+ while self.stream.current.type != "rparen":
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
arg = self.parse_assign_target(name_only=True)
- arg.set_ctx('param')
- if self.stream.skip_if('assign'):
+ arg.set_ctx("param")
+ if self.stream.skip_if("assign"):
defaults.append(self.parse_expression())
elif defaults:
- self.fail('non-default argument follows default argument')
+ self.fail("non-default argument follows default argument")
args.append(arg)
- self.stream.expect('rparen')
+ self.stream.expect("rparen")
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
- if self.stream.current.type == 'lparen':
+ if self.stream.current.type == "lparen":
self.parse_signature(node)
else:
node.args = []
@@ -365,37 +376,40 @@ def parse_call_block(self):
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
- self.fail('expected call', node.lineno)
- node.body = self.parse_statements(('name:endcall',), drop_needle=True)
+ self.fail("expected call", node.lineno)
+ node.body = self.parse_statements(("name:endcall",), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
- node.body = self.parse_statements(('name:endfilter',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
- node.body = self.parse_statements(('name:endmacro',),
- drop_needle=True)
+ node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
- while self.stream.current.type != 'block_end':
+ while self.stream.current.type != "block_end":
if node.nodes:
- self.stream.expect('comma')
+ self.stream.expect("comma")
node.nodes.append(self.parse_expression())
return node
- def parse_assign_target(self, with_tuple=True, name_only=False,
- extra_end_rules=None, with_namespace=False):
- """Parse an assignment target. As Jinja2 allows assignments to
+ def parse_assign_target(
+ self,
+ with_tuple=True,
+ name_only=False,
+ extra_end_rules=None,
+ with_namespace=False,
+ ):
+ """Parse an assignment target. As Jinja allows assignments to
tuples, this function can parse all allowed assignment targets. Per
         default assignments to tuples are parsed; that can be disabled, however,
by setting `with_tuple` to `False`. If only assignments to names are
@@ -403,24 +417,26 @@ def parse_assign_target(self, with_tuple=True, name_only=False,
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
- if with_namespace and self.stream.look().type == 'dot':
- token = self.stream.expect('name')
+ if with_namespace and self.stream.look().type == "dot":
+ token = self.stream.expect("name")
next(self.stream) # dot
- attr = self.stream.expect('name')
+ attr = self.stream.expect("name")
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
- token = self.stream.expect('name')
- target = nodes.Name(token.value, 'store', lineno=token.lineno)
+ token = self.stream.expect("name")
+ target = nodes.Name(token.value, "store", lineno=token.lineno)
else:
if with_tuple:
- target = self.parse_tuple(simplified=True,
- extra_end_rules=extra_end_rules)
+ target = self.parse_tuple(
+ simplified=True, extra_end_rules=extra_end_rules
+ )
else:
target = self.parse_primary()
- target.set_ctx('store')
+ target.set_ctx("store")
if not target.can_assign():
- self.fail('can\'t assign to %r' % target.__class__.
- __name__.lower(), target.lineno)
+ self.fail(
+ "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
+ )
return target
def parse_expression(self, with_condexpr=True):
@@ -435,9 +451,9 @@ def parse_expression(self, with_condexpr=True):
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
- while self.stream.skip_if('name:if'):
+ while self.stream.skip_if("name:if"):
expr2 = self.parse_or()
- if self.stream.skip_if('name:else'):
+ if self.stream.skip_if("name:else"):
expr3 = self.parse_condexpr()
else:
expr3 = None
@@ -448,7 +464,7 @@ def parse_condexpr(self):
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
- while self.stream.skip_if('name:or'):
+ while self.stream.skip_if("name:or"):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
@@ -457,14 +473,14 @@ def parse_or(self):
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
- while self.stream.skip_if('name:and'):
+ while self.stream.skip_if("name:and"):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
- if self.stream.current.test('name:not'):
+ if self.stream.current.test("name:not"):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
@@ -478,12 +494,13 @@ def parse_compare(self):
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
- elif self.stream.skip_if('name:in'):
- ops.append(nodes.Operand('in', self.parse_math1()))
- elif (self.stream.current.test('name:not') and
- self.stream.look().test('name:in')):
+ elif self.stream.skip_if("name:in"):
+ ops.append(nodes.Operand("in", self.parse_math1()))
+ elif self.stream.current.test("name:not") and self.stream.look().test(
+ "name:in"
+ ):
self.stream.skip(2)
- ops.append(nodes.Operand('notin', self.parse_math1()))
+ ops.append(nodes.Operand("notin", self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
@@ -494,7 +511,7 @@ def parse_compare(self):
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
- while self.stream.current.type in ('add', 'sub'):
+ while self.stream.current.type in ("add", "sub"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
@@ -505,7 +522,7 @@ def parse_math1(self):
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
- while self.stream.current.type == 'tilde':
+ while self.stream.current.type == "tilde":
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
@@ -515,7 +532,7 @@ def parse_concat(self):
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
- while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
+ while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
@@ -526,7 +543,7 @@ def parse_math2(self):
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
- while self.stream.current.type == 'pow':
+ while self.stream.current.type == "pow":
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
@@ -536,10 +553,10 @@ def parse_pow(self):
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
- if token_type == 'sub':
+ if token_type == "sub":
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
- elif token_type == 'add':
+ elif token_type == "add":
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
@@ -551,40 +568,44 @@ def parse_unary(self, with_filter=True):
def parse_primary(self):
token = self.stream.current
- if token.type == 'name':
- if token.value in ('true', 'false', 'True', 'False'):
- node = nodes.Const(token.value in ('true', 'True'),
- lineno=token.lineno)
- elif token.value in ('none', 'None'):
+ if token.type == "name":
+ if token.value in ("true", "false", "True", "False"):
+ node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
+ elif token.value in ("none", "None"):
node = nodes.Const(None, lineno=token.lineno)
else:
- node = nodes.Name(token.value, 'load', lineno=token.lineno)
+ node = nodes.Name(token.value, "load", lineno=token.lineno)
next(self.stream)
- elif token.type == 'string':
+ elif token.type == "string":
next(self.stream)
buf = [token.value]
lineno = token.lineno
- while self.stream.current.type == 'string':
+ while self.stream.current.type == "string":
buf.append(self.stream.current.value)
next(self.stream)
- node = nodes.Const(''.join(buf), lineno=lineno)
- elif token.type in ('integer', 'float'):
+ node = nodes.Const("".join(buf), lineno=lineno)
+ elif token.type in ("integer", "float"):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
- elif token.type == 'lparen':
+ elif token.type == "lparen":
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
- self.stream.expect('rparen')
- elif token.type == 'lbracket':
+ self.stream.expect("rparen")
+ elif token.type == "lbracket":
node = self.parse_list()
- elif token.type == 'lbrace':
+ elif token.type == "lbrace":
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
- def parse_tuple(self, simplified=False, with_condexpr=True,
- extra_end_rules=None, explicit_parentheses=False):
+ def parse_tuple(
+ self,
+ simplified=False,
+ with_condexpr=True,
+ extra_end_rules=None,
+ explicit_parentheses=False,
+ ):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
@@ -609,16 +630,19 @@ def parse_tuple(self, simplified=False, with_condexpr=True,
elif with_condexpr:
parse = self.parse_expression
else:
- parse = lambda: self.parse_expression(with_condexpr=False)
+
+ def parse():
+ return self.parse_expression(with_condexpr=False)
+
args = []
is_tuple = False
while 1:
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
- if self.stream.current.type == 'comma':
+ if self.stream.current.type == "comma":
is_tuple = True
else:
break
@@ -633,46 +657,48 @@ def parse_tuple(self, simplified=False, with_condexpr=True,
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
- self.fail('Expected an expression, got \'%s\'' %
- describe_token(self.stream.current))
+ self.fail(
+ "Expected an expression, got '%s'"
+ % describe_token(self.stream.current)
+ )
- return nodes.Tuple(args, 'load', lineno=lineno)
+ return nodes.Tuple(args, "load", lineno=lineno)
def parse_list(self):
- token = self.stream.expect('lbracket')
+ token = self.stream.expect("lbracket")
items = []
- while self.stream.current.type != 'rbracket':
+ while self.stream.current.type != "rbracket":
if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbracket':
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbracket":
break
items.append(self.parse_expression())
- self.stream.expect('rbracket')
+ self.stream.expect("rbracket")
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
- token = self.stream.expect('lbrace')
+ token = self.stream.expect("lbrace")
items = []
- while self.stream.current.type != 'rbrace':
+ while self.stream.current.type != "rbrace":
if items:
- self.stream.expect('comma')
- if self.stream.current.type == 'rbrace':
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbrace":
break
key = self.parse_expression()
- self.stream.expect('colon')
+ self.stream.expect("colon")
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
- self.stream.expect('rbrace')
+ self.stream.expect("rbrace")
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == 'dot' or token_type == 'lbracket':
+ if token_type == "dot" or token_type == "lbracket":
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == 'lparen':
+ elif token_type == "lparen":
node = self.parse_call(node)
else:
break
@@ -681,13 +707,13 @@ def parse_postfix(self, node):
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
- if token_type == 'pipe':
+ if token_type == "pipe":
node = self.parse_filter(node)
- elif token_type == 'name' and self.stream.current.value == 'is':
+ elif token_type == "name" and self.stream.current.value == "is":
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
- elif token_type == 'lparen':
+ elif token_type == "lparen":
node = self.parse_call(node)
else:
break
@@ -695,53 +721,54 @@ def parse_filter_expr(self, node):
def parse_subscript(self, node):
token = next(self.stream)
- if token.type == 'dot':
+ if token.type == "dot":
attr_token = self.stream.current
next(self.stream)
- if attr_token.type == 'name':
- return nodes.Getattr(node, attr_token.value, 'load',
- lineno=token.lineno)
- elif attr_token.type != 'integer':
- self.fail('expected name or number', attr_token.lineno)
+ if attr_token.type == "name":
+ return nodes.Getattr(
+ node, attr_token.value, "load", lineno=token.lineno
+ )
+ elif attr_token.type != "integer":
+ self.fail("expected name or number", attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
- if token.type == 'lbracket':
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ if token.type == "lbracket":
args = []
- while self.stream.current.type != 'rbracket':
+ while self.stream.current.type != "rbracket":
if args:
- self.stream.expect('comma')
+ self.stream.expect("comma")
args.append(self.parse_subscribed())
- self.stream.expect('rbracket')
+ self.stream.expect("rbracket")
if len(args) == 1:
arg = args[0]
else:
- arg = nodes.Tuple(args, 'load', lineno=token.lineno)
- return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
- self.fail('expected subscript expression', self.lineno)
+ arg = nodes.Tuple(args, "load", lineno=token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ self.fail("expected subscript expression", token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
next(self.stream)
args = [None]
else:
node = self.parse_expression()
- if self.stream.current.type != 'colon':
+ if self.stream.current.type != "colon":
return node
next(self.stream)
args = [node]
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
args.append(None)
- elif self.stream.current.type not in ('rbracket', 'comma'):
+ elif self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
- if self.stream.current.type == 'colon':
+ if self.stream.current.type == "colon":
next(self.stream)
- if self.stream.current.type not in ('rbracket', 'comma'):
+ if self.stream.current.type not in ("rbracket", "comma"):
args.append(self.parse_expression())
else:
args.append(None)
@@ -751,7 +778,7 @@ def parse_subscribed(self):
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
- token = self.stream.expect('lparen')
+ token = self.stream.expect("lparen")
args = []
kwargs = []
dyn_args = dyn_kwargs = None
@@ -759,91 +786,100 @@ def parse_call(self, node):
def ensure(expr):
if not expr:
- self.fail('invalid syntax for function call expression',
- token.lineno)
+ self.fail("invalid syntax for function call expression", token.lineno)
- while self.stream.current.type != 'rparen':
+ while self.stream.current.type != "rparen":
if require_comma:
- self.stream.expect('comma')
+ self.stream.expect("comma")
# support for trailing comma
- if self.stream.current.type == 'rparen':
+ if self.stream.current.type == "rparen":
break
- if self.stream.current.type == 'mul':
+ if self.stream.current.type == "mul":
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
- elif self.stream.current.type == 'pow':
+ elif self.stream.current.type == "pow":
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
- ensure(dyn_args is None and dyn_kwargs is None)
- if self.stream.current.type == 'name' and \
- self.stream.look().type == 'assign':
+ if (
+ self.stream.current.type == "name"
+ and self.stream.look().type == "assign"
+ ):
+ # Parsing a kwarg
+ ensure(dyn_kwargs is None)
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
- kwargs.append(nodes.Keyword(key, value,
- lineno=value.lineno))
+ kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
else:
- ensure(not kwargs)
+ # Parsing an arg
+ ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
args.append(self.parse_expression())
require_comma = True
- self.stream.expect('rparen')
+ self.stream.expect("rparen")
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
- return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
- lineno=token.lineno)
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
- while self.stream.current.type == 'pipe' or start_inline:
+ while self.stream.current.type == "pipe" or start_inline:
if not start_inline:
next(self.stream)
- token = self.stream.expect('name')
+ token = self.stream.expect("name")
name = token.value
- while self.stream.current.type == 'dot':
+ while self.stream.current.type == "dot":
next(self.stream)
- name += '.' + self.stream.expect('name').value
- if self.stream.current.type == 'lparen':
+ name += "." + self.stream.expect("name").value
+ if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
- node = nodes.Filter(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
+ node = nodes.Filter(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
- if self.stream.current.test('name:not'):
+ if self.stream.current.test("name:not"):
next(self.stream)
negated = True
else:
negated = False
- name = self.stream.expect('name').value
- while self.stream.current.type == 'dot':
+ name = self.stream.expect("name").value
+ while self.stream.current.type == "dot":
next(self.stream)
- name += '.' + self.stream.expect('name').value
+ name += "." + self.stream.expect("name").value
dyn_args = dyn_kwargs = None
kwargs = []
- if self.stream.current.type == 'lparen':
+ if self.stream.current.type == "lparen":
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
- elif (self.stream.current.type in ('name', 'string', 'integer',
- 'float', 'lparen', 'lbracket',
- 'lbrace') and not
- self.stream.current.test_any('name:else', 'name:or',
- 'name:and')):
- if self.stream.current.test('name:is'):
- self.fail('You cannot chain multiple tests with is')
- args = [self.parse_primary()]
+ elif self.stream.current.type in (
+ "name",
+ "string",
+ "integer",
+ "float",
+ "lparen",
+ "lbracket",
+ "lbrace",
+ ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
+ if self.stream.current.test("name:is"):
+ self.fail("You cannot chain multiple tests with is")
+ arg_node = self.parse_primary()
+ arg_node = self.parse_postfix(arg_node)
+ args = [arg_node]
else:
args = []
- node = nodes.Test(node, name, args, kwargs, dyn_args,
- dyn_kwargs, lineno=token.lineno)
+ node = nodes.Test(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
@@ -865,29 +901,29 @@ def flush_data():
try:
while self.stream:
token = self.stream.current
- if token.type == 'data':
+ if token.type == "data":
if token.value:
- add_data(nodes.TemplateData(token.value,
- lineno=token.lineno))
+ add_data(nodes.TemplateData(token.value, lineno=token.lineno))
next(self.stream)
- elif token.type == 'variable_begin':
+ elif token.type == "variable_begin":
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
- self.stream.expect('variable_end')
- elif token.type == 'block_begin':
+ self.stream.expect("variable_end")
+ elif token.type == "block_begin":
flush_data()
next(self.stream)
- if end_tokens is not None and \
- self.stream.current.test_any(*end_tokens):
+ if end_tokens is not None and self.stream.current.test_any(
+ *end_tokens
+ ):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
- self.stream.expect('block_end')
+ self.stream.expect("block_end")
else:
- raise AssertionError('internal parsing error')
+ raise AssertionError("internal parsing error")
flush_data()
finally:
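
Extensions receive a `Parser` instance like the one reformatted above; a hypothetical, minimal usage sketch (driving it directly instead of through `Environment`):

```python
from jinja2 import Environment
from jinja2.parser import Parser

env = Environment()
# Construct the parser the way Environment and extensions do.
parser = Parser(env, "{{ items | join(', ') }}", name="example")
template_node = parser.parse()  # returns a nodes.Template
print(template_node)
```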
diff --git a/external/python/jinja2/runtime.py b/external/python/jinja2/runtime.py
index 5e313369..3ad79686 100644
--- a/external/python/jinja2/runtime.py
+++ b/external/python/jinja2/runtime.py
@@ -1,43 +1,62 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.runtime
- ~~~~~~~~~~~~~~
-
- Runtime helpers.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
+"""The runtime functions and state used by compiled templates."""
import sys
-
from itertools import chain
from types import MethodType
-from jinja2.nodes import EvalContext, _context_function_types
-from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
- internalcode, object_type_repr, evalcontextfunction, Namespace
-from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
- TemplateNotFound
-from jinja2._compat import imap, text_type, iteritems, \
- implements_iterator, implements_to_string, string_types, PY2, \
- with_metaclass, abc
-
+from markupsafe import escape # noqa: F401
+from markupsafe import Markup
+from markupsafe import soft_unicode
+
+from ._compat import abc
+from ._compat import imap
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import with_metaclass
+from .exceptions import TemplateNotFound # noqa: F401
+from .exceptions import TemplateRuntimeError # noqa: F401
+from .exceptions import UndefinedError
+from .nodes import EvalContext
+from .utils import concat
+from .utils import evalcontextfunction
+from .utils import internalcode
+from .utils import missing
+from .utils import Namespace # noqa: F401
+from .utils import object_type_repr
# these variables are exported to the template runtime
-__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
- 'TemplateRuntimeError', 'missing', 'concat', 'escape',
- 'markup_join', 'unicode_join', 'to_string', 'identity',
- 'TemplateNotFound', 'Namespace']
+exported = [
+ "LoopContext",
+ "TemplateReference",
+ "Macro",
+ "Markup",
+ "TemplateRuntimeError",
+ "missing",
+ "concat",
+ "escape",
+ "markup_join",
+ "unicode_join",
+ "to_string",
+ "identity",
+ "TemplateNotFound",
+ "Namespace",
+ "Undefined",
+]
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
-#: the identity function. Useful for certain things in the environment
-identity = lambda x: x
-_first_iteration = object()
-_last_iteration = object()
+def identity(x):
+ """Returns its argument. Useful for certain things in the
+ environment.
+ """
+ return x
def markup_join(seq):
@@ -46,8 +65,8 @@ def markup_join(seq):
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
- if hasattr(arg, '__html__'):
- return Markup(u'').join(chain(buf, iterator))
+ if hasattr(arg, "__html__"):
+ return Markup(u"").join(chain(buf, iterator))
return concat(buf)
@@ -56,9 +75,16 @@ def unicode_join(seq):
return concat(imap(text_type, seq))
-def new_context(environment, template_name, blocks, vars=None,
- shared=None, globals=None, locals=None):
- """Internal helper to for context creation."""
+def new_context(
+ environment,
+ template_name,
+ blocks,
+ vars=None,
+ shared=None,
+ globals=None,
+ locals=None,
+):
+ """Internal helper for context creation."""
if vars is None:
vars = {}
if shared:
@@ -73,8 +99,7 @@ def new_context(environment, template_name, blocks, vars=None,
for key, value in iteritems(locals):
if value is not missing:
parent[key] = value
- return environment.context_class(environment, parent, template_name,
- blocks)
+ return environment.context_class(environment, parent, template_name, blocks)
class TemplateReference(object):
@@ -88,20 +113,16 @@ def __getitem__(self, name):
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self.__context.name
- )
+ return "<%s %r>" % (self.__class__.__name__, self.__context.name)
def _get_func(x):
- return getattr(x, '__func__', x)
+ return getattr(x, "__func__", x)
class ContextMeta(type):
-
- def __new__(cls, name, bases, d):
- rv = type.__new__(cls, name, bases, d)
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
if bases == ():
return rv
@@ -112,11 +133,15 @@ def __new__(cls, name, bases, d):
# If we have a changed resolve but no changed default or missing
# resolve we invert the call logic.
- if resolve is not default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
+ if (
+ resolve is not default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
rv._legacy_resolve_mode = True
- elif resolve is default_resolve and \
- resolve_or_missing is default_resolve_or_missing:
+ elif (
+ resolve is default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
rv._fast_resolve_mode = True
return rv
@@ -149,6 +174,7 @@ class Context(with_metaclass(ContextMeta)):
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
+
# XXX: we want to eventually make this be a deprecation warning and
# remove it.
_legacy_resolve_mode = False
@@ -179,9 +205,9 @@ def super(self, name, current):
index = blocks.index(current) + 1
blocks[index]
except LookupError:
- return self.environment.undefined('there is no parent block '
- 'called %r.' % name,
- name='super')
+ return self.environment.undefined(
+ "there is no parent block called %r." % name, name="super"
+ )
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
@@ -232,7 +258,7 @@ def get_all(self):
return dict(self.parent, **self.vars)
@internalcode
- def call(__self, __obj, *args, **kwargs):
+ def call(__self, __obj, *args, **kwargs): # noqa: B902
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
@@ -242,55 +268,62 @@ def call(__self, __obj, *args, **kwargs):
__traceback_hide__ = True # noqa
# Allow callable classes to take a context
- if hasattr(__obj, '__call__'):
+ if hasattr(__obj, "__call__"): # noqa: B004
fn = __obj.__call__
- for fn_type in ('contextfunction',
- 'evalcontextfunction',
- 'environmentfunction'):
+ for fn_type in (
+ "contextfunction",
+ "evalcontextfunction",
+ "environmentfunction",
+ ):
if hasattr(fn, fn_type):
__obj = fn
break
- if isinstance(__obj, _context_function_types):
- if getattr(__obj, 'contextfunction', 0):
+ if callable(__obj):
+ if getattr(__obj, "contextfunction", False) is True:
args = (__self,) + args
- elif getattr(__obj, 'evalcontextfunction', 0):
+ elif getattr(__obj, "evalcontextfunction", False) is True:
args = (__self.eval_ctx,) + args
- elif getattr(__obj, 'environmentfunction', 0):
+ elif getattr(__obj, "environmentfunction", False) is True:
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
- return __self.environment.undefined('value was undefined because '
- 'a callable raised a '
- 'StopIteration exception')
+ return __self.environment.undefined(
+ "value was undefined because "
+ "a callable raised a "
+ "StopIteration exception"
+ )
def derived(self, locals=None):
"""Internal helper function to create a derived context. This is
used in situations where the system needs a new context in the same
template that is independent.
"""
- context = new_context(self.environment, self.name, {},
- self.get_all(), True, None, locals)
+ context = new_context(
+ self.environment, self.name, {}, self.get_all(), True, None, locals
+ )
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
- def _all(meth):
- proxy = lambda self: getattr(self.get_all(), meth)()
+ def _all(meth): # noqa: B902
+ def proxy(self):
+ return getattr(self.get_all(), meth)()
+
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
- keys = _all('keys')
- values = _all('values')
- items = _all('items')
+ keys = _all("keys")
+ values = _all("values")
+ items = _all("items")
# not available on python 3
if PY2:
- iterkeys = _all('iterkeys')
- itervalues = _all('itervalues')
- iteritems = _all('iteritems')
+ iterkeys = _all("iterkeys")
+ itervalues = _all("itervalues")
+ iteritems = _all("iteritems")
del _all
def __contains__(self, name):
@@ -306,10 +339,10 @@ def __getitem__(self, key):
return item
def __repr__(self):
- return '<%s %s of %r>' % (
+ return "<%s %s of %r>" % (
self.__class__.__name__,
repr(self.get_all()),
- self.name
+ self.name,
)
@@ -329,11 +362,10 @@ def __init__(self, name, context, stack, depth):
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
- return self._context.environment. \
- undefined('there is no parent block called %r.' %
- self.name, name='super')
- return BlockReference(self.name, self._context, self._stack,
- self._depth + 1)
+ return self._context.environment.undefined(
+ "there is no parent block called %r." % self.name, name="super"
+ )
+ return BlockReference(self.name, self._context, self._stack, self._depth + 1)
@internalcode
def __call__(self):
@@ -343,143 +375,212 @@ def __call__(self):
return rv
-class LoopContextBase(object):
- """A loop context for dynamic iteration."""
+@implements_iterator
+class LoopContext:
+ """A wrapper iterable for dynamic ``for`` loops, with information
+ about the loop and iteration.
+ """
+
+ #: Current iteration of the loop, starting at 0.
+ index0 = -1
- _before = _first_iteration
- _current = _first_iteration
- _after = _last_iteration
_length = None
+ _after = missing
+ _current = missing
+ _before = missing
+ _last_changed_value = missing
- def __init__(self, undefined, recurse=None, depth0=0):
+ def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ """
+ :param iterable: Iterable to wrap.
+ :param undefined: :class:`Undefined` class to use for next and
+ previous items.
+ :param recurse: The function to render the loop body when the
+ loop is marked recursive.
+ :param depth0: Incremented when looping recursively.
+ """
+ self._iterable = iterable
+ self._iterator = self._to_iterator(iterable)
self._undefined = undefined
self._recurse = recurse
- self.index0 = -1
+ #: How many levels deep a recursive loop currently is, starting at 0.
self.depth0 = depth0
- self._last_checked_value = missing
- def cycle(self, *args):
- """Cycles among the arguments with the current loop index."""
- if not args:
- raise TypeError('no items for cycling given')
- return args[self.index0 % len(args)]
+ @staticmethod
+ def _to_iterator(iterable):
+ return iter(iterable)
- def changed(self, *value):
- """Checks whether the value has changed since the last call."""
- if self._last_checked_value != value:
- self._last_checked_value = value
- return True
- return False
+ @property
+ def length(self):
+ """Length of the iterable.
- first = property(lambda x: x.index0 == 0)
- last = property(lambda x: x._after is _last_iteration)
- index = property(lambda x: x.index0 + 1)
- revindex = property(lambda x: x.length - x.index0)
- revindex0 = property(lambda x: x.length - x.index)
- depth = property(lambda x: x.depth0 + 1)
+ If the iterable is a generator or otherwise does not have a
+ size, it is eagerly evaluated to get a size.
+ """
+ if self._length is not None:
+ return self._length
- @property
- def previtem(self):
- if self._before is _first_iteration:
- return self._undefined('there is no previous item')
- return self._before
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = list(self._iterator)
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
- @property
- def nextitem(self):
- if self._after is _last_iteration:
- return self._undefined('there is no next item')
- return self._after
+ return self._length
def __len__(self):
return self.length
- @internalcode
- def loop(self, iterable):
- if self._recurse is None:
- raise TypeError('Tried to call non recursive loop. Maybe you '
- "forgot the 'recursive' modifier.")
- return self._recurse(iterable, self._recurse, self.depth0 + 1)
+ @property
+ def depth(self):
+ """How many levels deep a recursive loop currently is, starting at 1."""
+ return self.depth0 + 1
- # a nifty trick to enhance the error message if someone tried to call
- # the the loop without or with too many arguments.
- __call__ = loop
- del loop
+ @property
+ def index(self):
+ """Current iteration of the loop, starting at 1."""
+ return self.index0 + 1
- def __repr__(self):
- return '<%s %r/%r>' % (
- self.__class__.__name__,
- self.index,
- self.length
- )
+ @property
+ def revindex0(self):
+ """Number of iterations from the end of the loop, ending at 0.
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index
-class LoopContext(LoopContextBase):
+ @property
+ def revindex(self):
+ """Number of iterations from the end of the loop, ending at 1.
- def __init__(self, iterable, undefined, recurse=None, depth0=0):
- LoopContextBase.__init__(self, undefined, recurse, depth0)
- self._iterator = iter(iterable)
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index0
- # try to get the length of the iterable early. This must be done
- # here because there are some broken iterators around where there
- # __len__ is the number of iterations left (i'm looking at your
- # listreverseiterator!).
- try:
- self._length = len(iterable)
- except (TypeError, AttributeError):
- self._length = None
- self._after = self._safe_next()
+ @property
+ def first(self):
+ """Whether this is the first iteration of the loop."""
+ return self.index0 == 0
+
+ def _peek_next(self):
+ """Return the next element in the iterable, or :data:`missing`
+ if the iterable is exhausted. Only peeks one item ahead, caching
+ the result in :attr:`_last` for use in subsequent checks. The
+ cache is reset when :meth:`__next__` is called.
+ """
+ if self._after is not missing:
+ return self._after
+
+ self._after = next(self._iterator, missing)
+ return self._after
@property
- def length(self):
- if self._length is None:
- # if was not possible to get the length of the iterator when
- # the loop context was created (ie: iterating over a generator)
- # we have to convert the iterable into a sequence and use the
- # length of that + the number of iterations so far.
- iterable = tuple(self._iterator)
- self._iterator = iter(iterable)
- iterations_done = self.index0 + 2
- self._length = len(iterable) + iterations_done
- return self._length
+ def last(self):
+ """Whether this is the last iteration of the loop.
- def __iter__(self):
- return LoopContextIterator(self)
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ return self._peek_next() is missing
- def _safe_next(self):
- try:
- return next(self._iterator)
- except StopIteration:
- return _last_iteration
+ @property
+ def previtem(self):
+ """The item in the previous iteration. Undefined during the
+ first iteration.
+ """
+ if self.first:
+ return self._undefined("there is no previous item")
+ return self._before
-@implements_iterator
-class LoopContextIterator(object):
- """The iterator for a loop context."""
- __slots__ = ('context',)
+ @property
+ def nextitem(self):
+ """The item in the next iteration. Undefined during the last
+ iteration.
- def __init__(self, context):
- self.context = context
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ rv = self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def cycle(self, *args):
+ """Return a value from the given args, cycling through based on
+ the current :attr:`index0`.
+
+ :param args: One or more values to cycle through.
+ """
+ if not args:
+ raise TypeError("no items for cycling given")
+
+ return args[self.index0 % len(args)]
+
+ def changed(self, *value):
+ """Return ``True`` if previously called with a different value
+ (including when called for the first time).
+
+ :param value: One or more values to compare to the last call.
+ """
+ if self._last_changed_value != value:
+ self._last_changed_value = value
+ return True
+
+ return False
def __iter__(self):
return self
def __next__(self):
- ctx = self.context
- ctx.index0 += 1
- if ctx._after is _last_iteration:
- raise StopIteration()
- ctx._before = ctx._current
- ctx._current = ctx._after
- ctx._after = ctx._safe_next()
- return ctx._current, ctx
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = next(self._iterator)
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+ @internalcode
+ def __call__(self, iterable):
+ """When iterating over nested data, render the body of the loop
+ recursively with the given inner iterable data.
+
+ The loop must have the ``recursive`` marker for this to work.
+ """
+ if self._recurse is None:
+ raise TypeError(
+ "The loop must have the 'recursive' marker to be called recursively."
+ )
+
+ return self._recurse(iterable, self._recurse, depth=self.depth)
+
+ def __repr__(self):
+ return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
class Macro(object):
"""Wraps a macro function."""
- def __init__(self, environment, func, name, arguments,
- catch_kwargs, catch_varargs, caller,
- default_autoescape=None):
+ def __init__(
+ self,
+ environment,
+ func,
+ name,
+ arguments,
+ catch_kwargs,
+ catch_varargs,
+ caller,
+ default_autoescape=None,
+ ):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
@@ -488,7 +589,7 @@ def __init__(self, environment, func, name, arguments,
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
- self.explicit_caller = 'caller' in arguments
+ self.explicit_caller = "caller" in arguments
if default_autoescape is None:
default_autoescape = environment.autoescape
self._default_autoescape = default_autoescape
@@ -500,9 +601,8 @@ def __call__(self, *args, **kwargs):
# decide largely based on compile-time information if a macro is
# safe or unsafe. While there was a volatile mode it was largely
# unused for deciding on escaping. This turns out to be
- # problemtic for macros because if a macro is safe or not not so
- # much depends on the escape mode when it was defined but when it
- # was used.
+ # problematic for macros because whether a macro is safe depends not
+ # on the escape mode when it was defined, but rather when it was used.
#
# Because however we export macros from the module system and
# there are historic callers that do not pass an eval context (and
@@ -510,7 +610,7 @@ def __call__(self, *args, **kwargs):
# check here.
#
# This is considered safe because an eval context is not a valid
- # argument to callables otherwise anwyays. Worst case here is
+ # argument to callables otherwise anyway. Worst case here is
# that if no eval context is passed we fall back to the compile
# time autoescape flag.
if args and isinstance(args[0], EvalContext):
@@ -520,7 +620,7 @@ def __call__(self, *args, **kwargs):
autoescape = self._default_autoescape
# try to consume the positional arguments
- arguments = list(args[:self._argument_count])
+ arguments = list(args[: self._argument_count])
off = len(arguments)
# For information why this is necessary refer to the handling
@@ -531,12 +631,12 @@ def __call__(self, *args, **kwargs):
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
- for idx, name in enumerate(self.arguments[len(arguments):]):
+ for name in self.arguments[len(arguments) :]:
try:
value = kwargs.pop(name)
except KeyError:
value = missing
- if name == 'caller':
+ if name == "caller":
found_caller = True
arguments.append(value)
else:
@@ -546,26 +646,31 @@ def __call__(self, *args, **kwargs):
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller and not found_caller:
- caller = kwargs.pop('caller', None)
+ caller = kwargs.pop("caller", None)
if caller is None:
- caller = self._environment.undefined('No caller defined',
- name='caller')
+ caller = self._environment.undefined("No caller defined", name="caller")
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
- if 'caller' in kwargs:
- raise TypeError('macro %r was invoked with two values for '
- 'the special caller argument. This is '
- 'most likely a bug.' % self.name)
- raise TypeError('macro %r takes no keyword argument %r' %
- (self.name, next(iter(kwargs))))
+ if "caller" in kwargs:
+ raise TypeError(
+ "macro %r was invoked with two values for "
+ "the special caller argument. This is "
+ "most likely a bug." % self.name
+ )
+ raise TypeError(
+ "macro %r takes no keyword argument %r"
+ % (self.name, next(iter(kwargs)))
+ )
if self.catch_varargs:
- arguments.append(args[self._argument_count:])
+ arguments.append(args[self._argument_count :])
elif len(args) > self._argument_count:
- raise TypeError('macro %r takes not more than %d argument(s)' %
- (self.name, len(self.arguments)))
+ raise TypeError(
+ "macro %r takes not more than %d argument(s)"
+ % (self.name, len(self.arguments))
+ )
return self._invoke(arguments, autoescape)
@@ -577,16 +682,16 @@ def _invoke(self, arguments, autoescape):
return rv
def __repr__(self):
- return '<%s %s>' % (
+ return "<%s %s>" % (
self.__class__.__name__,
- self.name is None and 'anonymous' or repr(self.name)
+ self.name is None and "anonymous" or repr(self.name),
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
- iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
+ iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
@@ -598,8 +703,13 @@ class Undefined(object):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
- __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
- '_undefined_exception')
+
+ __slots__ = (
+ "_undefined_hint",
+ "_undefined_obj",
+ "_undefined_name",
+ "_undefined_exception",
+ )
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
@@ -607,40 +717,86 @@ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_name = name
self._undefined_exception = exc
+ @property
+ def _undefined_message(self):
+ """Build a message about the undefined value based on how it was
+ accessed.
+ """
+ if self._undefined_hint:
+ return self._undefined_hint
+
+ if self._undefined_obj is missing:
+ return "%r is undefined" % self._undefined_name
+
+ if not isinstance(self._undefined_name, string_types):
+ return "%s has no element %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
+ return "%r has no attribute %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
- """Regular callback function for undefined objects that raises an
- `jinja2.exceptions.UndefinedError` on call.
+ """Raise an :exc:`UndefinedError` when operations are performed
+ on the undefined value.
"""
- if self._undefined_hint is None:
- if self._undefined_obj is missing:
- hint = '%r is undefined' % self._undefined_name
- elif not isinstance(self._undefined_name, string_types):
- hint = '%s has no element %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = '%r has no attribute %r' % (
- object_type_repr(self._undefined_obj),
- self._undefined_name
- )
- else:
- hint = self._undefined_hint
- raise self._undefined_exception(hint)
+ raise self._undefined_exception(self._undefined_message)
@internalcode
def __getattr__(self, name):
- if name[:2] == '__':
+ if name[:2] == "__":
raise AttributeError(name)
return self._fail_with_undefined_error()
- __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
- __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
- __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
- __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
- __float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \
- __rsub__ = _fail_with_undefined_error
+ __add__ = (
+ __radd__
+ ) = (
+ __mul__
+ ) = (
+ __rmul__
+ ) = (
+ __div__
+ ) = (
+ __rdiv__
+ ) = (
+ __truediv__
+ ) = (
+ __rtruediv__
+ ) = (
+ __floordiv__
+ ) = (
+ __rfloordiv__
+ ) = (
+ __mod__
+ ) = (
+ __rmod__
+ ) = (
+ __pos__
+ ) = (
+ __neg__
+ ) = (
+ __call__
+ ) = (
+ __getitem__
+ ) = (
+ __lt__
+ ) = (
+ __le__
+ ) = (
+ __gt__
+ ) = (
+ __ge__
+ ) = (
+ __int__
+ ) = (
+ __float__
+ ) = (
+ __complex__
+ ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
@@ -652,7 +808,7 @@ def __hash__(self):
return id(type(self))
def __str__(self):
- return u''
+ return u""
def __len__(self):
return 0
@@ -663,10 +819,11 @@ def __iter__(self):
def __nonzero__(self):
return False
+
__bool__ = __nonzero__
def __repr__(self):
- return 'Undefined'
+ return "Undefined"
def make_logging_undefined(logger=None, base=None):
@@ -691,6 +848,7 @@ def make_logging_undefined(logger=None, base=None):
"""
if logger is None:
import logging
+
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stderr))
if base is None:
@@ -699,26 +857,27 @@ def make_logging_undefined(logger=None, base=None):
def _log_message(undef):
if undef._undefined_hint is None:
if undef._undefined_obj is missing:
- hint = '%s is undefined' % undef._undefined_name
+ hint = "%s is undefined" % undef._undefined_name
elif not isinstance(undef._undefined_name, string_types):
- hint = '%s has no element %s' % (
+ hint = "%s has no element %s" % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name)
+ undef._undefined_name,
+ )
else:
- hint = '%s has no attribute %s' % (
+ hint = "%s has no attribute %s" % (
object_type_repr(undef._undefined_obj),
- undef._undefined_name)
+ undef._undefined_name,
+ )
else:
hint = undef._undefined_hint
- logger.warning('Template variable warning: %s', hint)
+ logger.warning("Template variable warning: %s", hint)
class LoggingUndefined(base):
-
def _fail_with_undefined_error(self, *args, **kwargs):
try:
return base._fail_with_undefined_error(self, *args, **kwargs)
except self._undefined_exception as e:
- logger.error('Template variable error: %s', str(e))
+ logger.error("Template variable error: %s", str(e))
raise e
def __str__(self):
@@ -732,6 +891,7 @@ def __iter__(self):
return rv
if PY2:
+
def __nonzero__(self):
rv = base.__nonzero__(self)
_log_message(self)
@@ -741,7 +901,9 @@ def __unicode__(self):
rv = base.__unicode__(self)
_log_message(self)
return rv
+
else:
+
def __bool__(self):
rv = base.__bool__(self)
_log_message(self)
@@ -750,6 +912,36 @@ def __bool__(self):
return LoggingUndefined
+# No @implements_to_string decorator here because __str__
+# is not overwritten from Undefined in this class.
+# This would cause a recursion error in Python 2.
+class ChainableUndefined(Undefined):
+ """An undefined that is chainable, where both ``__getattr__`` and
+ ``__getitem__`` return itself rather than raising an
+ :exc:`UndefinedError`.
+
+ >>> foo = ChainableUndefined(name='foo')
+ >>> str(foo.bar['baz'])
+ ''
+ >>> foo.bar['baz'] + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+
+ .. versionadded:: 2.11.0
+ """
+
+ __slots__ = ()
+
+ def __html__(self):
+ return self.__str__()
+
+ def __getattr__(self, _):
+ return self
+
+ __getitem__ = __getattr__
+
+
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
@@ -764,17 +956,18 @@ class DebugUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
+
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
- return u'{{ %s }}' % self._undefined_name
- return '{{ no such element: %s[%r] }}' % (
+ return u"{{ %s }}" % self._undefined_name
+ return "{{ no such element: %s[%r] }}" % (
object_type_repr(self._undefined_obj),
- self._undefined_name
+ self._undefined_name,
)
- return u'{{ undefined value printed: %s }}' % self._undefined_hint
+ return u"{{ undefined value printed: %s }}" % self._undefined_hint
@implements_to_string
@@ -797,12 +990,22 @@ class StrictUndefined(Undefined):
...
jinja2.exceptions.UndefinedError: 'foo' is undefined
"""
+
__slots__ = ()
- __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
- __ne__ = __bool__ = __hash__ = \
- Undefined._fail_with_undefined_error
+ __iter__ = (
+ __str__
+ ) = (
+ __len__
+ ) = (
+ __nonzero__
+ ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
-del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
+del (
+ Undefined.__slots__,
+ ChainableUndefined.__slots__,
+ DebugUndefined.__slots__,
+ StrictUndefined.__slots__,
+)
diff --git a/external/python/jinja2/sandbox.py b/external/python/jinja2/sandbox.py
index 08c22f4f..cfd7993a 100644
--- a/external/python/jinja2/sandbox.py
+++ b/external/python/jinja2/sandbox.py
@@ -1,70 +1,66 @@
# -*- coding: utf-8 -*-
+"""A sandbox layer that ensures unsafe operations cannot be performed.
+Useful when the template itself comes from an untrusted source.
"""
- jinja2.sandbox
- ~~~~~~~~~~~~~~
-
- Adds a sandbox layer to Jinja as it was the default behavior in the old
- Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
- default behavior is easier to use.
-
- The behavior can be changed by subclassing the environment.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-import types
import operator
-from jinja2.environment import Environment
-from jinja2.exceptions import SecurityError
-from jinja2._compat import string_types, PY2, abc, range_type
-from jinja2.utils import Markup
+import types
+import warnings
+from collections import deque
+from string import Formatter
from markupsafe import EscapeFormatter
-from string import Formatter
+from markupsafe import Markup
+from ._compat import abc
+from ._compat import PY2
+from ._compat import range_type
+from ._compat import string_types
+from .environment import Environment
+from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
if PY2:
- UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
- 'func_defaults', 'func_globals'])
+ UNSAFE_FUNCTION_ATTRIBUTES = {
+ "func_closure",
+ "func_code",
+ "func_dict",
+ "func_defaults",
+ "func_globals",
+ }
else:
# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
UNSAFE_FUNCTION_ATTRIBUTES = set()
-
#: unsafe method attributes. function attributes are unsafe for methods too
-UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
+UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
-#: unsafe generator attirbutes.
-UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
-UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code'])
+UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
-UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame'])
-
-import warnings
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
# make sure we don't warn in python 2.6 about stuff we don't care about
-warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
- module='jinja2.sandbox')
-
-from collections import deque
+warnings.filterwarnings(
+ "ignore", "the sets module", DeprecationWarning, module=__name__
+)
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
-
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
+
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
@@ -73,6 +69,7 @@
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
+
_mutable_set_types += (Set,)
except ImportError:
pass
@@ -82,22 +79,46 @@
_mutable_mapping_types += (abc.MutableMapping,)
_mutable_sequence_types += (abc.MutableSequence,)
-
_mutable_spec = (
- (_mutable_set_types, frozenset([
- 'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
- 'symmetric_difference_update', 'update'
- ])),
- (_mutable_mapping_types, frozenset([
- 'clear', 'pop', 'popitem', 'setdefault', 'update'
- ])),
- (_mutable_sequence_types, frozenset([
- 'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
- ])),
- (deque, frozenset([
- 'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
- 'popleft', 'remove', 'rotate'
- ]))
+ (
+ _mutable_set_types,
+ frozenset(
+ [
+ "add",
+ "clear",
+ "difference_update",
+ "discard",
+ "pop",
+ "remove",
+ "symmetric_difference_update",
+ "update",
+ ]
+ ),
+ ),
+ (
+ _mutable_mapping_types,
+ frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
+ ),
+ (
+ _mutable_sequence_types,
+ frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
+ ),
+ (
+ deque,
+ frozenset(
+ [
+ "append",
+ "appendleft",
+ "clear",
+ "extend",
+ "extendleft",
+ "pop",
+ "popleft",
+ "remove",
+ "rotate",
+ ]
+ ),
+ ),
)
@@ -115,7 +136,7 @@ def __init__(self, args, kwargs):
self._last_index = 0
def __getitem__(self, key):
- if key == '':
+ if key == "":
idx = self._last_index
self._last_index += 1
try:
@@ -133,9 +154,9 @@ def __len__(self):
def inspect_format_method(callable):
- if not isinstance(callable, (types.MethodType,
- types.BuiltinMethodType)) or \
- callable.__name__ not in ('format', 'format_map'):
+ if not isinstance(
+ callable, (types.MethodType, types.BuiltinMethodType)
+ ) or callable.__name__ not in ("format", "format_map"):
return None
obj = callable.__self__
if isinstance(obj, string_types):
@@ -186,24 +207,25 @@ def is_internal_attribute(obj, attr):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
- if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
- attr in UNSAFE_METHOD_ATTRIBUTES:
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
- if attr == 'mro':
+ if attr == "mro":
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
- elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType):
+ elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
- elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType):
+ elif hasattr(types, "AsyncGeneratorType") and isinstance(
+ obj, types.AsyncGeneratorType
+ ):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
- return attr.startswith('__')
+ return attr.startswith("__")
def modifies_known_mutable(obj, attr):
@@ -244,28 +266,26 @@ class SandboxedEnvironment(Environment):
raised. However also other exceptions may occur during the rendering so
the caller has to ensure that all exceptions are caught.
"""
+
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
- '+': operator.add,
- '-': operator.sub,
- '*': operator.mul,
- '/': operator.truediv,
- '//': operator.floordiv,
- '**': operator.pow,
- '%': operator.mod
+ "+": operator.add,
+ "-": operator.sub,
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
- default_unop_table = {
- '+': operator.pos,
- '-': operator.neg
- }
+ default_unop_table = {"+": operator.pos, "-": operator.neg}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
@@ -301,7 +321,7 @@ class SandboxedEnvironment(Environment):
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
- method returns `True`, :meth:`call_unop` is excuted for this unary
+ method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
@@ -315,10 +335,9 @@ def intercept_unop(self, operator):
"""
return False
-
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
- self.globals['range'] = safe_range
+ self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
@@ -329,7 +348,7 @@ def is_safe_attribute(self, obj, attr, value):
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
- return not (attr.startswith('_') or is_internal_attribute(obj, attr))
+ return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
@@ -337,8 +356,9 @@ def is_safe_callable(self, obj):
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
- return not (getattr(obj, 'unsafe_callable', False) or
- getattr(obj, 'alters_data', False))
+ return not (
+ getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
+ )
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
@@ -398,11 +418,13 @@ def getattr(self, obj, attribute):
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
- return self.undefined('access to attribute %r of %r '
- 'object is unsafe.' % (
- attribute,
- obj.__class__.__name__
- ), name=attribute, obj=obj, exc=SecurityError)
+ return self.undefined(
+ "access to attribute %r of %r "
+ "object is unsafe." % (attribute, obj.__class__.__name__),
+ name=attribute,
+ obj=obj,
+ exc=SecurityError,
+ )
def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
@@ -413,10 +435,10 @@ def format_string(self, s, args, kwargs, format_func=None):
else:
formatter = SandboxedFormatter(self)
- if format_func is not None and format_func.__name__ == 'format_map':
+ if format_func is not None and format_func.__name__ == "format_map":
if len(args) != 1 or kwargs:
raise TypeError(
- 'format_map() takes exactly one argument %d given'
+ "format_map() takes exactly one argument %d given"
% (len(args) + (kwargs is not None))
)
@@ -427,7 +449,7 @@ def format_string(self, s, args, kwargs, format_func=None):
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
- def call(__self, __context, __obj, *args, **kwargs):
+ def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
@@ -436,7 +458,7 @@ def call(__self, __context, __obj, *args, **kwargs):
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
- raise SecurityError('%r is not safely callable' % (__obj,))
+ raise SecurityError("%r is not safely callable" % (__obj,))
return __context.call(__obj, *args, **kwargs)
@@ -452,16 +474,16 @@ def is_safe_attribute(self, obj, attr, value):
return not modifies_known_mutable(obj, attr)
-# This really is not a public API apparenlty.
+# This really is not a public API apparently.
try:
from _string import formatter_field_name_split
except ImportError:
+
def formatter_field_name_split(field_name):
return field_name._formatter_field_name_split()
class SandboxedFormatterMixin(object):
-
def __init__(self, env):
self._env = env
@@ -475,14 +497,14 @@ def get_field(self, field_name, args, kwargs):
obj = self._env.getitem(obj, i)
return obj, first
-class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
-class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)
diff --git a/external/python/jinja2/tests.py b/external/python/jinja2/tests.py
index bc99d66c..fabd4ce5 100644
--- a/external/python/jinja2/tests.py
+++ b/external/python/jinja2/tests.py
@@ -1,23 +1,17 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.tests
- ~~~~~~~~~~~~
-
- Jinja test functions. Used with the "is" operator.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
+"""Built-in template tests used with the ``is`` operator."""
+import decimal
import operator
import re
-from jinja2.runtime import Undefined
-from jinja2._compat import text_type, string_types, integer_types, abc
-import decimal
-
-number_re = re.compile(r'^-?\d+(\.\d+)?$')
-regex_type = type(number_re)
+from ._compat import abc
+from ._compat import integer_types
+from ._compat import string_types
+from ._compat import text_type
+from .runtime import Undefined
+number_re = re.compile(r"^-?\d+(\.\d+)?$")
+regex_type = type(number_re)
test_callable = callable
@@ -63,6 +57,48 @@ def test_none(value):
return value is None
+def test_boolean(value):
+ """Return true if the object is a boolean value.
+
+ .. versionadded:: 2.11
+ """
+ return value is True or value is False
+
+
+def test_false(value):
+ """Return true if the object is False.
+
+ .. versionadded:: 2.11
+ """
+ return value is False
+
+
+def test_true(value):
+ """Return true if the object is True.
+
+ .. versionadded:: 2.11
+ """
+ return value is True
+
+
+# NOTE: The existing 'number' test matches booleans and floats
+def test_integer(value):
+ """Return true if the object is an integer.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, integer_types) and value is not True and value is not False
+
+
+# NOTE: The existing 'number' test matches booleans and integers
+def test_float(value):
+ """Return true if the object is a float.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, float)
+
+
def test_lower(value):
"""Return true if the variable is lowercased."""
return text_type(value).islower()
@@ -98,7 +134,7 @@ def test_sequence(value):
try:
len(value)
value.__getitem__
- except:
+ except Exception:
return False
return True
@@ -127,7 +163,7 @@ def test_iterable(value):
def test_escaped(value):
"""Check if the value is escaped."""
- return hasattr(value, '__html__')
+ return hasattr(value, "__html__")
def test_in(value, seq):
@@ -139,36 +175,41 @@ def test_in(value, seq):
TESTS = {
- 'odd': test_odd,
- 'even': test_even,
- 'divisibleby': test_divisibleby,
- 'defined': test_defined,
- 'undefined': test_undefined,
- 'none': test_none,
- 'lower': test_lower,
- 'upper': test_upper,
- 'string': test_string,
- 'mapping': test_mapping,
- 'number': test_number,
- 'sequence': test_sequence,
- 'iterable': test_iterable,
- 'callable': test_callable,
- 'sameas': test_sameas,
- 'escaped': test_escaped,
- 'in': test_in,
- '==': operator.eq,
- 'eq': operator.eq,
- 'equalto': operator.eq,
- '!=': operator.ne,
- 'ne': operator.ne,
- '>': operator.gt,
- 'gt': operator.gt,
- 'greaterthan': operator.gt,
- 'ge': operator.ge,
- '>=': operator.ge,
- '<': operator.lt,
- 'lt': operator.lt,
- 'lessthan': operator.lt,
- '<=': operator.le,
- 'le': operator.le,
+ "odd": test_odd,
+ "even": test_even,
+ "divisibleby": test_divisibleby,
+ "defined": test_defined,
+ "undefined": test_undefined,
+ "none": test_none,
+ "boolean": test_boolean,
+ "false": test_false,
+ "true": test_true,
+ "integer": test_integer,
+ "float": test_float,
+ "lower": test_lower,
+ "upper": test_upper,
+ "string": test_string,
+ "mapping": test_mapping,
+ "number": test_number,
+ "sequence": test_sequence,
+ "iterable": test_iterable,
+ "callable": test_callable,
+ "sameas": test_sameas,
+ "escaped": test_escaped,
+ "in": test_in,
+ "==": operator.eq,
+ "eq": operator.eq,
+ "equalto": operator.eq,
+ "!=": operator.ne,
+ "ne": operator.ne,
+ ">": operator.gt,
+ "gt": operator.gt,
+ "greaterthan": operator.gt,
+ "ge": operator.ge,
+ ">=": operator.ge,
+ "<": operator.lt,
+ "lt": operator.lt,
+ "lessthan": operator.lt,
+ "<=": operator.le,
+ "le": operator.le,
}
diff --git a/external/python/jinja2/utils.py b/external/python/jinja2/utils.py
index db9c5d06..6afca810 100644
--- a/external/python/jinja2/utils.py
+++ b/external/python/jinja2/utils.py
@@ -1,44 +1,32 @@
# -*- coding: utf-8 -*-
-"""
- jinja2.utils
- ~~~~~~~~~~~~
-
- Utility functions.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD, see LICENSE for more details.
-"""
-import re
import json
-import errno
+import os
+import re
+import warnings
from collections import deque
+from random import choice
+from random import randrange
+from string import ascii_letters as _letters
+from string import digits as _digits
from threading import Lock
-from jinja2._compat import text_type, string_types, implements_iterator, \
- url_quote, abc
+from markupsafe import escape
+from markupsafe import Markup
-_word_split_re = re.compile(r'(\s+)')
-_punctuation_re = re.compile(
- '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
- '|'.join(map(re.escape, ('(', '<', '&lt;'))),
- '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
- )
-)
-_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
-_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
-_entity_re = re.compile(r'&([^;]+);')
-_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
-_digits = '0123456789'
+from ._compat import abc
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import url_quote
# special singleton representing missing values for the runtime
-missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
+missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
# internal code
internal_code = set()
-concat = u''.join
+concat = u"".join
-_slash_escape = '\\/' not in json.dumps('/')
+_slash_escape = "\\/" not in json.dumps("/")
def contextfunction(f):
@@ -98,24 +86,26 @@ def default(var, default=''):
return default
return var
"""
- from jinja2.runtime import Undefined
+ from .runtime import Undefined
+
return isinstance(obj, Undefined)
def consume(iterable):
"""Consumes an iterable without doing anything with it."""
- for event in iterable:
+ for _ in iterable:
pass
def clear_caches():
- """Jinja2 keeps internal caches for environments and lexers. These are
- used so that Jinja2 doesn't have to recreate environments and lexers all
+ """Jinja keeps internal caches for environments and lexers. These are
+ used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
- from jinja2.environment import _spontaneous_environments
- from jinja2.lexer import _lexer_cache
+ from .environment import _spontaneous_environments
+ from .lexer import _lexer_cache
+
_spontaneous_environments.clear()
_lexer_cache.clear()
@@ -132,12 +122,10 @@ def import_string(import_name, silent=False):
:return: imported object
"""
try:
- if ':' in import_name:
- module, obj = import_name.split(':', 1)
- elif '.' in import_name:
- items = import_name.split('.')
- module = '.'.join(items[:-1])
- obj = items[-1]
+ if ":" in import_name:
+ module, obj = import_name.split(":", 1)
+ elif "." in import_name:
+ module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
@@ -146,15 +134,14 @@ def import_string(import_name, silent=False):
raise
-def open_if_exists(filename, mode='rb'):
+def open_if_exists(filename, mode="rb"):
"""Returns a file descriptor for the filename if that file exists,
- otherwise `None`.
+ otherwise ``None``.
"""
- try:
- return open(filename, mode)
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL):
- raise
+ if not os.path.isfile(filename):
+ return None
+
+ return open(filename, mode)
def object_type_repr(obj):
@@ -163,15 +150,19 @@ def object_type_repr(obj):
example for `None` and `Ellipsis`).
"""
if obj is None:
- return 'None'
+ return "None"
elif obj is Ellipsis:
- return 'Ellipsis'
+ return "Ellipsis"
+
+ cls = type(obj)
+
# __builtin__ in 2.x, builtins in 3.x
- if obj.__class__.__module__ in ('__builtin__', 'builtins'):
- name = obj.__class__.__name__
+ if cls.__module__ in ("__builtin__", "builtins"):
+ name = cls.__name__
else:
- name = obj.__class__.__module__ + '.' + obj.__class__.__name__
- return '%s object' % name
+ name = cls.__module__ + "." + cls.__name__
+
+ return "%s object" % name
def pformat(obj, verbose=False):
@@ -180,9 +171,11 @@ def pformat(obj, verbose=False):
"""
try:
from pretty import pretty
+
return pretty(obj, verbose=verbose)
except ImportError:
from pprint import pformat
+
return pformat(obj)
@@ -200,45 +193,77 @@ def urlize(text, trim_url_limit=None, rel=None, target=None):
If target is not None, a target attribute will be added to the link.
"""
- trim_url = lambda x, limit=trim_url_limit: limit is not None \
- and (x[:limit] + (len(x) >=limit and '...'
- or '')) or x
- words = _word_split_re.split(text_type(escape(text)))
- rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ''
- target_attr = target and ' target="%s"' % escape(target) or ''
+ trim_url = (
+ lambda x, limit=trim_url_limit: limit is not None
+ and (x[:limit] + (len(x) >= limit and "..." or ""))
+ or x
+ )
+ words = re.split(r"(\s+)", text_type(escape(text)))
+ rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
+ target_attr = target and ' target="%s"' % escape(target) or ""
for i, word in enumerate(words):
- match = _punctuation_re.match(word)
+ head, middle, tail = "", word, ""
+ match = re.match(r"^([(<]|<)+", middle)
+
if match:
- lead, middle, trail = match.groups()
- if middle.startswith('www.') or (
- '@' not in middle and
- not middle.startswith('http://') and
- not middle.startswith('https://') and
- len(middle) > 0 and
- middle[0] in _letters + _digits and (
- middle.endswith('.org') or
- middle.endswith('.net') or
- middle.endswith('.com')
- )):
- middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if middle.startswith('http://') or \
- middle.startswith('https://'):
- middle = '<a href="%s"%s%s>%s</a>' % (middle,
- rel_attr, target_attr, trim_url(middle))
- if '@' in middle and not middle.startswith('www.') and \
- not ':' in middle and _simple_email_re.match(middle):
- middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
- if lead + middle + trail != word:
- words[i] = lead + middle + trail
- return u''.join(words)
+ head = match.group()
+ middle = middle[match.end() :]
+
+ # Unlike lead, which is anchored to the start of the string,
+ # need to check that the string ends with any of the characters
+ # before trying to match all of them, to avoid backtracking.
+ if middle.endswith((")", ">", ".", ",", "\n", ">")):
+ match = re.search(r"([)>.,\n]|>)+$", middle)
+
+ if match:
+ tail = match.group()
+ middle = middle[: match.start()]
+
+ if middle.startswith("www.") or (
+ "@" not in middle
+ and not middle.startswith("http://")
+ and not middle.startswith("https://")
+ and len(middle) > 0
+ and middle[0] in _letters + _digits
+ and (
+ middle.endswith(".org")
+ or middle.endswith(".net")
+ or middle.endswith(".com")
+ )
+ ):
+ middle = '<a href="http://%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if middle.startswith("http://") or middle.startswith("https://"):
+ middle = '<a href="%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if (
+ "@" in middle
+ and not middle.startswith("www.")
+ and ":" not in middle
+ and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle)
+ ):
+ middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+
+ words[i] = head + middle + tail
+
+ return u"".join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
"""Generate some lorem ipsum for the template."""
- from jinja2.constants import LOREM_IPSUM_WORDS
- from random import choice, randrange
+ from .constants import LOREM_IPSUM_WORDS
+
words = LOREM_IPSUM_WORDS.split()
result = []
@@ -263,43 +288,53 @@ def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
- word += ','
+ word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
- word += '.'
+ word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
- p = u' '.join(p)
- if p.endswith(','):
- p = p[:-1] + '.'
- elif not p.endswith('.'):
- p += '.'
+ p = u" ".join(p)
+ if p.endswith(","):
+ p = p[:-1] + "."
+ elif not p.endswith("."):
+ p += "."
result.append(p)
if not html:
- return u'\n\n'.join(result)
- return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
+ return u"\n\n".join(result)
+ return Markup(u"\n".join(u"%s
" % escape(x) for x in result))
-def unicode_urlencode(obj, charset='utf-8', for_qs=False):
- """URL escapes a single bytestring or unicode string with the
- given charset if applicable to URL safe quoting under all rules
- that need to be considered under all supported Python versions.
+def unicode_urlencode(obj, charset="utf-8", for_qs=False):
+ """Quote a string for use in a URL using the given charset.
- If non strings are provided they are converted to their unicode
- representation first.
+ This function is misnamed, it is a wrapper around
+ :func:`urllib.parse.quote`.
+
+ :param obj: String or bytes to quote. Other types are converted to
+ string then encoded to bytes using the given charset.
+ :param charset: Encode text to bytes using this charset.
+ :param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, string_types):
obj = text_type(obj)
+
if isinstance(obj, text_type):
obj = obj.encode(charset)
- safe = not for_qs and b'/' or b''
- rv = text_type(url_quote(obj, safe))
+
+ safe = b"" if for_qs else b"/"
+ rv = url_quote(obj, safe)
+
+ if not isinstance(rv, text_type):
+ rv = rv.decode("utf-8")
+
if for_qs:
- rv = rv.replace('%20', '+')
+ rv = rv.replace("%20", "+")
+
return rv
@@ -326,9 +361,9 @@ def _postinit(self):
def __getstate__(self):
return {
- 'capacity': self.capacity,
- '_mapping': self._mapping,
- '_queue': self._queue
+ "capacity": self.capacity,
+ "_mapping": self._mapping,
+ "_queue": self._queue,
}
def __setstate__(self, d):
@@ -342,7 +377,7 @@ def copy(self):
"""Return a shallow copy of the instance."""
rv = self.__class__(self.capacity)
rv._mapping.update(self._mapping)
- rv._queue = deque(self._queue)
+ rv._queue.extend(self._queue)
return rv
def get(self, key, default=None):
@@ -356,15 +391,11 @@ def setdefault(self, key, default=None):
"""Set `default` if the key is not in the cache otherwise
leave unchanged. Return the value of this key.
"""
- self._wlock.acquire()
try:
- try:
- return self[key]
- except KeyError:
- self[key] = default
- return default
- finally:
- self._wlock.release()
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
def clear(self):
"""Clear the cache."""
@@ -384,10 +415,7 @@ def __len__(self):
return len(self._mapping)
def __repr__(self):
- return '<%s %r>' % (
- self.__class__.__name__,
- self._mapping
- )
+ return "<%s %r>" % (self.__class__.__name__, self._mapping)
def __getitem__(self, key):
"""Get an item from the cache. Moves the item up so that it has the
@@ -436,7 +464,6 @@ def __delitem__(self, key):
try:
self._remove(key)
except ValueError:
- # __getitem__ is not locked, it might happen
pass
finally:
self._wlock.release()
@@ -449,6 +476,12 @@ def items(self):
def iteritems(self):
"""Iterate over all items."""
+ warnings.warn(
+ "'iteritems()' will be removed in version 3.0. Use"
+ " 'iter(cache.items())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
return iter(self.items())
def values(self):
@@ -457,6 +490,22 @@ def values(self):
def itervalue(self):
"""Iterate over all values."""
+ warnings.warn(
+ "'itervalue()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self.values())
+
+ def itervalues(self):
+ """Iterate over all values."""
+ warnings.warn(
+ "'itervalues()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
return iter(self.values())
def keys(self):
@@ -467,12 +516,19 @@ def iterkeys(self):
"""Iterate over all keys in the cache dict, ordered by
the most recent usage.
"""
- return reversed(tuple(self._queue))
+ warnings.warn(
+ "'iterkeys()' will be removed in version 3.0. Use"
+ " 'iter(cache.keys())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self)
- __iter__ = iterkeys
+ def __iter__(self):
+ return reversed(tuple(self._queue))
def __reversed__(self):
- """Iterate over the values in the cache dict, oldest items
+ """Iterate over the keys in the cache dict, oldest items
coming first.
"""
return iter(tuple(self._queue))
@@ -483,10 +539,12 @@ def __reversed__(self):
abc.MutableMapping.register(LRUCache)
-def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
- disabled_extensions=(),
- default_for_string=True,
- default=False):
+def select_autoescape(
+ enabled_extensions=("html", "htm", "xml"),
+ disabled_extensions=(),
+ default_for_string=True,
+ default=False,
+):
"""Intelligently sets the initial value of autoescaping based on the
filename of the template. This is the recommended way to configure
autoescaping if you do not want to write a custom function yourself.
@@ -521,10 +579,9 @@ def select_autoescape(enabled_extensions=('html', 'htm', 'xml'),
.. versionadded:: 2.9
"""
- enabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in enabled_extensions)
- disabled_patterns = tuple('.' + x.lstrip('.').lower()
- for x in disabled_extensions)
+ enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
+ disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
+
def autoescape(template_name):
if template_name is None:
return default_for_string
@@ -534,6 +591,7 @@ def autoescape(template_name):
if template_name.endswith(disabled_patterns):
return False
return default
+
return autoescape
@@ -557,35 +615,63 @@ def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
"""
if dumper is None:
dumper = json.dumps
- rv = dumper(obj, **kwargs) \
- .replace(u'<', u'\\u003c') \
- .replace(u'>', u'\\u003e') \
- .replace(u'&', u'\\u0026') \
- .replace(u"'", u'\\u0027')
+ rv = (
+ dumper(obj, **kwargs)
+ .replace(u"<", u"\\u003c")
+ .replace(u">", u"\\u003e")
+ .replace(u"&", u"\\u0026")
+ .replace(u"'", u"\\u0027")
+ )
return Markup(rv)
-@implements_iterator
class Cycler(object):
- """A cycle helper for templates."""
+ """Cycle through values by yield them one at a time, then restarting
+ once the end is reached. Available as ``cycler`` in templates.
+
+ Similar to ``loop.cycle``, but can be used outside loops or across
+ multiple loops. For example, render a list of folders and files in a
+ list, alternating giving them "odd" and "even" classes.
+
+ .. code-block:: html+jinja
+
+ {% set row_class = cycler("odd", "even") %}
+
+ {% for folder in folders %}
+ - {{ folder }}
+ {% endfor %}
+ {% for file in files %}
+
- {{ file }}
+ {% endfor %}
+
+
+ :param items: Each positional argument will be yielded in the order
+ given for each cycle.
+
+ .. versionadded:: 2.1
+ """
def __init__(self, *items):
if not items:
- raise RuntimeError('at least one item has to be provided')
+ raise RuntimeError("at least one item has to be provided")
self.items = items
- self.reset()
+ self.pos = 0
def reset(self):
- """Resets the cycle."""
+ """Resets the current item to the first item."""
self.pos = 0
@property
def current(self):
- """Returns the current item."""
+ """Return the current item. Equivalent to the item that will be
+ returned next time :meth:`next` is called.
+ """
return self.items[self.pos]
def next(self):
- """Goes one item ahead and returns it."""
+ """Return the current item, then advance :attr:`current` to the
+ next item.
+ """
rv = self.current
self.pos = (self.pos + 1) % len(self.items)
return rv
@@ -596,27 +682,28 @@ def next(self):
class Joiner(object):
"""A joining helper for templates."""
- def __init__(self, sep=u', '):
+ def __init__(self, sep=u", "):
self.sep = sep
self.used = False
def __call__(self):
if not self.used:
self.used = True
- return u''
+ return u""
return self.sep
class Namespace(object):
"""A namespace object that can hold arbitrary attributes. It may be
- initialized from a dictionary or with keyword argments."""
+ initialized from a dictionary or with keyword arguments."""
- def __init__(*args, **kwargs):
+ def __init__(*args, **kwargs): # noqa: B902
self, args = args[0], args[1:]
self.__attrs = dict(*args, **kwargs)
def __getattribute__(self, name):
- if name == '_Namespace__attrs':
+ # __class__ is needed for the awaitable check in async mode
+ if name in {"_Namespace__attrs", "__class__"}:
return object.__getattribute__(self, name)
try:
return self.__attrs[name]
@@ -627,16 +714,24 @@ def __setitem__(self, name, value):
self.__attrs[name] = value
def __repr__(self):
- return '<Namespace %r>' % self.__attrs
+ return "<Namespace %r>" % self.__attrs
# does this python version support async for in and async generators?
try:
- exec('async def _():\n async for _ in ():\n yield _')
+ exec("async def _():\n async for _ in ():\n yield _")
have_async_gen = True
except SyntaxError:
have_async_gen = False
-# Imported here because that's where it was in the past
-from markupsafe import Markup, escape, soft_unicode
+def soft_unicode(s):
+ from markupsafe import soft_unicode
+
+ warnings.warn(
+ "'jinja2.utils.soft_unicode' will be removed in version 3.0."
+ " Use 'markupsafe.soft_unicode' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return soft_unicode(s)
diff --git a/external/python/jinja2/visitor.py b/external/python/jinja2/visitor.py
index ba526dfa..d1365bf1 100644
--- a/external/python/jinja2/visitor.py
+++ b/external/python/jinja2/visitor.py
@@ -1,14 +1,8 @@
# -*- coding: utf-8 -*-
+"""API for traversing the AST nodes. Implemented by the compiler and
+meta introspection.
"""
- jinja2.visitor
- ~~~~~~~~~~~~~~
-
- This module implements a visitor for the nodes.
-
- :copyright: (c) 2017 by the Jinja Team.
- :license: BSD.
-"""
-from jinja2.nodes import Node
+from .nodes import Node
class NodeVisitor(object):
@@ -28,7 +22,7 @@ def get_visitor(self, node):
exists for this node. In that case the generic visit function is
used instead.
"""
- method = 'visit_' + node.__class__.__name__
+ method = "visit_" + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
diff --git a/maintainer-scripts/common.sh b/maintainer-scripts/common.sh
index 57761ac0..c406ad29 100644
--- a/maintainer-scripts/common.sh
+++ b/maintainer-scripts/common.sh
@@ -36,7 +36,7 @@ makeSubset() {
COMMON_FILES=".gitignore .gitattributes .git-blame-ignore-revs CODE_OF_CONDUCT.md LICENSES .reuse .editorconfig HOTFIX"
export COMMON_FILES
-COMMON_EXCLUDE_PATTERN="KhronosExperimental"
+COMMON_EXCLUDE_PATTERN="(KhronosExperimental|KhronosConfidential)"
export COMMON_EXCLUDE_PATTERN
add_to_tar() {
@@ -129,7 +129,7 @@ getDocsFilenames() {
specification/sources/extprocess/ \
include/ \
specification/ \
- | grep -v "${COMMON_EXCLUDE_PATTERN}" \
+ | grep -E -v "${COMMON_EXCLUDE_PATTERN}" \
| grep -v "specification/loader" \
| grep -v "vuid[.]adoc" \
| grep -v "CMakeLists.txt" \
@@ -212,7 +212,7 @@ getSDKSourceFilenames() {
src/tests \
src/version.cmake \
src/version.gradle \
- | grep -v "${COMMON_EXCLUDE_PATTERN}" \
+ | grep -E -v "${COMMON_EXCLUDE_PATTERN}" \
| grep -v "conformance" \
| grep -v "template_gen_dispatch" \
| grep -v "function_info" \
@@ -251,7 +251,7 @@ getSDKFilenames() {
src/external/jsoncpp \
src/loader \
src/version.cmake \
- | grep -v "${COMMON_EXCLUDE_PATTERN}" \
+ | grep -E -v "${COMMON_EXCLUDE_PATTERN}" \
| grep -v "gfxwrapper" \
| grep -v "include/.gitignore" \
| grep -v "images"
@@ -313,7 +313,7 @@ getConformanceFilenames() {
src/scripts \
src/version.cmake \
src/version.gradle \
- | grep -v "${COMMON_EXCLUDE_PATTERN}" \
+ | grep -E -v "${COMMON_EXCLUDE_PATTERN}" \
| grep -v "htmldiff" \
| grep -v "katex"
}
diff --git a/specification/Makefile b/specification/Makefile
index 9bb35546..118dda23 100644
--- a/specification/Makefile
+++ b/specification/Makefile
@@ -32,7 +32,7 @@ ifneq (,$(strip $(VERY_STRICT)))
ASCIIDOC := $(ASCIIDOC) --failure-level WARN
endif
-SPECREVISION = 1.0.33
+SPECREVISION = 1.0.34
REVISION_COMPONENTS = $(subst ., ,$(SPECREVISION))
MAJORMINORVER = $(word 1,$(REVISION_COMPONENTS)).$(word 2,$(REVISION_COMPONENTS))
diff --git a/specification/registry/xr.xml b/specification/registry/xr.xml
index a3520581..b6053cc5 100644
--- a/specification/registry/xr.xml
+++ b/specification/registry/xr.xml
@@ -45,6 +45,7 @@ maintained in the default branch of the Khronos OpenXR GitHub project.
+
@@ -132,7 +133,7 @@ maintained in the default branch of the Khronos OpenXR GitHub project.
updates them automatically by processing a line at a time.
-->
// OpenXR current version number.
-#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 33)
+#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 34)
XR_DEFINE_HANDLE(XrFaceTrackerFB)
+
+ XR_DEFINE_HANDLE(XrFaceTracker2FB)
+
XR_DEFINE_HANDLE(XrBodyTrackerFB)
@@ -602,6 +606,12 @@ maintained in the default branch of the Khronos OpenXR GitHub project.
+
+
+
+
+
+
@@ -1701,6 +1711,38 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrTime time
+
+
+ XrStructureType type
+ void* next
+ XrBool32 supportsVisualFaceTracking
+ XrBool32 supportsAudioFaceTracking
+
+
+ XrStructureType type
+ const void* next
+ XrFaceExpressionSet2FB faceExpressionSet
+ uint32_t requestedDataSourceCount
+ XrFaceTrackingDataSource2FB* requestedDataSources
+
+
+ XrStructureType type
+ const void* next
+ XrTime time
+
+
+ XrStructureType type
+ void* next
+ uint32_t weightCount
+ float* weights
+ uint32_t confidenceCount
+ float* confidences
+ XrBool32 isValid
+ XrBool32 isEyeFollowingBlendshapesValid
+ XrFaceTrackingDataSource2FB dataSource
+ XrTime time
+
+
XrStructureType type
@@ -2583,6 +2625,23 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrUuidEXT* uuids
+
+
+ XrStructureType type
+ const void* next
+
+
+
+ XrStructureType type
+ void* next
+ uint32_t vertexCapacityInput
+ uint32_t vertexCountOutput
+ XrVector3f* vertices
+ uint32_t indexCapacityInput
+ uint32_t indexCountOutput
+ uint32_t* indices
+
+
float width
@@ -3495,6 +3554,35 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrPosef poseInMarkerSpace
+
+
+ XrStructureType type
+ void* next
+ XrExtent2Di recommendedImageDimensions
+ XrBool32 isValid
+
+
+
+ XrStructureType type
+ const void* next
+ const XrCompositionLayerBaseHeader* layer
+ XrTime predictedDisplayTime
+
+
+
+
+ XrStructureType type
+ void* next
+ XrBool32 supportsUserPresence
+
+
+
+ XrStructureType type
+ const void* next
+ XrSession session
+ XrBool32 isUserPresent
+
+
@@ -3858,6 +3946,94 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -4338,6 +4514,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
+
@@ -5337,6 +5514,24 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrFaceExpressionWeightsFB* expressionWeights
+
+
+ XrResult xrCreateFaceTracker2FB
+ XrSession session
+ const XrFaceTrackerCreateInfo2FB* createInfo
+ XrFaceTracker2FB* faceTracker
+
+
+ XrResult xrDestroyFaceTracker2FB
+ XrFaceTracker2FB faceTracker
+
+
+ XrResult xrGetFaceExpressionWeights2FB
+ XrFaceTracker2FB faceTracker
+ const XrFaceExpressionInfo2FB* expressionInfo
+ XrFaceExpressionWeights2FB* expressionWeights
+
+
XrResult xrCreateBodyTrackerFB
@@ -5849,6 +6044,14 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrSpaceContainerFB* spaceContainerOutput
+
+
+ XrResult xrGetSpaceTriangleMeshMETA
+ XrSpace space
+ const XrSpaceTriangleMeshGetInfoMETA* getInfo
+ XrSpaceTriangleMeshMETA* triangleMeshOutput
+
+
XrResult xrGetSpaceBoundingBox2DFB
@@ -6146,6 +6349,14 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
XrSpaceUserFB user
+
+
+ XrResult xrGetRecommendedLayerResolutionMETA
+ XrSession session
+ const XrRecommendedLayerResolutionGetInfoMETA* info
+ XrRecommendedLayerResolutionMETA* resolution
+
+
XrResult xrApplyForceFeedbackCurlMNDX
@@ -7667,8 +7878,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -7910,8 +8119,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8236,8 +8443,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8266,8 +8471,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8430,8 +8633,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8513,8 +8714,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8534,8 +8733,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -8617,9 +8814,9 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
+
@@ -8919,10 +9116,10 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
-
+
+
@@ -8978,8 +9175,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -9512,7 +9707,7 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
@@ -10270,9 +10465,9 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
+
@@ -10337,10 +10532,17 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
-
+
+
+
+
+
+
+
+
+
@@ -10463,10 +10665,20 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
@@ -10477,10 +10689,11 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
-
+
+
+
@@ -10590,10 +10803,34 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -12036,10 +12273,16 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
+
+
+
+
+
+
+
@@ -12242,8 +12485,6 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
-
-
@@ -13906,6 +14147,27 @@ typedef XrResult (XRAPI_PTR *PFN_xrCreateApiLayerInstance)(
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/specification/scripts/creflectiongenerator.py b/specification/scripts/creflectiongenerator.py
index 4fd5f27f..ac05aeed 100644
--- a/specification/scripts/creflectiongenerator.py
+++ b/specification/scripts/creflectiongenerator.py
@@ -25,6 +25,13 @@ def __init__(self, parent_type_name, unprotected_structs, protect_sets_and_prote
self.protect_sets_and_protected_structs = protect_sets_and_protected_structs
+class CommandData:
+ """Represents a OpenXR command"""
+
+ def __init__(self, commandName, featureName):
+ self.commandName = commandName
+ self.featureName = featureName
+
class StructData:
"""Represents a OpenXR struct type"""
@@ -71,6 +78,7 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.env = make_jinja_environment(file_with_templates_as_sibs=__file__)
self.structs = []
+ self.commands = []
self.enums = []
self.bitmasks = []
self.protects = set()
@@ -119,6 +127,12 @@ def endFile(self):
((name, data) for name, data in self.registry.extdict.items()
if data.supported != 'disabled'))
+ functions_by_feature = {}
+ for x in self.commands:
+ if x.featureName not in functions_by_feature:
+ functions_by_feature[x.featureName] = []
+ functions_by_feature[x.featureName].append((x.commandName, x.featureName))
+
extensions.sort(key=lambda x: int(x[1].number))
file_data += self.template.render(
unprotectedStructs=unprotected_structs,
@@ -127,7 +141,8 @@ def endFile(self):
enums=self.enums,
bitmasks=self.bitmasks,
extensions=extensions,
- polymorphic_struct_families=polymorphic_struct_families)
+ polymorphic_struct_families=polymorphic_struct_families,
+ functions_by_feature=functions_by_feature)
write(file_data, file=self.outFile)
# Finish processing in superclass
@@ -151,6 +166,14 @@ def genType(self, typeinfo, name, alias):
self.parents[parent_struct] = set()
self.parents[parent_struct].add(name)
+ def genCmd(self, cmdinfo, name, alias):
+ OutputGenerator.genCmd(self, cmdinfo, name, alias)
+
+ if alias:
+ return
+
+ self.commands.append(CommandData(name, self.featureName))
+
def genStruct(self, typeinfo, typeName, alias):
OutputGenerator.genStruct(self, typeinfo, typeName, alias)
diff --git a/specification/scripts/docgenerator.py b/specification/scripts/docgenerator.py
index c8ea1d3b..c1aa4f12 100644
--- a/specification/scripts/docgenerator.py
+++ b/specification/scripts/docgenerator.py
@@ -354,8 +354,6 @@ def genType(self, typeinfo, name, alias):
name, category))
else:
body = self.genRequirements(name)
- if category in ('define',):
- body = body.strip()
if alias:
# If the type is an alias, just emit a typedef declaration
body += 'typedef ' + alias + ' ' + name + ';\n'
@@ -365,7 +363,11 @@ def genType(self, typeinfo, name, alias):
# Replace tags with an APIENTRY-style string
# (from self.genOpts). Copy other text through unchanged.
# If the resulting text is an empty string, do not emit it.
- body += noneStr(typeElem.text)
+ text = noneStr(typeElem.text)
+ if category in ('define',):
+ text = text.lstrip()
+ body += text
+
for elem in typeElem:
if elem.tag == 'apientry':
body += self.genOpts.apientry + noneStr(elem.tail)
diff --git a/specification/scripts/jinja_helpers.py b/specification/scripts/jinja_helpers.py
index 95495c10..67b6c678 100644
--- a/specification/scripts/jinja_helpers.py
+++ b/specification/scripts/jinja_helpers.py
@@ -58,6 +58,11 @@ def _protect_end(entity):
return "#endif // {}".format(entity.protect_string)
return ""
+def _remove_prefix(s: str, prefix: str):
+ if s.startswith(prefix):
+ return s[len(prefix):]
+ return s
+
def make_jinja_environment(file_with_templates_as_sibs=None, search_path=None):
"""Create a Jinja2 environment customized to generate C/C++ headers/code for Khronos APIs.
@@ -118,6 +123,7 @@ def make_jinja_environment(file_with_templates_as_sibs=None, search_path=None):
env.filters['undecorate'] = _undecorate
env.filters['base_name'] = _base_name
env.filters['collapse_whitespace'] = _collapse_whitespace
+ env.filters['remove_prefix'] = _remove_prefix
env.globals['protect_begin'] = _protect_begin
env.globals['protect_end'] = _protect_end
diff --git a/specification/scripts/template_openxr_reflection.h b/specification/scripts/template_openxr_reflection.h
index 664a5d1d..3c668ca8 100644
--- a/specification/scripts/template_openxr_reflection.h
+++ b/specification/scripts/template_openxr_reflection.h
@@ -109,4 +109,21 @@ XR_ENUM_STR(XrResult);
//## Preceding line intentionally left blank to absorb the trailing backslash
+
+//# for featname in functions_by_feature
+/// For every function defined by /*{ featname }*/ in this version of the spec,
+/// calls your macro with the function name and extension name.
+/// Trims the leading `xr` from the function name and the leading `XR_` from the feature name,
+/// because it is easy to add back but impossible to remove with the preprocessor.
+#define XR_LIST_FUNCTIONS_/*{ featname }*/(_) \
+//# for command, feature in functions_by_feature[featname]
+ _(/*{ command | remove_prefix("xr") }*/, /*{ feature | remove_prefix("XR_") }*/) \
+//# endfor
+
+//## Preceding line intentionally left blank to absorb the trailing backslash
+
+//# endfor
+
+//## Preceding line intentionally left blank to absorb the trailing backslash
+
#endif
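As a rough illustration of how these generated X-macro lists might be consumed (a hedged sketch: the macro name XR_LIST_FUNCTIONS_XR_VERSION_1_0 and its three entries below are illustrative stand-ins, not actual generated output), an application could expand one list to enumerate every function together with its feature:

#include <cstdio>

// Hypothetical stand-in for one generated list; the real macros are emitted per
// feature/extension by the template above, with the "xr"/"XR_" prefixes trimmed.
#define XR_LIST_FUNCTIONS_XR_VERSION_1_0(_) \
    _(GetInstanceProcAddr, VERSION_1_0)     \
    _(CreateInstance, VERSION_1_0)          \
    _(DestroyInstance, VERSION_1_0)

// Re-add the trimmed prefixes while stringifying each entry.
#define PRINT_FUNCTION(name, feature) std::printf("xr" #name " (XR_" #feature ")\n");

int main() {
    XR_LIST_FUNCTIONS_XR_VERSION_1_0(PRINT_FUNCTION)
    return 0;
}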
diff --git a/src/common/gfxwrapper_opengl.c b/src/common/gfxwrapper_opengl.c
index f7739415..595f1d6e 100644
--- a/src/common/gfxwrapper_opengl.c
+++ b/src/common/gfxwrapper_opengl.c
@@ -512,7 +512,7 @@ void GlInitExtensions() {
glUniform2fv = (PFNGLUNIFORM2FVPROC)GetExtension("glUniform2fv");
glUniform3fv = (PFNGLUNIFORM3FVPROC)GetExtension("glUniform3fv");
glUniform4fv = (PFNGLUNIFORM4FVPROC)GetExtension("glUniform4fv");
- glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)GetExtension("glUniformMatrix3fv");
+ glUniformMatrix2fv = (PFNGLUNIFORMMATRIX2FVPROC)GetExtension("glUniformMatrix2fv");
glUniformMatrix2x3fv = (PFNGLUNIFORMMATRIX2X3FVPROC)GetExtension("glUniformMatrix2x3fv");
glUniformMatrix2x4fv = (PFNGLUNIFORMMATRIX2X4FVPROC)GetExtension("glUniformMatrix2x4fv");
glUniformMatrix3x2fv = (PFNGLUNIFORMMATRIX3X2FVPROC)GetExtension("glUniformMatrix3x2fv");
diff --git a/src/common/platform_utils.hpp b/src/common/platform_utils.hpp
index c4d75bf2..35369a14 100644
--- a/src/common/platform_utils.hpp
+++ b/src/common/platform_utils.hpp
@@ -323,6 +323,8 @@ static inline std::string PlatformUtilsGetSecureEnv(const char* name) {
const std::string envValue = PlatformUtilsGetEnv(name);
// Do not allow high integrity processes to act on data that can be controlled by medium integrity processes.
+ // Specifically, medium integrity processes can set environment variables which could then
+ // be read by high integrity processes.
if (IsHighIntegrityLevel()) {
if (!envValue.empty()) {
LogPlatformUtilsError(std::string("!!! WARNING !!! Environment variable ") + name +
diff --git a/src/conformance/conformance_layer/CustomHandleState.h b/src/conformance/conformance_layer/CustomHandleState.h
index 60b606ad..34245b89 100644
--- a/src/conformance/conformance_layer/CustomHandleState.h
+++ b/src/conformance/conformance_layer/CustomHandleState.h
@@ -17,12 +17,30 @@
#pragma once
#include "Common.h"
+#include "HandleState.h"
+
+#include <atomic>
+
+//
+// XrInstance
+//
+namespace instance
+{
+ HandleState* GetInstanceState(XrInstance handle);
+}
//
// XrSession
//
namespace session
{
+ enum class SyncActionsState : uint32_t
+ {
+ NOT_CALLED_SINCE_QUEUE_EXHAUST,
+ CALLED_SINCE_QUEUE_EXHAUST,
+ ONGOING,
+ };
+
struct CustomSessionState : ICustomHandleState
{
std::mutex lock;
@@ -32,6 +50,7 @@ namespace session
bool sessionExitRequested{false};
bool frameBegun{false};
bool headless{false}; //< true if a headless extension is enabled *and* in use
+ std::atomic<SyncActionsState> syncActionsState{SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST};
XrStructureType graphicsBinding{XR_TYPE_UNKNOWN};
XrTime lastPredictedDisplayTime{0};
XrDuration lastPredictedDisplayPeriod{0};
@@ -46,6 +65,8 @@ namespace session
void SessionStateChanged(ConformanceHooksBase* conformanceHooks, const XrEventDataSessionStateChanged* sessionStateChanged);
void VisibilityMaskChanged(ConformanceHooksBase* conformanceHooks, const XrEventDataVisibilityMaskChangedKHR* visibilityMaskChanged);
+ void InteractionProfileChanged(ConformanceHooksBase* conformanceHooks,
+ const XrEventDataInteractionProfileChanged* interactionProfileChanged);
} // namespace session
//
diff --git a/src/conformance/conformance_layer/HandleState.h b/src/conformance/conformance_layer/HandleState.h
index c06937a7..2ca99bd7 100644
--- a/src/conformance/conformance_layer/HandleState.h
+++ b/src/conformance/conformance_layer/HandleState.h
@@ -26,6 +26,7 @@
#include
#include
+/// Base class for "custom" handle state that differs between handle types
struct ICustomHandleState
{
virtual ~ICustomHandleState() = default;
@@ -34,7 +35,7 @@ struct ICustomHandleState
using IntHandle = uint64_t; // A common type for all handles so a single map can be used.
struct ConformanceHooksBase; // forward-declare
-// Common state kept around for all XR handles.
+/// Common state kept around for all XR handles.
struct HandleState
{
HandleState(IntHandle handle_, XrObjectType type, HandleState* parent, std::shared_ptr<ConformanceHooksBase> conformanceHooks)
@@ -42,6 +43,7 @@ struct HandleState
{
}
+ /// "fork-exec" for handles, basically. Called from generated ConformanceHooksBase implementations
std::unique_ptr<HandleState> CloneForChild(IntHandle handle_, XrObjectType childType)
{
// Note that the cloned HandleState will start with a null customState and no children.
@@ -65,13 +67,14 @@ struct HandleState
mutable std::mutex mutex;
+ /// Non-owning pointers to handle state of child handles.
std::vector<HandleState*> children;
- // Additional data stored by the hand-coded validations.
+ /// Additional data stored by the hand-coded validations.
std::unique_ptr<ICustomHandleState> customState;
};
-// Inherit from std::runtime_error so it can be caught in the ABI boundary.
+/// Handle exception type: Inherit from std::runtime_error so it can be caught in the ABI boundary.
struct HandleException : public std::runtime_error
{
HandleException(const std::string& message) : std::runtime_error(message)
@@ -85,4 +88,6 @@ void UnregisterHandleStateInternal(std::unique_lock& lockProof, Hand
void UnregisterHandleState(HandleStateKey key);
void RegisterHandleState(std::unique_ptr handleState);
+/// Retrieve common handle state based on a handle and object type enum.
+/// Throws if not found.
HandleState* GetHandleState(HandleStateKey key);
diff --git a/src/conformance/conformance_layer/Instance.cpp b/src/conformance/conformance_layer/Instance.cpp
index b3380bd8..108491dc 100644
--- a/src/conformance/conformance_layer/Instance.cpp
+++ b/src/conformance/conformance_layer/Instance.cpp
@@ -16,10 +16,22 @@
#include "ConformanceHooks.h"
#include "CustomHandleState.h"
+#include "HandleState.h"
#include "RuntimeFailure.h"
+#include
#include
#include
+#include
+
+namespace instance
+{
+ HandleState* GetInstanceState(XrInstance handle)
+ {
+ return GetHandleState({(IntHandle)handle, XR_OBJECT_TYPE_INSTANCE});
+ }
+
+} // namespace instance
/////////////////
// ABI
@@ -28,18 +40,46 @@
XrResult ConformanceHooks::xrPollEvent(XrInstance instance, XrEventDataBuffer* eventData)
{
const XrResult result = ConformanceHooksBase::xrPollEvent(instance, eventData);
+
+ if (result == XR_EVENT_UNAVAILABLE) {
+ const HandleState* const instanceState = instance::GetInstanceState(instance);
+
+ // Clear the "xrSyncActions called" flag for all known sessions
+ for (HandleState* childState : instanceState->children) {
+ if (childState->type != XR_OBJECT_TYPE_SESSION) {
+ continue;
+ }
+ session::CustomSessionState* const customSessionState =
+ dynamic_cast<session::CustomSessionState*>(childState->customState.get());
+
+ // avoid setting queue exhaust flag while xrSyncActions is ongoing
+ // caveat: it is technically possible but unlikely that an entire xrSyncActions has happened
+ // since this function forwarded the xrPollEvent call
+ session::SyncActionsState exchangeIfState = session::SyncActionsState::CALLED_SINCE_QUEUE_EXHAUST;
+ customSessionState->syncActionsState.compare_exchange_strong(
+ exchangeIfState, session::SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST, //
+ std::memory_order::memory_order_seq_cst, std::memory_order::memory_order_seq_cst);
+ }
+ }
+
if (result != XR_SUCCESS) {
+ // exit now if we don't have a good event
return result;
}
- try {
- switch ((int)eventData->type) { // int cast so compiler doesn't warn about other enumerants.
+ // For each known event type, check if that's the current event type,
+ // and if so, cast the event to the derived type then call checkEventPayload().
+ // This will end up choosing an overload per event data type.
+
+ // macro for a case statement used when the required #defines exist for that struct type:
+ // reinterpret_cast then call checkEventPayload
#define MAKE_CASE(STRUCT_TYPE, TYPE_ENUM) \
case TYPE_ENUM: { \
const auto typed = reinterpret_cast<const STRUCT_TYPE*>(eventData); \
checkEventPayload(typed); \
break; \
}
+ // macro for a case statement where a type is not available due to lack of defines, but the structure type enum still exists (needed by the reflection-generated macro)
#define MAKE_UNAVAIL_CASE(STRUCT_TYPE, TYPE_ENUM) \
case TYPE_ENUM: { \
POSSIBLE_NONCONFORMANT( \
@@ -47,6 +87,12 @@ XrResult ConformanceHooks::xrPollEvent(XrInstance instance, XrEventDataBuffer* e
eventData->type); \
break; \
}
+
+ try {
+ switch ((int)eventData->type) { // int cast so compiler doesn't warn about other enumerants.
+
+ // Use the reflection headers to generate all the case statements for things derived from XrEventDataBaseHeader
+ // and call the appropriate checkEventPayload overload for those we can handle (those whose required #defines are present)
XR_LIST_ALL_CHILD_STRUCTURE_TYPES_XrEventDataBaseHeader(MAKE_CASE, MAKE_UNAVAIL_CASE);
default:
@@ -98,7 +144,7 @@ void ConformanceHooks::checkEventPayload(const XrEventDataReferenceSpaceChangePe
void ConformanceHooks::checkEventPayload(const XrEventDataInteractionProfileChanged* data)
{
- (void)session::GetSessionState(data->session); // Check handle is alive/valid.
+ session::InteractionProfileChanged(this, data); // Validate session handle and timing of InteractionProfileChanged
}
void ConformanceHooks::checkEventPayload(const XrEventDataVisibilityMaskChangedKHR* data)
diff --git a/src/conformance/conformance_layer/Session.cpp b/src/conformance/conformance_layer/Session.cpp
index 9f7dae3e..01962fb7 100644
--- a/src/conformance/conformance_layer/Session.cpp
+++ b/src/conformance/conformance_layer/Session.cpp
@@ -18,6 +18,7 @@
#include "ConformanceHooks.h"
#include "CustomHandleState.h"
#include "RuntimeFailure.h"
+#include "openxr/openxr.h"
namespace
{
@@ -123,6 +124,22 @@ namespace session
}
}
}
+
+ void InteractionProfileChanged(ConformanceHooksBase* conformanceHooks,
+ const XrEventDataInteractionProfileChanged* interactionProfileChanged)
+ {
+ // Check handle is alive/valid.
+ session::CustomSessionState* const customSessionState = GetCustomSessionState(interactionProfileChanged->session);
+ // Cannot clear here because you may have gotten several of these events queued.
+ // Not very useful, but the spec doesn't forbid it.
+ session::SyncActionsState syncActionsState = customSessionState->syncActionsState.load(std::memory_order::memory_order_seq_cst);
+ if (syncActionsState == SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST) {
+ conformanceHooks->ConformanceFailure(
+ XR_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT, "xrPollEvent",
+ "Event with type XR_TYPE_EVENT_DATA_INTERACTION_PROFILE_CHANGED must only be queued during xrSyncActions,"
+ " but no xrSyncActions call was made since the last time XR_EVENT_UNAVAILABLE was returned from xrPollEvent.");
+ }
+ }
} // namespace session
/////////////////
@@ -133,6 +150,7 @@ using namespace session;
XrResult ConformanceHooks::xrCreateSession(XrInstance instance, const XrSessionCreateInfo* createInfo, XrSession* session)
{
+ // Call generated base implementation, which will check return codes, create (common) handle state, set up parent/child relationships, etc.
const XrResult result = ConformanceHooksBase::xrCreateSession(instance, createInfo, session);
if (XR_SUCCEEDED(result)) {
std::unique_ptr<CustomSessionState> customSessionState = std::unique_ptr<CustomSessionState>(new CustomSessionState());
@@ -176,9 +194,12 @@ XrResult ConformanceHooks::xrCreateSession(XrInstance instance, const XrSessionC
XrResult ConformanceHooks::xrSyncActions(XrSession session, const XrActionsSyncInfo* syncInfo)
{
+ CustomSessionState* const customSessionState = GetCustomSessionState(session);
+ customSessionState->syncActionsState.store(session::SyncActionsState::ONGOING);
+
const XrResult result = ConformanceHooksBase::xrSyncActions(session, syncInfo);
- CustomSessionState* const customSessionState = GetCustomSessionState(session);
+ // late lock since we only touched atomics until now
std::unique_lock<std::mutex> lock(customSessionState->lock);
if (result == XR_SESSION_NOT_FOCUSED && customSessionState->sessionState == XR_SESSION_STATE_FOCUSED) {
@@ -194,6 +215,10 @@ XrResult ConformanceHooks::xrSyncActions(XrSession session, const XrActionsSyncI
for (uint32_t i = 0; i < syncInfo->countActiveActionSets; i++) {
actionset::OnSyncActionData(result, &syncInfo->activeActionSets[i]);
}
+
+ // caveat: if xrSyncActions is called in parallel with itself, this can un-set ONGOING early
+ customSessionState->syncActionsState.store(session::SyncActionsState::CALLED_SINCE_QUEUE_EXHAUST);
+
return result;
}
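The conformance-layer changes above amount to a small state machine shared between xrSyncActions and xrPollEvent. A condensed, stand-alone sketch of that pattern (assuming the same three SyncActionsState values; the free functions and global atomic here are illustrative, not the layer's actual structure):

#include <atomic>
#include <cstdint>

enum class SyncActionsState : uint32_t {
    NOT_CALLED_SINCE_QUEUE_EXHAUST,
    CALLED_SINCE_QUEUE_EXHAUST,
    ONGOING,
};

std::atomic<SyncActionsState> g_syncState{SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST};

// Wrapping xrSyncActions: mark the call in progress, then record that it completed.
void OnSyncActionsBegin() { g_syncState.store(SyncActionsState::ONGOING); }
void OnSyncActionsEnd() { g_syncState.store(SyncActionsState::CALLED_SINCE_QUEUE_EXHAUST); }

// When xrPollEvent returns XR_EVENT_UNAVAILABLE: clear the flag only if a completed
// xrSyncActions was observed, leaving an in-flight ONGOING call untouched.
void OnEventQueueExhausted() {
    SyncActionsState expected = SyncActionsState::CALLED_SINCE_QUEUE_EXHAUST;
    g_syncState.compare_exchange_strong(expected, SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST);
}

// When XrEventDataInteractionProfileChanged is dequeued: the event is only expected
// if some xrSyncActions activity has happened since the queue was last drained.
bool InteractionProfileChangeIsExpected() {
    return g_syncState.load() != SyncActionsState::NOT_CALLED_SINCE_QUEUE_EXHAUST;
}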
diff --git a/src/conformance/conformance_test/test_XR_EXT_hand_tracking.cpp b/src/conformance/conformance_test/test_XR_EXT_hand_tracking.cpp
index 3b71da75..a2170af2 100644
--- a/src/conformance/conformance_test/test_XR_EXT_hand_tracking.cpp
+++ b/src/conformance/conformance_test/test_XR_EXT_hand_tracking.cpp
@@ -430,7 +430,7 @@ namespace Conformance
// 0.1 here represents 26 degrees variance between these orientations, which is more than can reasonably be
// explained by numerical inaccuracy...
REQUIRE_THAT(XrVector3f_Dot(&wristZAxis, &fromMiddleMetacarpalToWrist), Catch::Matchers::WithinRel(1.0f, 0.1f));
- if (XrVector3f_Dot(&wristZAxis, &fromMiddleMetacarpalToWrist) > 0.03) {
+ if (std::abs(1.0 - XrVector3f_Dot(&wristZAxis, &fromMiddleMetacarpalToWrist)) > 0.03) {
WARN("Variance between wrist z axis orientation and metacarpal greater than 14 degrees!");
}
}
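For context on the thresholds above: assuming both vectors are unit length (as the comparison against 1.0 implies), their dot product is the cosine of the angle between them, so a deviation d from 1.0 corresponds to an angle of acos(1 - d). A small stand-alone check (not part of the test suite) confirming the 26 and 14 degree figures quoted in the comments:

#include <cmath>
#include <cstdio>

int main() {
    const double kPi = 3.14159265358979323846;
    const double kRadToDeg = 180.0 / kPi;
    // Dot product of two unit vectors equals cos(angle between them).
    std::printf("deviation 0.10 -> %.1f degrees\n", std::acos(1.0 - 0.10) * kRadToDeg);  // ~25.8
    std::printf("deviation 0.03 -> %.1f degrees\n", std::acos(1.0 - 0.03) * kRadToDeg);  // ~14.1
    return 0;
}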
diff --git a/src/conformance/conformance_test/test_XR_MSFT_controller_model.cpp b/src/conformance/conformance_test/test_XR_MSFT_controller_model.cpp
index afe38e0b..60cc1471 100644
--- a/src/conformance/conformance_test/test_XR_MSFT_controller_model.cpp
+++ b/src/conformance/conformance_test/test_XR_MSFT_controller_model.cpp
@@ -307,7 +307,7 @@ namespace Conformance
}
}
- TEST_CASE("XR_MSFT_controller_model_interactive", "[scenario][interactive][no_auto]")
+ TEST_CASE("XR_MSFT_controller_model-interactive", "[scenario][interactive][no_auto]")
{
GlobalData& globalData = GetGlobalData();
diff --git a/src/conformance/framework/conformance_utils.cpp b/src/conformance/framework/conformance_utils.cpp
index 3986c82f..387c719e 100644
--- a/src/conformance/framework/conformance_utils.cpp
+++ b/src/conformance/framework/conformance_utils.cpp
@@ -557,7 +557,7 @@ namespace Conformance
// that the session is ready.
// timeout in case the runtime will never transition to READY: 10s in release, 60s in debug
- auto timeoutToTransitionToSessionState = (GetGlobalData().options.debugMode ? 3600s : 10s);
+ auto timeoutToTransitionToSessionState = (GetGlobalData().options.debugMode ? 60s : 10s);
CountdownTimer countdownTimer(timeoutToTransitionToSessionState);
while ((sessionState != XR_SESSION_STATE_READY) && (!countdownTimer.IsTimeUp())) {
@@ -587,6 +587,10 @@ namespace Conformance
" If this system supports a user engagement sensor, the runtime may not transition to XR_SESSION_STATE_READY state until the user starts engaging with the device.";
}
+ if (GetGlobalData().options.debugMode) {
+ extraInfo += " Tests running using debug mode: using extended timeout of 60s to wait for XR_SESSION_STATE_READY";
+ }
+
CAPTURE(timeoutToTransitionToSessionState);
CAPTURE(sessionState);
FAIL("Time out waiting for XR_SESSION_STATE_READY session state change after creating a new session." << extraInfo);
diff --git a/src/conformance/framework/gltf_model.h b/src/conformance/framework/gltf_model.h
index f24a40c9..186c625b 100644
--- a/src/conformance/framework/gltf_model.h
+++ b/src/conformance/framework/gltf_model.h
@@ -20,7 +20,7 @@ namespace Conformance
{
public:
- RenderableGltfModelInstanceBase(ModelInstanceType&& pbrModelInstance, Pbr::FillMode fillMode = Pbr::FillMode::Solid)
+ explicit RenderableGltfModelInstanceBase(ModelInstanceType&& pbrModelInstance, Pbr::FillMode fillMode = Pbr::FillMode::Solid)
: m_pbrModelInstance(std::move(pbrModelInstance)), m_fillMode(fillMode)
{
}
diff --git a/src/conformance/framework/graphics_plugin.h b/src/conformance/framework/graphics_plugin.h
index dd4c55e8..bab8fac1 100644
--- a/src/conformance/framework/graphics_plugin.h
+++ b/src/conformance/framework/graphics_plugin.h
@@ -179,7 +179,7 @@ namespace Conformance
// or unordered_map, probably not significant
std::map nodesAndParams;
- GLTFDrawable(GLTFModelInstanceHandle handle, XrPosef pose = XrPosefCPP{}, XrVector3f scale = {1.0, 1.0, 1.0})
+ explicit GLTFDrawable(GLTFModelInstanceHandle handle, XrPosef pose = XrPosefCPP{}, XrVector3f scale = {1.0, 1.0, 1.0})
: handle(handle), params(pose, scale)
{
}
diff --git a/src/conformance/framework/pbr/PbrModel.h b/src/conformance/framework/pbr/PbrModel.h
index 3e0f8d5b..fe8bb027 100644
--- a/src/conformance/framework/pbr/PbrModel.h
+++ b/src/conformance/framework/pbr/PbrModel.h
@@ -132,7 +132,7 @@ namespace Pbr
class ModelInstance
{
protected:
- ModelInstance(std::shared_ptr model) : m_model(std::move(model))
+ explicit ModelInstance(std::shared_ptr model) : m_model(std::move(model))
{
const auto nodeCount = m_model->GetNodeCount();
diff --git a/src/loader/api_layer_interface.cpp b/src/loader/api_layer_interface.cpp
index fb509de2..a93d45da 100644
--- a/src/loader/api_layer_interface.cpp
+++ b/src/loader/api_layer_interface.cpp
@@ -72,10 +72,10 @@ XrResult ApiLayerInterface::GetApiLayerProperties(const std::string& openxr_comm
}
// Find any implicit layers which we may need to report information for.
- XrResult result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
+ XrResult result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
if (XR_SUCCEEDED(result)) {
// Find any explicit layers which we may need to report information for.
- result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_EXPLICIT_API_LAYER, manifest_files);
+ result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_EXPLICIT_API_LAYER, manifest_files);
}
if (XR_FAILED(result)) {
LoaderLogger::LogErrorMessage(openxr_command,
@@ -126,10 +126,10 @@ XrResult ApiLayerInterface::GetInstanceExtensionProperties(const std::string& op
// If a layer name is supplied, only use the information out of that one layer
if (nullptr != layer_name && 0 != strlen(layer_name)) {
- XrResult result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
+ XrResult result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
if (XR_SUCCEEDED(result)) {
// Find any explicit layers which we may need to report information for.
- result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_EXPLICIT_API_LAYER, manifest_files);
+ result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_EXPLICIT_API_LAYER, manifest_files);
if (XR_FAILED(result)) {
LoaderLogger::LogErrorMessage(
openxr_command,
@@ -155,7 +155,7 @@ XrResult ApiLayerInterface::GetInstanceExtensionProperties(const std::string& op
}
// Otherwise, we want to add only implicit API layers and explicit API layers enabled using the environment variables
} else {
- XrResult result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
+ XrResult result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_IMPLICIT_API_LAYER, manifest_files);
if (XR_SUCCEEDED(result)) {
// Find any environmentally enabled explicit layers. If they're present, treat them like implicit layers
// since we know that they're going to be enabled.
@@ -163,7 +163,8 @@ XrResult ApiLayerInterface::GetInstanceExtensionProperties(const std::string& op
AddEnvironmentApiLayers(env_enabled_layers);
if (!env_enabled_layers.empty()) {
std::vector<std::unique_ptr<ApiLayerManifestFile>> exp_layer_man_files = {};
- result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_EXPLICIT_API_LAYER, exp_layer_man_files);
+ result =
+ ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_EXPLICIT_API_LAYER, exp_layer_man_files);
if (XR_SUCCEEDED(result)) {
for (auto& exp_layer_man_file : exp_layer_man_files) {
for (std::string& enabled_layer : env_enabled_layers) {
@@ -197,8 +198,8 @@ XrResult ApiLayerInterface::LoadApiLayers(const std::string& openxr_command, uin
std::vector<std::unique_ptr<ApiLayerManifestFile>> enabled_layer_manifest_files_in_init_order = {};
// Find any implicit layers.
- XrResult result =
- ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_IMPLICIT_API_LAYER, enabled_layer_manifest_files_in_init_order);
+ XrResult result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_IMPLICIT_API_LAYER,
+ enabled_layer_manifest_files_in_init_order);
for (const auto& enabled_layer_manifest_file : enabled_layer_manifest_files_in_init_order) {
layers_already_found.insert(enabled_layer_manifest_file->LayerName());
@@ -208,7 +209,8 @@ XrResult ApiLayerInterface::LoadApiLayers(const std::string& openxr_command, uin
std::vector<std::unique_ptr<ApiLayerManifestFile>> explicit_layer_manifest_files = {};
if (XR_SUCCEEDED(result)) {
- result = ApiLayerManifestFile::FindManifestFiles(MANIFEST_TYPE_EXPLICIT_API_LAYER, explicit_layer_manifest_files);
+ result = ApiLayerManifestFile::FindManifestFiles(openxr_command, MANIFEST_TYPE_EXPLICIT_API_LAYER,
+ explicit_layer_manifest_files);
}
bool found_all_layers = true;
diff --git a/src/loader/loader_init_data.cpp b/src/loader/loader_init_data.cpp
index 11d3c4e7..3ba6d267 100644
--- a/src/loader/loader_init_data.cpp
+++ b/src/loader/loader_init_data.cpp
@@ -11,9 +11,9 @@
#ifdef XR_KHR_LOADER_INIT_SUPPORT
-#ifdef XR_USE_PLATFORM_ANDROID
// Check and copy the Android-specific init data.
XrResult LoaderInitData::initialize(const XrLoaderInitInfoBaseHeaderKHR* info) {
+#if defined(XR_USE_PLATFORM_ANDROID)
if (info->type != XR_TYPE_LOADER_INIT_INFO_ANDROID_KHR) {
return XR_ERROR_VALIDATION_FAILURE;
}
@@ -40,11 +40,13 @@ XrResult LoaderInitData::initialize(const XrLoaderInitInfoBaseHeaderKHR* info) {
const auto applicationContext = context.call("getApplicationContext()Landroid/content/Context;");
const auto applicationInfo = context.call("getApplicationInfo()Landroid/content/pm/ApplicationInfo;");
_native_library_path = applicationInfo.get("nativeLibraryDir");
+#else
+#error "Platform specific XR_KHR_loader_init structure is not defined for this platform."
+#endif // XR_USE_PLATFORM_ANDROID
_initialized = true;
return XR_SUCCESS;
}
-#endif // XR_USE_PLATFORM_ANDROID
XrResult InitializeLoaderInitData(const XrLoaderInitInfoBaseHeaderKHR* loaderInitInfo) {
return LoaderInitData::instance().initialize(loaderInitInfo);
diff --git a/src/loader/loader_init_data.hpp b/src/loader/loader_init_data.hpp
index fe6bc134..e3a27fc4 100644
--- a/src/loader/loader_init_data.hpp
+++ b/src/loader/loader_init_data.hpp
@@ -33,7 +33,7 @@ class LoaderInitData {
return obj;
}
-#ifdef XR_USE_PLATFORM_ANDROID
+#if defined(XR_USE_PLATFORM_ANDROID)
/*!
* Type alias for the platform-specific structure type.
*/
diff --git a/src/loader/manifest_file.cpp b/src/loader/manifest_file.cpp
index ae0842f3..4e3e5b49 100644
--- a/src/loader/manifest_file.cpp
+++ b/src/loader/manifest_file.cpp
@@ -630,53 +630,58 @@ void RuntimeManifestFile::CreateIfValid(const Json::Value &root_node, const std:
}
// Find all manifest files in the appropriate search paths/registries for the given type.
-XrResult RuntimeManifestFile::FindManifestFiles(std::vector<std::unique_ptr<RuntimeManifestFile>> &manifest_files) {
+XrResult RuntimeManifestFile::FindManifestFiles(const std::string &openxr_command,
+ std::vector<std::unique_ptr<RuntimeManifestFile>> &manifest_files) {
XrResult result = XR_SUCCESS;
std::string filename = PlatformUtilsGetSecureEnv(OPENXR_RUNTIME_JSON_ENV_VAR);
if (!filename.empty()) {
LoaderLogger::LogInfoMessage(
- "", "RuntimeManifestFile::FindManifestFiles - using environment variable override runtime file " + filename);
+ openxr_command,
+ "RuntimeManifestFile::FindManifestFiles - using environment variable override runtime file " + filename);
} else {
#ifdef XR_OS_WINDOWS
std::vector filenames;
ReadRuntimeDataFilesInRegistry("", "ActiveRuntime", filenames);
if (filenames.size() == 0) {
LoaderLogger::LogErrorMessage(
- "", "RuntimeManifestFile::FindManifestFiles - failed to find active runtime file in registry");
+ openxr_command, "RuntimeManifestFile::FindManifestFiles - failed to find active runtime file in registry");
return XR_ERROR_RUNTIME_UNAVAILABLE;
}
if (filenames.size() > 1) {
LoaderLogger::LogWarningMessage(
- "", "RuntimeManifestFile::FindManifestFiles - found too many default runtime files in registry");
+ openxr_command, "RuntimeManifestFile::FindManifestFiles - found too many default runtime files in registry");
}
filename = filenames[0];
- LoaderLogger::LogInfoMessage("",
+ LoaderLogger::LogInfoMessage(openxr_command,
"RuntimeManifestFile::FindManifestFiles - using registry-specified runtime file " + filename);
#elif defined(XR_OS_LINUX)
if (!FindXDGConfigFile("openxr/", XR_VERSION_MAJOR(XR_CURRENT_API_VERSION), filename)) {
LoaderLogger::LogErrorMessage(
- "", "RuntimeManifestFile::FindManifestFiles - failed to determine active runtime file path for this environment");
+ openxr_command,
+ "RuntimeManifestFile::FindManifestFiles - failed to determine active runtime file path for this environment");
return XR_ERROR_RUNTIME_UNAVAILABLE;
}
-#else
+#else // !defined(XR_OS_WINDOWS) && !defined(XR_OS_LINUX)
-#if defined(XR_USE_PLATFORM_ANDROID)
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
Json::Value virtualManifest;
result = GetPlatformRuntimeVirtualManifest(virtualManifest);
if (XR_SUCCESS == result) {
RuntimeManifestFile::CreateIfValid(virtualManifest, "", manifest_files);
return result;
}
-#endif // defined(XR_USE_PLATFORM_ANDROID)
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
if (!PlatformGetGlobalRuntimeFileName(XR_VERSION_MAJOR(XR_CURRENT_API_VERSION), filename)) {
LoaderLogger::LogErrorMessage(
- "", "RuntimeManifestFile::FindManifestFiles - failed to determine active runtime file path for this environment");
+ openxr_command,
+ "RuntimeManifestFile::FindManifestFiles - failed to determine active runtime file path for this environment");
return XR_ERROR_RUNTIME_UNAVAILABLE;
}
result = XR_SUCCESS;
- LoaderLogger::LogInfoMessage("", "RuntimeManifestFile::FindManifestFiles - using global runtime file " + filename);
-#endif
+ LoaderLogger::LogInfoMessage(openxr_command,
+ "RuntimeManifestFile::FindManifestFiles - using global runtime file " + filename);
+#endif // !defined(XR_OS_WINDOWS) && !defined(XR_OS_LINUX)
}
RuntimeManifestFile::CreateIfValid(filename, manifest_files);
@@ -692,9 +697,17 @@ ApiLayerManifestFile::ApiLayerManifestFile(ManifestFileType type, const std::str
_description(description),
_implementation_version(implementation_version) {}
-#ifdef XR_USE_PLATFORM_ANDROID
-void ApiLayerManifestFile::AddManifestFilesAndroid(ManifestFileType type,
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
+void ApiLayerManifestFile::AddManifestFilesAndroid(const std::string &openxr_command, ManifestFileType type,
std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files) {
+ if (!LoaderInitData::instance().initialized()) {
+ // This will happen for applications that do not call xrInitializeLoaderKHR
+ LoaderLogger::LogWarningMessage(
+ openxr_command,
+ "ApiLayerManifestFile::AddManifestFilesAndroid unable to add manifest files LoaderInitData not initialized.");
+ return;
+ }
+
AAssetManager *assetManager = (AAssetManager *)Android_Get_Asset_Manager();
std::vector filenames;
{
@@ -730,7 +743,7 @@ void ApiLayerManifestFile::AddManifestFilesAndroid(ManifestFileType type,
UniqueAsset asset{AAssetManager_open(assetManager, filename.c_str(), AASSET_MODE_BUFFER)};
if (!asset) {
LoaderLogger::LogWarningMessage(
- "", "ApiLayerManifestFile::AddManifestFilesAndroid unable to open asset " + filename + ", skipping");
+ openxr_command, "ApiLayerManifestFile::AddManifestFilesAndroid unable to open asset " + filename + ", skipping");
continue;
}
@@ -738,7 +751,7 @@ void ApiLayerManifestFile::AddManifestFilesAndroid(ManifestFileType type,
const char *buf = reinterpret_cast<const char *>(AAsset_getBuffer(asset.get()));
if (!buf) {
LoaderLogger::LogWarningMessage(
- "", "ApiLayerManifestFile::AddManifestFilesAndroid unable to access asset" + filename + ", skipping");
+ openxr_command, "ApiLayerManifestFile::AddManifestFilesAndroid unable to access asset" + filename + ", skipping");
continue;
}
@@ -748,7 +761,7 @@ void ApiLayerManifestFile::AddManifestFilesAndroid(ManifestFileType type,
&ApiLayerManifestFile::LocateLibraryInAssets, manifest_files);
}
}
-#endif // XR_USE_PLATFORM_ANDROID
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
void ApiLayerManifestFile::CreateIfValid(ManifestFileType type, const std::string &filename, std::istream &json_stream,
LibraryLocator locate_library,
@@ -892,7 +905,7 @@ bool ApiLayerManifestFile::LocateLibraryRelativeToJson(
return true;
}
-#ifdef XR_USE_PLATFORM_ANDROID
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
bool ApiLayerManifestFile::LocateLibraryInAssets(const std::string & /* json_filename */, const std::string &library_path,
std::string &out_combined_path) {
std::string combined_path;
@@ -904,7 +917,7 @@ bool ApiLayerManifestFile::LocateLibraryInAssets(const std::string & /* json_fil
out_combined_path = combined_path;
return true;
}
-#endif
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
void ApiLayerManifestFile::PopulateApiLayerProperties(XrApiLayerProperties &props) const {
props.layerVersion = _implementation_version;
@@ -920,7 +933,7 @@ void ApiLayerManifestFile::PopulateApiLayerProperties(XrApiLayerProperties &prop
}
// Find all layer manifest files in the appropriate search paths/registries for the given type.
-XrResult ApiLayerManifestFile::FindManifestFiles(ManifestFileType type,
+XrResult ApiLayerManifestFile::FindManifestFiles(const std::string &openxr_command, ManifestFileType type,
std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files) {
std::string relative_path;
std::string override_env_var;
@@ -947,7 +960,8 @@ XrResult ApiLayerManifestFile::FindManifestFiles(ManifestFileType type,
#endif
break;
default:
- LoaderLogger::LogErrorMessage("", "ApiLayerManifestFile::FindManifestFiles - unknown manifest file requested");
+ LoaderLogger::LogErrorMessage(openxr_command,
+ "ApiLayerManifestFile::FindManifestFiles - unknown manifest file requested");
return XR_ERROR_FILE_ACCESS_ERROR;
}
@@ -966,9 +980,9 @@ XrResult ApiLayerManifestFile::FindManifestFiles(ManifestFileType type,
ApiLayerManifestFile::CreateIfValid(type, cur_file, manifest_files);
}
-#ifdef XR_USE_PLATFORM_ANDROID
- ApiLayerManifestFile::AddManifestFilesAndroid(type, manifest_files);
-#endif // XR_USE_PLATFORM_ANDROID
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
+ ApiLayerManifestFile::AddManifestFilesAndroid(openxr_command, type, manifest_files);
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
return XR_SUCCESS;
}
diff --git a/src/loader/manifest_file.hpp b/src/loader/manifest_file.hpp
index 52fe3134..801614ad 100644
--- a/src/loader/manifest_file.hpp
+++ b/src/loader/manifest_file.hpp
@@ -71,7 +71,8 @@ class ManifestFile {
class RuntimeManifestFile : public ManifestFile {
public:
// Factory method
- static XrResult FindManifestFiles(std::vector<std::unique_ptr<RuntimeManifestFile>> &manifest_files);
+ static XrResult FindManifestFiles(const std::string &openxr_command,
+ std::vector<std::unique_ptr<RuntimeManifestFile>> &manifest_files);
private:
RuntimeManifestFile(const std::string &filename, const std::string &library_path);
@@ -87,7 +88,8 @@ using LibraryLocator = bool (*)(const std::string &json_filename, const std::str
class ApiLayerManifestFile : public ManifestFile {
public:
// Factory method
- static XrResult FindManifestFiles(ManifestFileType type, std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files);
+ static XrResult FindManifestFiles(const std::string &openxr_command, ManifestFileType type,
+ std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files);
const std::string &LayerName() const { return _layer_name; }
void PopulateApiLayerProperties(XrApiLayerProperties &props) const;
@@ -104,11 +106,13 @@ class ApiLayerManifestFile : public ManifestFile {
/// @return false if we could not find the library.
static bool LocateLibraryRelativeToJson(const std::string &json_filename, const std::string &library_path,
std::string &out_combined_path);
-#ifdef XR_USE_PLATFORM_ANDROID
+
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
static bool LocateLibraryInAssets(const std::string &json_filename, const std::string &library_path,
std::string &out_combined_path);
- static void AddManifestFilesAndroid(ManifestFileType type, std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files);
-#endif
+ static void AddManifestFilesAndroid(const std::string &openxr_command, ManifestFileType type,
+ std::vector<std::unique_ptr<ApiLayerManifestFile>> &manifest_files);
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
JsonVersion _api_version;
std::string _layer_name;
diff --git a/src/loader/runtime_interface.cpp b/src/loader/runtime_interface.cpp
index 7812aca9..a0296c73 100644
--- a/src/loader/runtime_interface.cpp
+++ b/src/loader/runtime_interface.cpp
@@ -34,7 +34,7 @@
#include
#endif // XR_USE_PLATFORM_ANDROID
-#ifdef XR_USE_PLATFORM_ANDROID
+#if defined(XR_KHR_LOADER_INIT_SUPPORT) && defined(XR_USE_PLATFORM_ANDROID)
XrResult GetPlatformRuntimeVirtualManifest(Json::Value& out_manifest) {
using wrap::android::content::Context;
auto& initData = LoaderInitData::instance();
@@ -52,7 +52,7 @@ XrResult GetPlatformRuntimeVirtualManifest(Json::Value& out_manifest) {
out_manifest = virtualManifest;
return XR_SUCCESS;
}
-#endif // XR_USE_PLATFORM_ANDROID
+#endif // defined(XR_USE_PLATFORM_ANDROID) && defined(XR_KHR_LOADER_INIT_SUPPORT)
XrResult RuntimeInterface::TryLoadingSingleRuntime(const std::string& openxr_command,
std::unique_ptr& manifest_file) {
@@ -227,7 +227,6 @@ XrResult RuntimeInterface::LoadRuntime(const std::string& openxr_command) {
return XR_SUCCESS;
}
#ifdef XR_KHR_LOADER_INIT_SUPPORT
-
if (!LoaderInitData::instance().initialized()) {
LoaderLogger::LogErrorMessage(
openxr_command, "RuntimeInterface::LoadRuntime cannot run because xrInitializeLoaderKHR was not successfully called.");
@@ -238,7 +237,7 @@ XrResult RuntimeInterface::LoadRuntime(const std::string& openxr_command) {
std::vector<std::unique_ptr<RuntimeManifestFile>> runtime_manifest_files = {};
// Find the available runtimes which we may need to report information for.
- XrResult last_error = RuntimeManifestFile::FindManifestFiles(runtime_manifest_files);
+ XrResult last_error = RuntimeManifestFile::FindManifestFiles(openxr_command, runtime_manifest_files);
if (XR_FAILED(last_error)) {
LoaderLogger::LogErrorMessage(openxr_command, "RuntimeInterface::LoadRuntimes - unknown error");
} else {