pull/170/head
commit 57b9ddf20e
git-subtree-dir: pyextra
git-subtree-split: 4eda4dd765c2bc719da9064774de6b2c14c322d1
56 changed files with 13405 additions and 0 deletions
@@ -0,0 +1 @@
*.pyc
@@ -0,0 +1,36 @@
Jinja2
~~~~~~

Jinja2 is a template engine written in pure Python. It provides a
`Django`_ inspired non-XML syntax but supports inline expressions and
an optional `sandboxed`_ environment.

Nutshell
--------

Here is a small example of a Jinja template::

    {% extends 'base.html' %}
    {% block title %}Memberlist{% endblock %}
    {% block content %}
      <ul>
      {% for user in users %}
        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
      {% endfor %}
      </ul>
    {% endblock %}

Philosophy
----------

Application logic is for the controller, but don't make life too hard for the
template designer by giving them too little functionality.

For more information visit the new `Jinja2 webpage`_ and `documentation`_.

.. _sandboxed: http://en.wikipedia.org/wiki/Sandbox_(computer_security)
.. _Django: http://www.djangoproject.com/
.. _Jinja2 webpage: http://jinja.pocoo.org/
.. _documentation: http://jinja.pocoo.org/2/documentation/
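To make the nutshell concrete, here is a minimal rendering sketch using the
public ``jinja2.Template`` API; the inline template and data are invented for
illustration only::

    from jinja2 import Template

    # Render an inline template; real projects usually load files via a loader.
    template = Template(u"Members: {% for u in users %}{{ u }} {% endfor %}")
    print(template.render(users=[u"alice", u"bob"]))   # -> "Members: alice bob "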
@@ -0,0 +1 @@
pip
@@ -0,0 +1,31 @@
Copyright (c) 2009 by the Jinja Team, see AUTHORS for more details.

Some rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above
  copyright notice, this list of conditions and the following
  disclaimer in the documentation and/or other materials provided
  with the distribution.

* The names of the contributors may not be used to endorse or
  promote products derived from this software without specific
  prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,65 @@
Metadata-Version: 2.0
Name: Jinja2
Version: 2.9.6
Summary: A small but fast and easy to use stand-alone template engine written in pure python.
Home-page: http://jinja.pocoo.org/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
License: BSD
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Dist: MarkupSafe (>=0.23)
Provides-Extra: i18n
Requires-Dist: Babel (>=0.8); extra == 'i18n'
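An illustrative sketch of reading this installed metadata at runtime; it
assumes setuptools' ``pkg_resources`` is available and the wheel above is
installed::

    import pkg_resources

    dist = pkg_resources.get_distribution('Jinja2')
    print(dist.version)             # '2.9.6' for the wheel described above
    print(dist.requires())          # [Requirement.parse('MarkupSafe>=0.23')]
    print(dist.requires(['i18n']))  # adds Babel>=0.8 via the Provides-Extra marker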
@@ -0,0 +1,59 @@
jinja2/__init__.py,sha256=Cx_UnJO4i_GqvKQsOu__mvGE_eMJSsBqITa26irtg5A,2565
jinja2/_compat.py,sha256=xP60CE5Qr8FTYcDE1f54tbZLKGvMwYml4-8T7Q4KG9k,2596
jinja2/_stringdefs.py,sha256=PYtqTmmWIhjXlFBoH-eE6fJkQvlu7nxUyQ2YlFB97VA,589381
jinja2/asyncfilters.py,sha256=cTDPvrS8Hp_IkwsZ1m9af_lr5nHysw7uTa5gV0NmZVE,4144
jinja2/asyncsupport.py,sha256=ZJO1Fdd9R93sDLrk6TZNuMQGgtuDmpTlENNRkLwZF7c,7765
jinja2/bccache.py,sha256=0xoVw0R9nj3vtzPl9g-zB5BKTLFJ7FFMq2ABbn1IkCI,12793
jinja2/compiler.py,sha256=lE5owyPwT1cGGZxWyzQtZLW7Uj1g3Vw1oVtBU8Uc_uM,62929
jinja2/constants.py,sha256=uwwV8ZUhHhacAuz5PTwckfsbqBaqM7aKfyJL7kGX5YQ,1626
jinja2/debug.py,sha256=UqEbKb4zofBABwvyA77Kr0-5IAQawKqC9t8ZeTIzpGU,12038
jinja2/defaults.py,sha256=GvVEQqIRvRMCbQF2NZSr0mlEN8lxvGixU5wIIAeRe1A,1323
jinja2/environment.py,sha256=z91L_efdYs-KNs6DBxQWDyYncOwOqn_0J4M5CfFj0Q8,50848
jinja2/exceptions.py,sha256=_Rj-NVi98Q6AiEjYQOsP8dEIdu5AlmRHzcSNOPdWix4,4428
jinja2/ext.py,sha256=9xq8fd_QPBIe4Z7hE1XawB7f1EDHrVZjpb2JiRTiG94,23867
jinja2/filters.py,sha256=1OYGhyN84yVmFUIOwJNRV_StqTCfPhnRLfJTmWbEe_8,33424
jinja2/idtracking.py,sha256=HHcCOMsQhCrrjwYAmikKqq_XetXLovCjXAThh9WbRAc,8760
jinja2/lexer.py,sha256=W4A830e-fj12zRT6rL7H91F4D6xwED5LjR8iMxjWuVQ,28238
jinja2/loaders.py,sha256=xiTuURKAEObyym0nU8PCIXu_Qp8fn0AJ5oIADUUm-5Q,17382
jinja2/meta.py,sha256=fmKHxkmZYAOm9QyWWy8EMd6eefAIh234rkBMW2X4ZR8,4340
jinja2/nodes.py,sha256=4_Ucxbkohtj4BAlpV0w_MpVmIxJNaVXDTBb4EHBA2JI,29392
jinja2/optimizer.py,sha256=MsdlFACJ0FRdPtjmCAdt7JQ9SGrXFaDNUaslsWQaG3M,1722
jinja2/parser.py,sha256=3tc82qO1Ovs9och_PjirbAmnWNT77n4wWjIQ8pEVKvU,35465
jinja2/runtime.py,sha256=axkTQXg2-oc_Cm35NEMDDas3Jbq3ATxNrDOEa5v3wIw,26835
jinja2/sandbox.py,sha256=Jx4MTxly8KvdkSWyui_kHY1_ZZ0RAQL4ojAy1KDRyK0,16707
jinja2/tests.py,sha256=iFuUTbUYv7TFffq2aTswCRdIhQ6wyrby1YevChVPqkE,4428
jinja2/utils.py,sha256=BIFqeXXsCUSjWx6MEwYhY6V4tXzVNs9WRXfB60MA9HY,19941
jinja2/visitor.py,sha256=JD1H1cANA29JcntFfN5fPyqQxB4bI4wC00BzZa-XHks,3316
Jinja2-2.9.6.dist-info/DESCRIPTION.rst,sha256=CXIS1UnPSk5_lZBS6Lb8ko-3lqGfjsiUwNBLXCTj2lc,975
Jinja2-2.9.6.dist-info/entry_points.txt,sha256=NdzVcOrqyNyKDxD09aERj__3bFx2paZhizFDsKmVhiA,72
Jinja2-2.9.6.dist-info/LICENSE.txt,sha256=JvzUNv3Io51EiWrAPm8d_SXjhJnEjyDYvB3Tvwqqils,1554
Jinja2-2.9.6.dist-info/METADATA,sha256=53LSXlqC86JTyLSPsDyAOmyV4pXIzzmmZoUXz7ogytA,2172
Jinja2-2.9.6.dist-info/metadata.json,sha256=vzvX25T4hwMOe1EIOBo9rpfiZerOB_KVLcplGG_qYtE,1394
Jinja2-2.9.6.dist-info/RECORD,,
Jinja2-2.9.6.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
Jinja2-2.9.6.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
Jinja2-2.9.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
jinja2/_compat.pyc,,
jinja2/sandbox.pyc,,
jinja2/_stringdefs.pyc,,
jinja2/bccache.pyc,,
jinja2/runtime.pyc,,
jinja2/utils.pyc,,
jinja2/parser.pyc,,
jinja2/debug.pyc,,
jinja2/lexer.pyc,,
jinja2/defaults.pyc,,
jinja2/visitor.pyc,,
jinja2/nodes.pyc,,
jinja2/environment.pyc,,
jinja2/compiler.pyc,,
jinja2/exceptions.pyc,,
jinja2/filters.pyc,,
jinja2/__init__.pyc,,
jinja2/meta.pyc,,
jinja2/loaders.pyc,,
jinja2/ext.pyc,,
jinja2/optimizer.pyc,,
jinja2/constants.pyc,,
jinja2/tests.pyc,,
jinja2/idtracking.pyc,,
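Each RECORD row is ``path,digest,size``, where the digest is an unpadded
urlsafe-base64 SHA-256 as used by wheel installers; a sketch of recomputing
such a digest (the helper name is ours, not part of any file above)::

    import base64
    import hashlib

    def record_digest(path):
        # pip/wheel style RECORD hash: urlsafe base64 of SHA-256, '=' padding stripped
        with open(path, 'rb') as f:
            digest = hashlib.sha256(f.read()).digest()
        return 'sha256=' + base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')

    print(record_digest('jinja2/__init__.py'))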
@@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.24.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@@ -0,0 +1,4 @@

[babel.extractors]
jinja2 = jinja2.ext:babel_extract[i18n]

@@ -0,0 +1 @@
{"license": "BSD", "name": "Jinja2", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "A small but fast and easy to use stand-alone template engine written in pure python.", "run_requires": [{"requires": ["Babel (>=0.8)"], "extra": "i18n"}, {"requires": ["MarkupSafe (>=0.23)"]}], "version": "2.9.6", "extensions": {"python.details": {"project_urls": {"Home": "http://jinja.pocoo.org/"}, "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "contacts": [{"role": "author", "email": "armin.ronacher@active-4.com", "name": "Armin Ronacher"}]}, "python.exports": {"babel.extractors": {"jinja2": "jinja2.ext:babel_extract [i18n]"}}}, "classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup :: HTML"], "extras": ["i18n"]} |
@@ -0,0 +1 @@
jinja2
@@ -0,0 +1,133 @@
Metadata-Version: 1.1
Name: MarkupSafe
Version: 1.0
Summary: Implements an XML/HTML/XHTML Markup safe string for Python
Home-page: http://github.com/pallets/markupsafe
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
License: BSD
Description: MarkupSafe
        ==========

        Implements a unicode subclass that supports HTML strings:

        .. code-block:: python

            >>> from markupsafe import Markup, escape
            >>> escape("<script>alert(document.cookie);</script>")
            Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
            >>> tmpl = Markup("<em>%s</em>")
            >>> tmpl % "Peter > Lustig"
            Markup(u'<em>Peter &gt; Lustig</em>')

        If you want to make an object unicode that is not yet unicode
        but don't want to lose the taint information, you can use the
        ``soft_unicode`` function. (On Python 3 you can also use ``soft_str`` which
        is a different name for the same function).

        .. code-block:: python

            >>> from markupsafe import soft_unicode
            >>> soft_unicode(42)
            u'42'
            >>> soft_unicode(Markup('foo'))
            Markup(u'foo')

        HTML Representations
        --------------------

        Objects can customize their HTML markup equivalent by overriding
        the ``__html__`` function:

        .. code-block:: python

            >>> class Foo(object):
            ...  def __html__(self):
            ...   return '<strong>Nice</strong>'
            ...
            >>> escape(Foo())
            Markup(u'<strong>Nice</strong>')
            >>> Markup(Foo())
            Markup(u'<strong>Nice</strong>')

        Silent Escapes
        --------------

        Since MarkupSafe 0.10 there is now also a separate escape function
        called ``escape_silent`` that returns an empty string for ``None`` for
        consistency with other systems that return empty strings for ``None``
        when escaping (for instance Pylons' webhelpers).

        If you also want to use this for the escape method of the Markup
        object, you can create your own subclass that does that:

        .. code-block:: python

            from markupsafe import Markup, escape_silent as escape

            class SilentMarkup(Markup):
                __slots__ = ()

                @classmethod
                def escape(cls, s):
                    return cls(escape(s))

        New-Style String Formatting
        ---------------------------

        Starting with MarkupSafe 0.21 new style string formats from Python 2.6 and
        3.x are now fully supported. Previously the escape behavior of those
        functions was spotty at best. The new implementations operate under the
        following algorithm:

        1. if an object has an ``__html_format__`` method it is called as
           a replacement for ``__format__`` with the format specifier. It either
           has to return a string or markup object.
        2. if an object has an ``__html__`` method it is called.
        3. otherwise the default format system of Python kicks in and the result
           is HTML escaped.

        Here is how you can implement your own formatting:

        .. code-block:: python

            class User(object):

                def __init__(self, id, username):
                    self.id = id
                    self.username = username

                def __html_format__(self, format_spec):
                    if format_spec == 'link':
                        return Markup('<a href="/user/{0}">{1}</a>').format(
                            self.id,
                            self.__html__(),
                        )
                    elif format_spec:
                        raise ValueError('Invalid format spec')
                    return self.__html__()

                def __html__(self):
                    return Markup('<span class=user>{0}</span>').format(self.username)

        And to format that user:

        .. code-block:: python

            >>> user = User(1, 'foo')
            >>> Markup('<p>User: {0:link}').format(user)
            Markup(u'<p>User: <a href="/user/1"><span class=user>foo</span></a>')

        MarkupSafe supports Python 2.6, 2.7 and Python 3.3 and higher.

Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
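As a small complement to the examples above, an illustrative sketch contrasting
``escape`` with ``escape_silent``; the values are chosen only for illustration::

    from markupsafe import Markup, escape, escape_silent

    assert escape(u'<b>') == Markup(u'&lt;b&gt;')    # both variants HTML-escape markup
    assert escape(None) == Markup(u'None')           # plain escape keeps the text "None"
    assert escape_silent(None) == Markup(u'')        # silent variant returns an empty string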
@@ -0,0 +1,18 @@
AUTHORS
CHANGES
LICENSE
MANIFEST.in
README.rst
setup.cfg
setup.py
tests.py
MarkupSafe.egg-info/PKG-INFO
MarkupSafe.egg-info/SOURCES.txt
MarkupSafe.egg-info/dependency_links.txt
MarkupSafe.egg-info/not-zip-safe
MarkupSafe.egg-info/top_level.txt
markupsafe/__init__.py
markupsafe/_compat.py
markupsafe/_constants.py
markupsafe/_native.py
markupsafe/_speedups.c
@@ -0,0 +1 @@

@@ -0,0 +1,15 @@
../markupsafe/__init__.py
../markupsafe/_compat.py
../markupsafe/_constants.py
../markupsafe/_native.py
../markupsafe/_speedups.c
../markupsafe/__init__.pyc
../markupsafe/_compat.pyc
../markupsafe/_constants.pyc
../markupsafe/_native.pyc
../markupsafe/_speedups.so
dependency_links.txt
not-zip-safe
PKG-INFO
SOURCES.txt
top_level.txt
@@ -0,0 +1 @@

@@ -0,0 +1 @@
markupsafe
@@ -0,0 +1,82 @@
# -*- coding: utf-8 -*-
"""
    jinja2
    ~~~~~~

    Jinja2 is a template engine written in pure Python. It provides a
    Django inspired non-XML syntax but supports inline expressions and
    an optional sandboxed environment.

    Nutshell
    --------

    Here a small example of a Jinja2 template::

        {% extends 'base.html' %}
        {% block title %}Memberlist{% endblock %}
        {% block content %}
          <ul>
          {% for user in users %}
            <li><a href="{{ user.url }}">{{ user.username }}</a></li>
          {% endfor %}
          </ul>
        {% endblock %}


    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
__version__ = '2.9.6'

# high level interface
from jinja2.environment import Environment, Template

# loaders
from jinja2.loaders import BaseLoader, FileSystemLoader, PackageLoader, \
     DictLoader, FunctionLoader, PrefixLoader, ChoiceLoader, \
     ModuleLoader

# bytecode caches
from jinja2.bccache import BytecodeCache, FileSystemBytecodeCache, \
     MemcachedBytecodeCache

# undefined types
from jinja2.runtime import Undefined, DebugUndefined, StrictUndefined, \
     make_logging_undefined

# exceptions
from jinja2.exceptions import TemplateError, UndefinedError, \
     TemplateNotFound, TemplatesNotFound, TemplateSyntaxError, \
     TemplateAssertionError

# decorators and public utilities
from jinja2.filters import environmentfilter, contextfilter, \
     evalcontextfilter
from jinja2.utils import Markup, escape, clear_caches, \
     environmentfunction, evalcontextfunction, contextfunction, \
     is_undefined, select_autoescape

__all__ = [
    'Environment', 'Template', 'BaseLoader', 'FileSystemLoader',
    'PackageLoader', 'DictLoader', 'FunctionLoader', 'PrefixLoader',
    'ChoiceLoader', 'BytecodeCache', 'FileSystemBytecodeCache',
    'MemcachedBytecodeCache', 'Undefined', 'DebugUndefined',
    'StrictUndefined', 'TemplateError', 'UndefinedError', 'TemplateNotFound',
    'TemplatesNotFound', 'TemplateSyntaxError', 'TemplateAssertionError',
    'ModuleLoader', 'environmentfilter', 'contextfilter', 'Markup', 'escape',
    'environmentfunction', 'contextfunction', 'clear_caches', 'is_undefined',
    'evalcontextfilter', 'evalcontextfunction', 'make_logging_undefined',
    'select_autoescape',
]


def _patch_async():
    from jinja2.utils import have_async_gen
    if have_async_gen:
        from jinja2.asyncsupport import patch_all
        patch_all()


_patch_async()
del _patch_async
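The names re-exported above form the public API; a minimal usage sketch
follows, in which the template directory, file name, and variables are
hypothetical and chosen only for illustration::

    from jinja2 import Environment, FileSystemLoader, select_autoescape

    env = Environment(
        loader=FileSystemLoader('templates'),                 # assumed directory
        autoescape=select_autoescape(['html', 'xml']),        # escape only markup files
    )
    template = env.get_template('memberlist.html')            # assumed to exist
    print(template.render(users=[]))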
@@ -0,0 +1,99 @@
# -*- coding: utf-8 -*-
"""
    jinja2._compat
    ~~~~~~~~~~~~~~

    Some py2/py3 compatibility support based on a stripped down
    version of six so we don't have to depend on a specific version
    of it.

    :copyright: Copyright 2013 by the Jinja team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys

PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x


if not PY2:
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)
    integer_types = (int,)

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    import pickle
    from io import BytesIO, StringIO
    NativeStringIO = StringIO

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern

    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity

else:
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)
    integer_types = (int, long)

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    NativeStringIO = BytesIO

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    from itertools import imap, izip, ifilter
    intern = intern

    def implements_iterator(cls):
        cls.next = cls.__next__
        del cls.__next__
        return cls

    def implements_to_string(cls):
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls

    def encode_filename(filename):
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename


def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
    # itself with the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            return meta(name, bases, d)
    return type.__new__(metaclass, 'temporary_class', (), {})


try:
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote
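A short demonstration sketch of how downstream code uses these shims; the
``Meta`` and ``Base`` classes here are invented for illustration::

    from jinja2._compat import text_type, iteritems, with_metaclass

    class Meta(type):
        pass

    # with_metaclass applies Meta under both Python 2 and Python 3 class syntax.
    class Base(with_metaclass(Meta, object)):
        pass

    assert type(Base) is Meta
    assert text_type(42) == u'42'                   # unicode on py2, str on py3
    assert dict(iteritems({'a': 1})) == {'a': 1}    # version-agnostic dict iteration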
File diff suppressed because one or more lines are too long
@ -0,0 +1,146 @@ |
|||||||
|
from functools import wraps |
||||||
|
|
||||||
|
from jinja2.asyncsupport import auto_aiter |
||||||
|
from jinja2 import filters |
||||||
|
|
||||||
|
|
||||||
|
async def auto_to_seq(value): |
||||||
|
seq = [] |
||||||
|
if hasattr(value, '__aiter__'): |
||||||
|
async for item in value: |
||||||
|
seq.append(item) |
||||||
|
else: |
||||||
|
for item in value: |
||||||
|
seq.append(item) |
||||||
|
return seq |
||||||
|
|
||||||
|
|
||||||
|
async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): |
||||||
|
seq, func = filters.prepare_select_or_reject( |
||||||
|
args, kwargs, modfunc, lookup_attr) |
||||||
|
if seq: |
||||||
|
async for item in auto_aiter(seq): |
||||||
|
if func(item): |
||||||
|
yield item |
||||||
|
|
||||||
|
|
||||||
|
def dualfilter(normal_filter, async_filter): |
||||||
|
wrap_evalctx = False |
||||||
|
if getattr(normal_filter, 'environmentfilter', False): |
||||||
|
is_async = lambda args: args[0].is_async |
||||||
|
wrap_evalctx = False |
||||||
|
else: |
||||||
|
if not getattr(normal_filter, 'evalcontextfilter', False) and \ |
||||||
|
not getattr(normal_filter, 'contextfilter', False): |
||||||
|
wrap_evalctx = True |
||||||
|
is_async = lambda args: args[0].environment.is_async |
||||||
|
|
||||||
|
@wraps(normal_filter) |
||||||
|
def wrapper(*args, **kwargs): |
||||||
|
b = is_async(args) |
||||||
|
if wrap_evalctx: |
||||||
|
args = args[1:] |
||||||
|
if b: |
||||||
|
return async_filter(*args, **kwargs) |
||||||
|
return normal_filter(*args, **kwargs) |
||||||
|
|
||||||
|
if wrap_evalctx: |
||||||
|
wrapper.evalcontextfilter = True |
||||||
|
|
||||||
|
wrapper.asyncfiltervariant = True |
||||||
|
|
||||||
|
return wrapper |
||||||
|
|
||||||
|
|
||||||
|
def asyncfiltervariant(original): |
||||||
|
def decorator(f): |
||||||
|
return dualfilter(original, f) |
||||||
|
return decorator |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_first) |
||||||
|
async def do_first(environment, seq): |
||||||
|
try: |
||||||
|
return await auto_aiter(seq).__anext__() |
||||||
|
except StopAsyncIteration: |
||||||
|
return environment.undefined('No first item, sequence was empty.') |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_groupby) |
||||||
|
async def do_groupby(environment, value, attribute): |
||||||
|
expr = filters.make_attrgetter(environment, attribute) |
||||||
|
return [filters._GroupTuple(key, await auto_to_seq(values)) |
||||||
|
for key, values in filters.groupby(sorted( |
||||||
|
await auto_to_seq(value), key=expr), expr)] |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_join) |
||||||
|
async def do_join(eval_ctx, value, d=u'', attribute=None): |
||||||
|
return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_list) |
||||||
|
async def do_list(value): |
||||||
|
return await auto_to_seq(value) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_reject) |
||||||
|
async def do_reject(*args, **kwargs): |
||||||
|
return async_select_or_reject(args, kwargs, lambda x: not x, False) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_rejectattr) |
||||||
|
async def do_rejectattr(*args, **kwargs): |
||||||
|
return async_select_or_reject(args, kwargs, lambda x: not x, True) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_select) |
||||||
|
async def do_select(*args, **kwargs): |
||||||
|
return async_select_or_reject(args, kwargs, lambda x: x, False) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_selectattr) |
||||||
|
async def do_selectattr(*args, **kwargs): |
||||||
|
return async_select_or_reject(args, kwargs, lambda x: x, True) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_map) |
||||||
|
async def do_map(*args, **kwargs): |
||||||
|
seq, func = filters.prepare_map(args, kwargs) |
||||||
|
if seq: |
||||||
|
async for item in auto_aiter(seq): |
||||||
|
yield func(item) |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_sum) |
||||||
|
async def do_sum(environment, iterable, attribute=None, start=0): |
||||||
|
rv = start |
||||||
|
if attribute is not None: |
||||||
|
func = filters.make_attrgetter(environment, attribute) |
||||||
|
else: |
||||||
|
func = lambda x: x |
||||||
|
async for item in auto_aiter(iterable): |
||||||
|
rv += func(item) |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
@asyncfiltervariant(filters.do_slice) |
||||||
|
async def do_slice(value, slices, fill_with=None): |
||||||
|
return filters.do_slice(await auto_to_seq(value), slices, fill_with) |
||||||
|
|
||||||
|
|
||||||
|
ASYNC_FILTERS = { |
||||||
|
'first': do_first, |
||||||
|
'groupby': do_groupby, |
||||||
|
'join': do_join, |
||||||
|
'list': do_list, |
||||||
|
# we intentionally do not support do_last because that would be |
||||||
|
# ridiculous |
||||||
|
'reject': do_reject, |
||||||
|
'rejectattr': do_rejectattr, |
||||||
|
'map': do_map, |
||||||
|
'select': do_select, |
||||||
|
'selectattr': do_selectattr, |
||||||
|
'sum': do_sum, |
||||||
|
'slice': do_slice, |
||||||
|
} |
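These async filter variants only take effect when the environment runs in async
mode; an illustrative sketch of that mode, assuming Python 3.6 or newer, with
the template string and data invented for illustration::

    import asyncio
    from jinja2 import Environment

    # enable_async=True activates the async code paths these filters implement.
    env = Environment(enable_async=True)
    template = env.from_string(u"{{ users | join(', ') }}")

    async def main():
        # render_async lets filters such as join consume async iterables too.
        print(await template.render_async(users=['alice', 'bob']))

    asyncio.get_event_loop().run_until_complete(main())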
@ -0,0 +1,254 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.asyncsupport |
||||||
|
~~~~~~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Has all the code for async support which is implemented as a patch |
||||||
|
for supported Python versions. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import sys |
||||||
|
import asyncio |
||||||
|
import inspect |
||||||
|
from functools import update_wrapper |
||||||
|
|
||||||
|
from jinja2.utils import concat, internalcode, Markup |
||||||
|
from jinja2.environment import TemplateModule |
||||||
|
from jinja2.runtime import LoopContextBase, _last_iteration |
||||||
|
|
||||||
|
|
||||||
|
async def concat_async(async_gen): |
||||||
|
rv = [] |
||||||
|
async def collect(): |
||||||
|
async for event in async_gen: |
||||||
|
rv.append(event) |
||||||
|
await collect() |
||||||
|
return concat(rv) |
||||||
|
|
||||||
|
|
||||||
|
async def generate_async(self, *args, **kwargs): |
||||||
|
vars = dict(*args, **kwargs) |
||||||
|
try: |
||||||
|
async for event in self.root_render_func(self.new_context(vars)): |
||||||
|
yield event |
||||||
|
except Exception: |
||||||
|
exc_info = sys.exc_info() |
||||||
|
else: |
||||||
|
return |
||||||
|
yield self.environment.handle_exception(exc_info, True) |
||||||
|
|
||||||
|
|
||||||
|
def wrap_generate_func(original_generate): |
||||||
|
def _convert_generator(self, loop, args, kwargs): |
||||||
|
async_gen = self.generate_async(*args, **kwargs) |
||||||
|
try: |
||||||
|
while 1: |
||||||
|
yield loop.run_until_complete(async_gen.__anext__()) |
||||||
|
except StopAsyncIteration: |
||||||
|
pass |
||||||
|
def generate(self, *args, **kwargs): |
||||||
|
if not self.environment.is_async: |
||||||
|
return original_generate(self, *args, **kwargs) |
||||||
|
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs) |
||||||
|
return update_wrapper(generate, original_generate) |
||||||
|
|
||||||
|
|
||||||
|
async def render_async(self, *args, **kwargs): |
||||||
|
if not self.environment.is_async: |
||||||
|
raise RuntimeError('The environment was not created with async mode ' |
||||||
|
'enabled.') |
||||||
|
|
||||||
|
vars = dict(*args, **kwargs) |
||||||
|
ctx = self.new_context(vars) |
||||||
|
|
||||||
|
try: |
||||||
|
return await concat_async(self.root_render_func(ctx)) |
||||||
|
except Exception: |
||||||
|
exc_info = sys.exc_info() |
||||||
|
return self.environment.handle_exception(exc_info, True) |
||||||
|
|
||||||
|
|
||||||
|
def wrap_render_func(original_render): |
||||||
|
def render(self, *args, **kwargs): |
||||||
|
if not self.environment.is_async: |
||||||
|
return original_render(self, *args, **kwargs) |
||||||
|
loop = asyncio.get_event_loop() |
||||||
|
return loop.run_until_complete(self.render_async(*args, **kwargs)) |
||||||
|
return update_wrapper(render, original_render) |
||||||
|
|
||||||
|
|
||||||
|
def wrap_block_reference_call(original_call): |
||||||
|
@internalcode |
||||||
|
async def async_call(self): |
||||||
|
rv = await concat_async(self._stack[self._depth](self._context)) |
||||||
|
if self._context.eval_ctx.autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def __call__(self): |
||||||
|
if not self._context.environment.is_async: |
||||||
|
return original_call(self) |
||||||
|
return async_call(self) |
||||||
|
|
||||||
|
return update_wrapper(__call__, original_call) |
||||||
|
|
||||||
|
|
||||||
|
def wrap_macro_invoke(original_invoke): |
||||||
|
@internalcode |
||||||
|
async def async_invoke(self, arguments, autoescape): |
||||||
|
rv = await self._func(*arguments) |
||||||
|
if autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def _invoke(self, arguments, autoescape): |
||||||
|
if not self._environment.is_async: |
||||||
|
return original_invoke(self, arguments, autoescape) |
||||||
|
return async_invoke(self, arguments, autoescape) |
||||||
|
return update_wrapper(_invoke, original_invoke) |
||||||
|
|
||||||
|
|
||||||
|
@internalcode |
||||||
|
async def get_default_module_async(self): |
||||||
|
if self._module is not None: |
||||||
|
return self._module |
||||||
|
self._module = rv = await self.make_module_async() |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
def wrap_default_module(original_default_module): |
||||||
|
@internalcode |
||||||
|
def _get_default_module(self): |
||||||
|
if self.environment.is_async: |
||||||
|
raise RuntimeError('Template module attribute is unavailable ' |
||||||
|
'in async mode') |
||||||
|
return original_default_module(self) |
||||||
|
return _get_default_module |
||||||
|
|
||||||
|
|
||||||
|
async def make_module_async(self, vars=None, shared=False, locals=None): |
||||||
|
context = self.new_context(vars, shared, locals) |
||||||
|
body_stream = [] |
||||||
|
async for item in self.root_render_func(context): |
||||||
|
body_stream.append(item) |
||||||
|
return TemplateModule(self, context, body_stream) |
||||||
|
|
||||||
|
|
||||||
|
def patch_template(): |
||||||
|
from jinja2 import Template |
||||||
|
Template.generate = wrap_generate_func(Template.generate) |
||||||
|
Template.generate_async = update_wrapper( |
||||||
|
generate_async, Template.generate_async) |
||||||
|
Template.render_async = update_wrapper( |
||||||
|
render_async, Template.render_async) |
||||||
|
Template.render = wrap_render_func(Template.render) |
||||||
|
Template._get_default_module = wrap_default_module( |
||||||
|
Template._get_default_module) |
||||||
|
Template._get_default_module_async = get_default_module_async |
||||||
|
Template.make_module_async = update_wrapper( |
||||||
|
make_module_async, Template.make_module_async) |
||||||
|
|
||||||
|
|
||||||
|
def patch_runtime(): |
||||||
|
from jinja2.runtime import BlockReference, Macro |
||||||
|
BlockReference.__call__ = wrap_block_reference_call( |
||||||
|
BlockReference.__call__) |
||||||
|
Macro._invoke = wrap_macro_invoke(Macro._invoke) |
||||||
|
|
||||||
|
|
||||||
|
def patch_filters(): |
||||||
|
from jinja2.filters import FILTERS |
||||||
|
from jinja2.asyncfilters import ASYNC_FILTERS |
||||||
|
FILTERS.update(ASYNC_FILTERS) |
||||||
|
|
||||||
|
|
||||||
|
def patch_all(): |
||||||
|
patch_template() |
||||||
|
patch_runtime() |
||||||
|
patch_filters() |
||||||
|
|
||||||
|
|
||||||
|
async def auto_await(value): |
||||||
|
if inspect.isawaitable(value): |
||||||
|
return await value |
||||||
|
return value |
||||||
|
|
||||||
|
|
||||||
|
async def auto_aiter(iterable): |
||||||
|
if hasattr(iterable, '__aiter__'): |
||||||
|
async for item in iterable: |
||||||
|
yield item |
||||||
|
return |
||||||
|
for item in iterable: |
||||||
|
yield item |
||||||
|
|
||||||
|
|
||||||
|
class AsyncLoopContext(LoopContextBase): |
||||||
|
|
||||||
|
def __init__(self, async_iterator, after, length, recurse=None, |
||||||
|
depth0=0): |
||||||
|
LoopContextBase.__init__(self, recurse, depth0) |
||||||
|
self._async_iterator = async_iterator |
||||||
|
self._after = after |
||||||
|
self._length = length |
||||||
|
|
||||||
|
@property |
||||||
|
def length(self): |
||||||
|
if self._length is None: |
||||||
|
raise TypeError('Loop length for some iterators cannot be ' |
||||||
|
'lazily calculated in async mode') |
||||||
|
return self._length |
||||||
|
|
||||||
|
def __aiter__(self): |
||||||
|
return AsyncLoopContextIterator(self) |
||||||
|
|
||||||
|
|
||||||
|
class AsyncLoopContextIterator(object): |
||||||
|
__slots__ = ('context',) |
||||||
|
|
||||||
|
def __init__(self, context): |
||||||
|
self.context = context |
||||||
|
|
||||||
|
def __aiter__(self): |
||||||
|
return self |
||||||
|
|
||||||
|
async def __anext__(self): |
||||||
|
ctx = self.context |
||||||
|
ctx.index0 += 1 |
||||||
|
if ctx._after is _last_iteration: |
||||||
|
raise StopAsyncIteration() |
||||||
|
next_elem = ctx._after |
||||||
|
try: |
||||||
|
ctx._after = await ctx._async_iterator.__anext__() |
||||||
|
except StopAsyncIteration: |
||||||
|
ctx._after = _last_iteration |
||||||
|
return next_elem, ctx |
||||||
|
|
||||||
|
|
||||||
|
async def make_async_loop_context(iterable, recurse=None, depth0=0): |
||||||
|
# Length is more complicated and less efficient in async mode. The |
||||||
|
# reason for this is that we cannot know if length will be used |
||||||
|
# upfront but because length is a property we cannot lazily execute it |
||||||
|
# later. This means that we need to buffer it up and measure :( |
||||||
|
# |
||||||
|
# We however only do this for actual iterators, not for async |
||||||
|
# iterators as blocking here does not seem like the best idea in the |
||||||
|
# world. |
||||||
|
try: |
||||||
|
length = len(iterable) |
||||||
|
except (TypeError, AttributeError): |
||||||
|
if not hasattr(iterable, '__aiter__'): |
||||||
|
iterable = tuple(iterable) |
||||||
|
length = len(iterable) |
||||||
|
else: |
||||||
|
length = None |
||||||
|
async_iterator = auto_aiter(iterable) |
||||||
|
try: |
||||||
|
after = await async_iterator.__anext__() |
||||||
|
except StopAsyncIteration: |
||||||
|
after = _last_iteration |
||||||
|
return AsyncLoopContext(async_iterator, after, length, recurse, depth0) |
@ -0,0 +1,362 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.bccache |
||||||
|
~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
This module implements the bytecode cache system Jinja is optionally |
||||||
|
using. This is useful if you have very complex template situations and |
||||||
|
the compiliation of all those templates slow down your application too |
||||||
|
much. |
||||||
|
|
||||||
|
Situations where this is useful are often forking web applications that |
||||||
|
are initialized on the first request. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD. |
||||||
|
""" |
||||||
|
from os import path, listdir |
||||||
|
import os |
||||||
|
import sys |
||||||
|
import stat |
||||||
|
import errno |
||||||
|
import marshal |
||||||
|
import tempfile |
||||||
|
import fnmatch |
||||||
|
from hashlib import sha1 |
||||||
|
from jinja2.utils import open_if_exists |
||||||
|
from jinja2._compat import BytesIO, pickle, PY2, text_type |
||||||
|
|
||||||
|
|
||||||
|
# marshal works better on 3.x, one hack less required |
||||||
|
if not PY2: |
||||||
|
marshal_dump = marshal.dump |
||||||
|
marshal_load = marshal.load |
||||||
|
else: |
||||||
|
|
||||||
|
def marshal_dump(code, f): |
||||||
|
if isinstance(f, file): |
||||||
|
marshal.dump(code, f) |
||||||
|
else: |
||||||
|
f.write(marshal.dumps(code)) |
||||||
|
|
||||||
|
def marshal_load(f): |
||||||
|
if isinstance(f, file): |
||||||
|
return marshal.load(f) |
||||||
|
return marshal.loads(f.read()) |
||||||
|
|
||||||
|
|
||||||
|
bc_version = 3 |
||||||
|
|
||||||
|
# magic version used to only change with new jinja versions. With 2.6 |
||||||
|
# we change this to also take Python version changes into account. The |
||||||
|
# reason for this is that Python tends to segfault if fed earlier bytecode |
||||||
|
# versions because someone thought it would be a good idea to reuse opcodes |
||||||
|
# or make Python incompatible with earlier versions. |
||||||
|
bc_magic = 'j2'.encode('ascii') + \ |
||||||
|
pickle.dumps(bc_version, 2) + \ |
||||||
|
pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1]) |
||||||
|
|
||||||
|
|
||||||
|
class Bucket(object): |
||||||
|
"""Buckets are used to store the bytecode for one template. It's created |
||||||
|
and initialized by the bytecode cache and passed to the loading functions. |
||||||
|
|
||||||
|
The buckets get an internal checksum from the cache assigned and use this |
||||||
|
to automatically reject outdated cache material. Individual bytecode |
||||||
|
cache subclasses don't have to care about cache invalidation. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, environment, key, checksum): |
||||||
|
self.environment = environment |
||||||
|
self.key = key |
||||||
|
self.checksum = checksum |
||||||
|
self.reset() |
||||||
|
|
||||||
|
def reset(self): |
||||||
|
"""Resets the bucket (unloads the bytecode).""" |
||||||
|
self.code = None |
||||||
|
|
||||||
|
def load_bytecode(self, f): |
||||||
|
"""Loads bytecode from a file or file like object.""" |
||||||
|
# make sure the magic header is correct |
||||||
|
magic = f.read(len(bc_magic)) |
||||||
|
if magic != bc_magic: |
||||||
|
self.reset() |
||||||
|
return |
||||||
|
# the source code of the file changed, we need to reload |
||||||
|
checksum = pickle.load(f) |
||||||
|
if self.checksum != checksum: |
||||||
|
self.reset() |
||||||
|
return |
||||||
|
# if marshal_load fails then we need to reload |
||||||
|
try: |
||||||
|
self.code = marshal_load(f) |
||||||
|
except (EOFError, ValueError, TypeError): |
||||||
|
self.reset() |
||||||
|
return |
||||||
|
|
||||||
|
def write_bytecode(self, f): |
||||||
|
"""Dump the bytecode into the file or file like object passed.""" |
||||||
|
if self.code is None: |
||||||
|
raise TypeError('can\'t write empty bucket') |
||||||
|
f.write(bc_magic) |
||||||
|
pickle.dump(self.checksum, f, 2) |
||||||
|
marshal_dump(self.code, f) |
||||||
|
|
||||||
|
def bytecode_from_string(self, string): |
||||||
|
"""Load bytecode from a string.""" |
||||||
|
self.load_bytecode(BytesIO(string)) |
||||||
|
|
||||||
|
def bytecode_to_string(self): |
||||||
|
"""Return the bytecode as string.""" |
||||||
|
out = BytesIO() |
||||||
|
self.write_bytecode(out) |
||||||
|
return out.getvalue() |
||||||
|
|
||||||
|
|
||||||
|
class BytecodeCache(object): |
||||||
|
"""To implement your own bytecode cache you have to subclass this class |
||||||
|
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of |
||||||
|
these methods are passed a :class:`~jinja2.bccache.Bucket`. |
||||||
|
|
||||||
|
A very basic bytecode cache that saves the bytecode on the file system:: |
||||||
|
|
||||||
|
from os import path |
||||||
|
|
||||||
|
class MyCache(BytecodeCache): |
||||||
|
|
||||||
|
def __init__(self, directory): |
||||||
|
self.directory = directory |
||||||
|
|
||||||
|
def load_bytecode(self, bucket): |
||||||
|
filename = path.join(self.directory, bucket.key) |
||||||
|
if path.exists(filename): |
||||||
|
with open(filename, 'rb') as f: |
||||||
|
bucket.load_bytecode(f) |
||||||
|
|
||||||
|
def dump_bytecode(self, bucket): |
||||||
|
filename = path.join(self.directory, bucket.key) |
||||||
|
with open(filename, 'wb') as f: |
||||||
|
bucket.write_bytecode(f) |
||||||
|
|
||||||
|
A more advanced version of a filesystem based bytecode cache is part of |
||||||
|
Jinja2. |
||||||
|
""" |
||||||
|
|
||||||
|
def load_bytecode(self, bucket): |
||||||
|
"""Subclasses have to override this method to load bytecode into a |
||||||
|
bucket. If they are not able to find code in the cache for the |
||||||
|
bucket, it must not do anything. |
||||||
|
""" |
||||||
|
raise NotImplementedError() |
||||||
|
|
||||||
|
def dump_bytecode(self, bucket): |
||||||
|
"""Subclasses have to override this method to write the bytecode |
||||||
|
from a bucket back to the cache. If it unable to do so it must not |
||||||
|
fail silently but raise an exception. |
||||||
|
""" |
||||||
|
raise NotImplementedError() |
||||||
|
|
||||||
|
def clear(self): |
||||||
|
"""Clears the cache. This method is not used by Jinja2 but should be |
||||||
|
implemented to allow applications to clear the bytecode cache used |
||||||
|
by a particular environment. |
||||||
|
""" |
||||||
|
|
||||||
|
def get_cache_key(self, name, filename=None): |
||||||
|
"""Returns the unique hash key for this template name.""" |
||||||
|
hash = sha1(name.encode('utf-8')) |
||||||
|
if filename is not None: |
||||||
|
filename = '|' + filename |
||||||
|
if isinstance(filename, text_type): |
||||||
|
filename = filename.encode('utf-8') |
||||||
|
hash.update(filename) |
||||||
|
return hash.hexdigest() |
||||||
|
|
||||||
|
def get_source_checksum(self, source): |
||||||
|
"""Returns a checksum for the source.""" |
||||||
|
return sha1(source.encode('utf-8')).hexdigest() |
||||||
|
|
||||||
|
def get_bucket(self, environment, name, filename, source): |
||||||
|
"""Return a cache bucket for the given template. All arguments are |
||||||
|
mandatory but filename may be `None`. |
||||||
|
""" |
||||||
|
key = self.get_cache_key(name, filename) |
||||||
|
checksum = self.get_source_checksum(source) |
||||||
|
bucket = Bucket(environment, key, checksum) |
||||||
|
self.load_bytecode(bucket) |
||||||
|
return bucket |
||||||
|
|
||||||
|
def set_bucket(self, bucket): |
||||||
|
"""Put the bucket into the cache.""" |
||||||
|
self.dump_bytecode(bucket) |
||||||
|
|
||||||
|
|
||||||
|
class FileSystemBytecodeCache(BytecodeCache): |
||||||
|
"""A bytecode cache that stores bytecode on the filesystem. It accepts |
||||||
|
two arguments: The directory where the cache items are stored and a |
||||||
|
pattern string that is used to build the filename. |
||||||
|
|
||||||
|
If no directory is specified a default cache directory is selected. On |
||||||
|
Windows the user's temp directory is used, on UNIX systems a directory |
||||||
|
is created for the user in the system temp directory. |
||||||
|
|
||||||
|
The pattern can be used to have multiple separate caches operate on the |
||||||
|
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` |
||||||
|
is replaced with the cache key. |
||||||
|
|
||||||
|
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') |
||||||
|
|
||||||
|
This bytecode cache supports clearing of the cache using the clear method. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, directory=None, pattern='__jinja2_%s.cache'): |
||||||
|
if directory is None: |
||||||
|
directory = self._get_default_cache_dir() |
||||||
|
self.directory = directory |
||||||
|
self.pattern = pattern |
||||||
|
|
||||||
|
def _get_default_cache_dir(self): |
||||||
|
def _unsafe_dir(): |
||||||
|
raise RuntimeError('Cannot determine safe temp directory. You ' |
||||||
|
'need to explicitly provide one.') |
||||||
|
|
||||||
|
tmpdir = tempfile.gettempdir() |
||||||
|
|
||||||
|
# On windows the temporary directory is used specific unless |
||||||
|
# explicitly forced otherwise. We can just use that. |
||||||
|
if os.name == 'nt': |
||||||
|
return tmpdir |
||||||
|
if not hasattr(os, 'getuid'): |
||||||
|
_unsafe_dir() |
||||||
|
|
||||||
|
dirname = '_jinja2-cache-%d' % os.getuid() |
||||||
|
actual_dir = os.path.join(tmpdir, dirname) |
||||||
|
|
||||||
|
try: |
||||||
|
os.mkdir(actual_dir, stat.S_IRWXU) |
||||||
|
except OSError as e: |
||||||
|
if e.errno != errno.EEXIST: |
||||||
|
raise |
||||||
|
try: |
||||||
|
os.chmod(actual_dir, stat.S_IRWXU) |
||||||
|
actual_dir_stat = os.lstat(actual_dir) |
||||||
|
if actual_dir_stat.st_uid != os.getuid() \ |
||||||
|
or not stat.S_ISDIR(actual_dir_stat.st_mode) \ |
||||||
|
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: |
||||||
|
_unsafe_dir() |
||||||
|
except OSError as e: |
||||||
|
if e.errno != errno.EEXIST: |
||||||
|
raise |
||||||
|
|
||||||
|
actual_dir_stat = os.lstat(actual_dir) |
||||||
|
if actual_dir_stat.st_uid != os.getuid() \ |
||||||
|
or not stat.S_ISDIR(actual_dir_stat.st_mode) \ |
||||||
|
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU: |
||||||
|
_unsafe_dir() |
||||||
|
|
||||||
|
return actual_dir |
||||||
|
|
||||||
|
def _get_cache_filename(self, bucket): |
||||||
|
return path.join(self.directory, self.pattern % bucket.key) |
||||||
|
|
||||||
|
def load_bytecode(self, bucket): |
||||||
|
f = open_if_exists(self._get_cache_filename(bucket), 'rb') |
||||||
|
if f is not None: |
||||||
|
try: |
||||||
|
bucket.load_bytecode(f) |
||||||
|
finally: |
||||||
|
f.close() |
||||||
|
|
||||||
|
def dump_bytecode(self, bucket): |
||||||
|
f = open(self._get_cache_filename(bucket), 'wb') |
||||||
|
try: |
||||||
|
bucket.write_bytecode(f) |
||||||
|
finally: |
||||||
|
f.close() |
||||||
|
|
||||||
|
def clear(self): |
||||||
|
# imported lazily here because google app-engine doesn't support |
||||||
|
# write access on the file system and the function does not exist |
||||||
|
# normally. |
||||||
|
from os import remove |
||||||
|
files = fnmatch.filter(listdir(self.directory), self.pattern % '*') |
||||||
|
for filename in files: |
||||||
|
try: |
||||||
|
remove(path.join(self.directory, filename)) |
||||||
|
except OSError: |
||||||
|
pass |
||||||
|
|
||||||
|
|
||||||
|
class MemcachedBytecodeCache(BytecodeCache): |
||||||
|
"""This class implements a bytecode cache that uses a memcache cache for |
||||||
|
storing the information. It does not enforce a specific memcache library |
||||||
|
(tummy's memcache or cmemcache) but will accept any class that provides |
||||||
|
the minimal interface required. |
||||||
|
|
||||||
|
Libraries compatible with this class: |
||||||
|
|
||||||
|
- `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache |
||||||
|
- `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_ |
||||||
|
- `cmemcache <http://gijsbert.org/cmemcache/>`_ |
||||||
|
|
||||||
|
(Unfortunately the django cache interface is not compatible because it |
||||||
|
does not support storing binary data, only unicode. You can however pass |
||||||
|
the underlying cache client to the bytecode cache which is available |
||||||
|
as `django.core.cache.cache._client`.) |
||||||
|
|
||||||
|
The minimal interface for the client passed to the constructor is this: |
||||||
|
|
||||||
|
.. class:: MinimalClientInterface |
||||||
|
|
||||||
|
.. method:: set(key, value[, timeout]) |
||||||
|
|
||||||
|
Stores the bytecode in the cache. `value` is a string and |
||||||
|
`timeout` the timeout of the key. If timeout is not provided |
||||||
|
a default timeout or no timeout should be assumed, if it's |
||||||
|
provided it's an integer with the number of seconds the cache |
||||||
|
item should exist. |
||||||
|
|
||||||
|
.. method:: get(key) |
||||||
|
|
||||||
|
Returns the value for the cache key. If the item does not |
||||||
|
exist in the cache the return value must be `None`. |
||||||
|
|
||||||
|
The other arguments to the constructor are the prefix for all keys that |
||||||
|
is added before the actual cache key and the timeout for the bytecode in |
||||||
|
the cache system. We recommend a high (or no) timeout. |
||||||
|
|
||||||
|
This bytecode cache does not support clearing of used items in the cache. |
||||||
|
The clear method is a no-operation function. |
||||||
|
|
||||||
|
.. versionadded:: 2.7 |
||||||
|
Added support for ignoring memcache errors through the |
||||||
|
`ignore_memcache_errors` parameter. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, client, prefix='jinja2/bytecode/', timeout=None, |
||||||
|
ignore_memcache_errors=True): |
||||||
|
self.client = client |
||||||
|
self.prefix = prefix |
||||||
|
self.timeout = timeout |
||||||
|
self.ignore_memcache_errors = ignore_memcache_errors |
||||||
|
|
||||||
|
def load_bytecode(self, bucket): |
||||||
|
try: |
||||||
|
code = self.client.get(self.prefix + bucket.key) |
||||||
|
except Exception: |
||||||
|
if not self.ignore_memcache_errors: |
||||||
|
raise |
||||||
|
code = None |
||||||
|
if code is not None: |
||||||
|
bucket.bytecode_from_string(code) |
||||||
|
|
||||||
|
def dump_bytecode(self, bucket): |
||||||
|
args = (self.prefix + bucket.key, bucket.bytecode_to_string()) |
||||||
|
if self.timeout is not None: |
||||||
|
args += (self.timeout,) |
||||||
|
try: |
||||||
|
self.client.set(*args) |
||||||
|
except Exception: |
||||||
|
if not self.ignore_memcache_errors: |
||||||
|
raise |
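The cache classes in this module plug into an environment through its
``bytecode_cache`` argument; a minimal sketch in which the cache directory and
template are invented for illustration::

    from jinja2 import Environment, DictLoader, FileSystemBytecodeCache

    env = Environment(
        loader=DictLoader({'hello.html': u'Hello {{ name }}!'}),
        # Compiled template bytecode is written to this directory and reused,
        # so forked workers skip recompilation.
        bytecode_cache=FileSystemBytecodeCache('/tmp/jinja_cache'),
    )
    print(env.get_template('hello.html').render(name='world'))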
File diff suppressed because it is too large
@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
"""
    jinja.constants
    ~~~~~~~~~~~~~~~

    Various constants.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""


#: list of lorem ipsum words used by the lipsum() helper function
LOREM_IPSUM_WORDS = u'''\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate'''
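The word list above feeds Jinja2's built-in ``lipsum()`` template global; a
tiny usage sketch follows, assuming the default environment globals::

    from jinja2 import Environment

    env = Environment()
    # lipsum(n) is a default global that generates n paragraphs of lorem ipsum
    # drawn from LOREM_IPSUM_WORDS; html=False returns plain text.
    print(env.from_string("{{ lipsum(1, html=False) }}").render())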
@@ -0,0 +1,372 @@
# -*- coding: utf-8 -*-
"""
    jinja2.debug
    ~~~~~~~~~~~~

    Implements the debug interface for Jinja.  This module does some pretty
    ugly stuff with the Python traceback system in order to achieve tracebacks
    with correct line numbers, locals and contents.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType, CodeType
from jinja2.utils import missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
from jinja2._compat import iteritems, reraise, PY2

# on pypy we can take advantage of transparent proxies
try:
    from __pypy__ import tproxy
except ImportError:
    tproxy = None


# how does the raise helper look like?
try:
    exec("raise TypeError, 'foo'")
except SyntaxError:
    raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
||||||
|
|
||||||
|
|
||||||
|
class TracebackFrameProxy(object): |
||||||
|
"""Proxies a traceback frame.""" |
||||||
|
|
||||||
|
def __init__(self, tb): |
||||||
|
self.tb = tb |
||||||
|
self._tb_next = None |
||||||
|
|
||||||
|
@property |
||||||
|
def tb_next(self): |
||||||
|
return self._tb_next |
||||||
|
|
||||||
|
def set_next(self, next): |
||||||
|
if tb_set_next is not None: |
||||||
|
try: |
||||||
|
tb_set_next(self.tb, next and next.tb or None) |
||||||
|
except Exception: |
||||||
|
# this function can fail due to all the hackery it does |
||||||
|
# on various python implementations. We just catch errors |
||||||
|
# down and ignore them if necessary. |
||||||
|
pass |
||||||
|
self._tb_next = next |
||||||
|
|
||||||
|
@property |
||||||
|
def is_jinja_frame(self): |
||||||
|
return '__jinja_template__' in self.tb.tb_frame.f_globals |
||||||
|
|
||||||
|
def __getattr__(self, name): |
||||||
|
return getattr(self.tb, name) |
||||||
|
|
||||||
|
|
||||||
|
def make_frame_proxy(frame): |
||||||
|
proxy = TracebackFrameProxy(frame) |
||||||
|
if tproxy is None: |
||||||
|
return proxy |
||||||
|
def operation_handler(operation, *args, **kwargs): |
||||||
|
if operation in ('__getattribute__', '__getattr__'): |
||||||
|
return getattr(proxy, args[0]) |
||||||
|
elif operation == '__setattr__': |
||||||
|
proxy.__setattr__(*args, **kwargs) |
||||||
|
else: |
||||||
|
return getattr(proxy, operation)(*args, **kwargs) |
||||||
|
return tproxy(TracebackType, operation_handler) |
||||||
|
|
||||||
|
|
||||||
|
class ProcessedTraceback(object): |
||||||
|
"""Holds a Jinja preprocessed traceback for printing or reraising.""" |
||||||
|
|
||||||
|
def __init__(self, exc_type, exc_value, frames): |
||||||
|
assert frames, 'no frames for this traceback?' |
||||||
|
self.exc_type = exc_type |
||||||
|
self.exc_value = exc_value |
||||||
|
self.frames = frames |
||||||
|
|
||||||
|
# newly concatenate the frames (which are proxies) |
||||||
|
prev_tb = None |
||||||
|
for tb in self.frames: |
||||||
|
if prev_tb is not None: |
||||||
|
prev_tb.set_next(tb) |
||||||
|
prev_tb = tb |
||||||
|
prev_tb.set_next(None) |
||||||
|
|
||||||
|
def render_as_text(self, limit=None): |
||||||
|
"""Return a string with the traceback.""" |
||||||
|
lines = traceback.format_exception(self.exc_type, self.exc_value, |
||||||
|
self.frames[0], limit=limit) |
||||||
|
return ''.join(lines).rstrip() |
||||||
|
|
||||||
|
def render_as_html(self, full=False): |
||||||
|
"""Return a unicode string with the traceback as rendered HTML.""" |
||||||
|
from jinja2.debugrenderer import render_traceback |
||||||
|
return u'%s\n\n<!--\n%s\n-->' % ( |
||||||
|
render_traceback(self, full=full), |
||||||
|
self.render_as_text().decode('utf-8', 'replace') |
||||||
|
) |
||||||
|
|
||||||
|
@property |
||||||
|
def is_template_syntax_error(self): |
||||||
|
"""`True` if this is a template syntax error.""" |
||||||
|
return isinstance(self.exc_value, TemplateSyntaxError) |
||||||
|
|
||||||
|
@property |
||||||
|
def exc_info(self): |
||||||
|
"""Exception info tuple with a proxy around the frame objects.""" |
||||||
|
return self.exc_type, self.exc_value, self.frames[0] |
||||||
|
|
||||||
|
@property |
||||||
|
def standard_exc_info(self): |
||||||
|
"""Standard python exc_info for re-raising""" |
||||||
|
tb = self.frames[0] |
||||||
|
# the frame will be an actual traceback (or transparent proxy) if |
||||||
|
# we are on pypy or a python implementation with support for tproxy |
||||||
|
if type(tb) is not TracebackType: |
||||||
|
tb = tb.tb |
||||||
|
return self.exc_type, self.exc_value, tb |
||||||
|
|
||||||
|
|
||||||
|
def make_traceback(exc_info, source_hint=None): |
||||||
|
"""Creates a processed traceback object from the exc_info.""" |
||||||
|
exc_type, exc_value, tb = exc_info |
||||||
|
if isinstance(exc_value, TemplateSyntaxError): |
||||||
|
exc_info = translate_syntax_error(exc_value, source_hint) |
||||||
|
initial_skip = 0 |
||||||
|
else: |
||||||
|
initial_skip = 1 |
||||||
|
return translate_exception(exc_info, initial_skip) |
||||||
|
|
||||||
|
|
||||||
|
def translate_syntax_error(error, source=None): |
||||||
|
"""Rewrites a syntax error to please traceback systems.""" |
||||||
|
error.source = source |
||||||
|
error.translated = True |
||||||
|
exc_info = (error.__class__, error, None) |
||||||
|
filename = error.filename |
||||||
|
if filename is None: |
||||||
|
filename = '<unknown>' |
||||||
|
return fake_exc_info(exc_info, filename, error.lineno) |
||||||
|
|
||||||
|
|
||||||
|
def translate_exception(exc_info, initial_skip=0): |
||||||
|
"""If passed an exc_info it will automatically rewrite the exceptions |
||||||
|
all the way down to the correct line numbers and frames. |
||||||
|
""" |
||||||
|
tb = exc_info[2] |
||||||
|
frames = [] |
||||||
|
|
||||||
|
# skip some internal frames if wanted |
||||||
|
for x in range(initial_skip): |
||||||
|
if tb is not None: |
||||||
|
tb = tb.tb_next |
||||||
|
initial_tb = tb |
||||||
|
|
||||||
|
while tb is not None: |
||||||
|
# skip frames decorated with @internalcode. These are internal |
||||||
|
# calls we can't avoid and that are useless in template debugging |
||||||
|
# output. |
||||||
|
if tb.tb_frame.f_code in internal_code: |
||||||
|
tb = tb.tb_next |
||||||
|
continue |
||||||
|
|
||||||
|
# save a reference to the next frame if we override the current |
||||||
|
# one with a faked one. |
||||||
|
next = tb.tb_next |
||||||
|
|
||||||
|
# fake template exceptions |
||||||
|
template = tb.tb_frame.f_globals.get('__jinja_template__') |
||||||
|
if template is not None: |
||||||
|
lineno = template.get_corresponding_lineno(tb.tb_lineno) |
||||||
|
tb = fake_exc_info(exc_info[:2] + (tb,), template.filename, |
||||||
|
lineno)[2] |
||||||
|
|
||||||
|
frames.append(make_frame_proxy(tb)) |
||||||
|
tb = next |
||||||
|
|
||||||
|
# if we don't have any exceptions in the frames left, we have to |
||||||
|
# reraise it unchanged. |
||||||
|
# XXX: can we backup here? when could this happen? |
||||||
|
if not frames: |
||||||
|
reraise(exc_info[0], exc_info[1], exc_info[2]) |
||||||
|
|
||||||
|
return ProcessedTraceback(exc_info[0], exc_info[1], frames) |
||||||
|
|
||||||
|
|
||||||
|
def get_jinja_locals(real_locals): |
||||||
|
ctx = real_locals.get('context') |
||||||
|
if ctx: |
||||||
|
locals = ctx.get_all() |
||||||
|
else: |
||||||
|
locals = {} |
||||||
|
|
||||||
|
local_overrides = {} |
||||||
|
|
||||||
|
for name, value in iteritems(real_locals): |
||||||
|
if not name.startswith('l_') or value is missing: |
||||||
|
continue |
||||||
|
try: |
||||||
|
_, depth, name = name.split('_', 2) |
||||||
|
depth = int(depth) |
||||||
|
except ValueError: |
||||||
|
continue |
||||||
|
cur_depth = local_overrides.get(name, (-1,))[0] |
||||||
|
if cur_depth < depth: |
||||||
|
local_overrides[name] = (depth, value) |
||||||
|
|
||||||
|
for name, (_, value) in iteritems(local_overrides): |
||||||
|
if value is missing: |
||||||
|
locals.pop(name, None) |
||||||
|
else: |
||||||
|
locals[name] = value |
||||||
|
|
||||||
|
return locals |
||||||
|
|
||||||
|
|
||||||
|
def fake_exc_info(exc_info, filename, lineno): |
||||||
|
"""Helper for `translate_exception`.""" |
||||||
|
exc_type, exc_value, tb = exc_info |
||||||
|
|
||||||
|
# figure the real context out |
||||||
|
if tb is not None: |
||||||
|
locals = get_jinja_locals(tb.tb_frame.f_locals) |
||||||
|
|
||||||
|
# if there is a local called __jinja_exception__, we get |
||||||
|
# rid of it to not break the debug functionality. |
||||||
|
locals.pop('__jinja_exception__', None) |
||||||
|
else: |
||||||
|
locals = {} |
||||||
|
|
||||||
|
# assamble fake globals we need |
||||||
|
globals = { |
||||||
|
'__name__': filename, |
||||||
|
'__file__': filename, |
||||||
|
'__jinja_exception__': exc_info[:2], |
||||||
|
|
||||||
|
# we don't want to keep the reference to the template around |
||||||
|
# to not cause circular dependencies, but we mark it as Jinja |
||||||
|
# frame for the ProcessedTraceback |
||||||
|
'__jinja_template__': None |
||||||
|
} |
||||||
|
|
||||||
|
# and fake the exception |
||||||
|
code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec') |
||||||
|
|
||||||
|
# if it's possible, change the name of the code. This won't work |
||||||
|
# on some python environments such as google appengine |
||||||
|
try: |
||||||
|
if tb is None: |
||||||
|
location = 'template' |
||||||
|
else: |
||||||
|
function = tb.tb_frame.f_code.co_name |
||||||
|
if function == 'root': |
||||||
|
location = 'top-level template code' |
||||||
|
elif function.startswith('block_'): |
||||||
|
location = 'block "%s"' % function[6:] |
||||||
|
else: |
||||||
|
location = 'template' |
||||||
|
|
||||||
|
if PY2: |
||||||
|
code = CodeType(0, code.co_nlocals, code.co_stacksize, |
||||||
|
code.co_flags, code.co_code, code.co_consts, |
||||||
|
code.co_names, code.co_varnames, filename, |
||||||
|
location, code.co_firstlineno, |
||||||
|
code.co_lnotab, (), ()) |
||||||
|
else: |
||||||
|
code = CodeType(0, code.co_kwonlyargcount, |
||||||
|
code.co_nlocals, code.co_stacksize, |
||||||
|
code.co_flags, code.co_code, code.co_consts, |
||||||
|
code.co_names, code.co_varnames, filename, |
||||||
|
location, code.co_firstlineno, |
||||||
|
code.co_lnotab, (), ()) |
||||||
|
except Exception as e: |
||||||
|
pass |
||||||
|
|
||||||
|
# execute the code and catch the new traceback |
||||||
|
try: |
||||||
|
exec(code, globals, locals) |
||||||
|
except: |
||||||
|
exc_info = sys.exc_info() |
||||||
|
new_tb = exc_info[2].tb_next |
||||||
|
|
||||||
|
# return without this frame |
||||||
|
return exc_info[:2] + (new_tb,) |
||||||
|
|
||||||
|
|
||||||
|
def _init_ugly_crap(): |
||||||
|
"""This function implements a few ugly things so that we can patch the |
||||||
|
traceback objects. The function returned allows resetting `tb_next` on |
||||||
|
any python traceback object. Do not attempt to use this on non cpython |
||||||
|
interpreters |
||||||
|
""" |
||||||
|
import ctypes |
||||||
|
from types import TracebackType |
||||||
|
|
||||||
|
if PY2: |
||||||
|
# figure out size of _Py_ssize_t for Python 2: |
||||||
|
if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): |
||||||
|
_Py_ssize_t = ctypes.c_int64 |
||||||
|
else: |
||||||
|
_Py_ssize_t = ctypes.c_int |
||||||
|
else: |
||||||
|
# platform ssize_t on Python 3 |
||||||
|
_Py_ssize_t = ctypes.c_ssize_t |
||||||
|
|
||||||
|
# regular python |
||||||
|
class _PyObject(ctypes.Structure): |
||||||
|
pass |
||||||
|
_PyObject._fields_ = [ |
||||||
|
('ob_refcnt', _Py_ssize_t), |
||||||
|
('ob_type', ctypes.POINTER(_PyObject)) |
||||||
|
] |
||||||
|
|
||||||
|
# python with trace |
||||||
|
if hasattr(sys, 'getobjects'): |
||||||
|
class _PyObject(ctypes.Structure): |
||||||
|
pass |
||||||
|
_PyObject._fields_ = [ |
||||||
|
('_ob_next', ctypes.POINTER(_PyObject)), |
||||||
|
('_ob_prev', ctypes.POINTER(_PyObject)), |
||||||
|
('ob_refcnt', _Py_ssize_t), |
||||||
|
('ob_type', ctypes.POINTER(_PyObject)) |
||||||
|
] |
||||||
|
|
||||||
|
class _Traceback(_PyObject): |
||||||
|
pass |
||||||
|
_Traceback._fields_ = [ |
||||||
|
('tb_next', ctypes.POINTER(_Traceback)), |
||||||
|
('tb_frame', ctypes.POINTER(_PyObject)), |
||||||
|
('tb_lasti', ctypes.c_int), |
||||||
|
('tb_lineno', ctypes.c_int) |
||||||
|
] |
||||||
|
|
||||||
|
def tb_set_next(tb, next): |
||||||
|
"""Set the tb_next attribute of a traceback object.""" |
||||||
|
if not (isinstance(tb, TracebackType) and |
||||||
|
(next is None or isinstance(next, TracebackType))): |
||||||
|
raise TypeError('tb_set_next arguments must be traceback objects') |
||||||
|
obj = _Traceback.from_address(id(tb)) |
||||||
|
if tb.tb_next is not None: |
||||||
|
old = _Traceback.from_address(id(tb.tb_next)) |
||||||
|
old.ob_refcnt -= 1 |
||||||
|
if next is None: |
||||||
|
obj.tb_next = ctypes.POINTER(_Traceback)() |
||||||
|
else: |
||||||
|
next = _Traceback.from_address(id(next)) |
||||||
|
next.ob_refcnt += 1 |
||||||
|
obj.tb_next = ctypes.pointer(next) |
||||||
|
|
||||||
|
return tb_set_next |
||||||
|
|
||||||
|
|
||||||
|
# try to get a tb_set_next implementation if we don't have transparent |
||||||
|
# proxies. |
||||||
|
tb_set_next = None |
||||||
|
if tproxy is None: |
||||||
|
try: |
||||||
|
tb_set_next = _init_ugly_crap() |
||||||
|
except: |
||||||
|
pass |
||||||
|
del _init_ugly_crap |
@@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
"""
    jinja2.defaults
    ~~~~~~~~~~~~~~~

    Jinja default filters and tags.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner


# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False


# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
    'range': range_type,
    'dict': dict,
    'lipsum': generate_lorem_ipsum,
    'cycler': Cycler,
    'joiner': Joiner
}


# default policies
DEFAULT_POLICIES = {
    'compiler.ascii_str': True,
    'urlize.rel': 'noopener',
    'urlize.target': None,
    'truncate.leeway': 5,
    'json.dumps_function': None,
    'json.dumps_kwargs': {'sort_keys': True},
}


# export all constants
__all__ = tuple(x for x in locals().keys() if x.isupper())
File diff suppressed because it is too large
@@ -0,0 +1,146 @@
# -*- coding: utf-8 -*-
"""
    jinja2.exceptions
    ~~~~~~~~~~~~~~~~~

    Jinja exceptions.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string


class TemplateError(Exception):
    """Baseclass for all template errors."""

    if PY2:
        def __init__(self, message=None):
            if message is not None:
                message = text_type(message).encode('utf-8')
            Exception.__init__(self, message)

        @property
        def message(self):
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message.decode('utf-8', 'replace')

        def __unicode__(self):
            return self.message or u''
    else:
        def __init__(self, message=None):
            Exception.__init__(self, message)

        @property
        def message(self):
            if self.args:
                message = self.args[0]
                if message is not None:
                    return message


@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
    """Raised if a template does not exist."""

    # looks weird, but removes the warning descriptor that just
    # bogusly warns us about message being deprecated
    message = None

    def __init__(self, name, message=None):
        IOError.__init__(self)
        if message is None:
            message = name
        self.message = message
        self.name = name
        self.templates = [name]

    def __str__(self):
        return self.message


class TemplatesNotFound(TemplateNotFound):
    """Like :class:`TemplateNotFound` but raised if multiple templates
    are selected.  This is a subclass of :class:`TemplateNotFound`
    exception, so just catching the base exception will catch both.

    .. versionadded:: 2.2
    """

    def __init__(self, names=(), message=None):
        if message is None:
            message = u'none of the templates given were found: ' + \
                      u', '.join(imap(text_type, names))
        TemplateNotFound.__init__(self, names and names[-1] or None, message)
        self.templates = list(names)


@implements_to_string
class TemplateSyntaxError(TemplateError):
    """Raised to tell the user that there is a problem with the template."""

    def __init__(self, message, lineno, name=None, filename=None):
        TemplateError.__init__(self, message)
        self.lineno = lineno
        self.name = name
        self.filename = filename
        self.source = None

        # this is set to True if the debug.translate_syntax_error
        # function translated the syntax error into a new traceback
        self.translated = False

    def __str__(self):
        # for translated errors we only return the message
        if self.translated:
            return self.message

        # otherwise attach some stuff
        location = 'line %d' % self.lineno
        name = self.filename or self.name
        if name:
            location = 'File "%s", %s' % (name, location)
        lines = [self.message, '  ' + location]

        # if the source is set, add the line to the output
        if self.source is not None:
            try:
                line = self.source.splitlines()[self.lineno - 1]
            except IndexError:
                line = None
            if line:
                lines.append('    ' + line.strip())

        return u'\n'.join(lines)


class TemplateAssertionError(TemplateSyntaxError):
    """Like a template syntax error, but covers cases where something in the
    template caused an error at compile time that wasn't necessarily caused
    by a syntax error.  However it's a direct subclass of
    :exc:`TemplateSyntaxError` and has the same attributes.
    """


class TemplateRuntimeError(TemplateError):
    """A generic runtime error in the template engine.  Under some situations
    Jinja may raise this exception.
    """


class UndefinedError(TemplateRuntimeError):
    """Raised if a template tries to operate on :class:`Undefined`."""


class SecurityError(TemplateRuntimeError):
    """Raised if a template tries to do something insecure if the
    sandbox is enabled.
    """


class FilterArgumentError(TemplateRuntimeError):
    """This error is raised if a filter was called with inappropriate
    arguments
    """
@@ -0,0 +1,609 @@
# -*- coding: utf-8 -*-
"""
    jinja2.ext
    ~~~~~~~~~~

    Jinja extensions allow to add custom tags similar to the way django custom
    tags work.  By default two example extensions exist: an i18n and a cache
    extension.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD.
"""
from jinja2 import nodes
from jinja2.defaults import BLOCK_START_STRING, \
     BLOCK_END_STRING, VARIABLE_START_STRING, VARIABLE_END_STRING, \
     COMMENT_START_STRING, COMMENT_END_STRING, LINE_STATEMENT_PREFIX, \
     LINE_COMMENT_PREFIX, TRIM_BLOCKS, NEWLINE_SEQUENCE, \
     KEEP_TRAILING_NEWLINE, LSTRIP_BLOCKS
from jinja2.environment import Environment
from jinja2.runtime import concat
from jinja2.exceptions import TemplateAssertionError, TemplateSyntaxError
from jinja2.utils import contextfunction, import_string, Markup
from jinja2._compat import with_metaclass, string_types, iteritems


# the only real useful gettext functions for a Jinja template.  Note
# that ugettext must be assigned to gettext as Jinja doesn't support
# non unicode strings.
GETTEXT_FUNCTIONS = ('_', 'gettext', 'ngettext')
||||||
|
|
||||||
|
|
||||||
|
class ExtensionRegistry(type): |
||||||
|
"""Gives the extension an unique identifier.""" |
||||||
|
|
||||||
|
def __new__(cls, name, bases, d): |
||||||
|
rv = type.__new__(cls, name, bases, d) |
||||||
|
rv.identifier = rv.__module__ + '.' + rv.__name__ |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
class Extension(with_metaclass(ExtensionRegistry, object)): |
||||||
|
"""Extensions can be used to add extra functionality to the Jinja template |
||||||
|
system at the parser level. Custom extensions are bound to an environment |
||||||
|
but may not store environment specific data on `self`. The reason for |
||||||
|
this is that an extension can be bound to another environment (for |
||||||
|
overlays) by creating a copy and reassigning the `environment` attribute. |
||||||
|
|
||||||
|
As extensions are created by the environment they cannot accept any |
||||||
|
arguments for configuration. One may want to work around that by using |
||||||
|
a factory function, but that is not possible as extensions are identified |
||||||
|
by their import name. The correct way to configure the extension is |
||||||
|
storing the configuration values on the environment. Because this way the |
||||||
|
environment ends up acting as central configuration storage the |
||||||
|
attributes may clash which is why extensions have to ensure that the names |
||||||
|
they choose for configuration are not too generic. ``prefix`` for example |
||||||
|
is a terrible name, ``fragment_cache_prefix`` on the other hand is a good |
||||||
|
name as includes the name of the extension (fragment cache). |
||||||
|
""" |
||||||
|
|
||||||
|
#: if this extension parses this is the list of tags it's listening to. |
||||||
|
tags = set() |
||||||
|
|
||||||
|
#: the priority of that extension. This is especially useful for |
||||||
|
#: extensions that preprocess values. A lower value means higher |
||||||
|
#: priority. |
||||||
|
#: |
||||||
|
#: .. versionadded:: 2.4 |
||||||
|
priority = 100 |
||||||
|
|
||||||
|
def __init__(self, environment): |
||||||
|
self.environment = environment |
||||||
|
|
||||||
|
def bind(self, environment): |
||||||
|
"""Create a copy of this extension bound to another environment.""" |
||||||
|
rv = object.__new__(self.__class__) |
||||||
|
rv.__dict__.update(self.__dict__) |
||||||
|
rv.environment = environment |
||||||
|
return rv |
||||||
|
|
||||||
|
def preprocess(self, source, name, filename=None): |
||||||
|
"""This method is called before the actual lexing and can be used to |
||||||
|
preprocess the source. The `filename` is optional. The return value |
||||||
|
must be the preprocessed source. |
||||||
|
""" |
||||||
|
return source |
||||||
|
|
||||||
|
def filter_stream(self, stream): |
||||||
|
"""It's passed a :class:`~jinja2.lexer.TokenStream` that can be used |
||||||
|
to filter tokens returned. This method has to return an iterable of |
||||||
|
:class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a |
||||||
|
:class:`~jinja2.lexer.TokenStream`. |
||||||
|
|
||||||
|
In the `ext` folder of the Jinja2 source distribution there is a file |
||||||
|
called `inlinegettext.py` which implements a filter that utilizes this |
||||||
|
method. |
||||||
|
""" |
||||||
|
return stream |
||||||
|
|
||||||
|
def parse(self, parser): |
||||||
|
"""If any of the :attr:`tags` matched this method is called with the |
||||||
|
parser as first argument. The token the parser stream is pointing at |
||||||
|
is the name token that matched. This method has to return one or a |
||||||
|
list of multiple nodes. |
||||||
|
""" |
||||||
|
raise NotImplementedError() |
||||||
|
|
||||||
|
def attr(self, name, lineno=None): |
||||||
|
"""Return an attribute node for the current extension. This is useful |
||||||
|
to pass constants on extensions to generated template code. |
||||||
|
|
||||||
|
:: |
||||||
|
|
||||||
|
self.attr('_my_attribute', lineno=lineno) |
||||||
|
""" |
||||||
|
return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno) |
||||||
|
|
||||||
|
def call_method(self, name, args=None, kwargs=None, dyn_args=None, |
||||||
|
dyn_kwargs=None, lineno=None): |
||||||
|
"""Call a method of the extension. This is a shortcut for |
||||||
|
:meth:`attr` + :class:`jinja2.nodes.Call`. |
||||||
|
""" |
||||||
|
if args is None: |
||||||
|
args = [] |
||||||
|
if kwargs is None: |
||||||
|
kwargs = [] |
||||||
|
return nodes.Call(self.attr(name, lineno=lineno), args, kwargs, |
||||||
|
dyn_args, dyn_kwargs, lineno=lineno) |
||||||
|
|
||||||
|
|
||||||
|
@contextfunction |
||||||
|
def _gettext_alias(__context, *args, **kwargs): |
||||||
|
return __context.call(__context.resolve('gettext'), *args, **kwargs) |
||||||
|
|
||||||
|
|
||||||
|
def _make_new_gettext(func): |
||||||
|
@contextfunction |
||||||
|
def gettext(__context, __string, **variables): |
||||||
|
rv = __context.call(func, __string) |
||||||
|
if __context.eval_ctx.autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv % variables |
||||||
|
return gettext |
||||||
|
|
||||||
|
|
||||||
|
def _make_new_ngettext(func): |
||||||
|
@contextfunction |
||||||
|
def ngettext(__context, __singular, __plural, __num, **variables): |
||||||
|
variables.setdefault('num', __num) |
||||||
|
rv = __context.call(func, __singular, __plural, __num) |
||||||
|
if __context.eval_ctx.autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv % variables |
||||||
|
return ngettext |
||||||
|
|
||||||
|
|
||||||
|
class InternationalizationExtension(Extension): |
||||||
|
"""This extension adds gettext support to Jinja2.""" |
||||||
|
tags = set(['trans']) |
||||||
|
|
||||||
|
# TODO: the i18n extension is currently reevaluating values in a few |
||||||
|
# situations. Take this example: |
||||||
|
# {% trans count=something() %}{{ count }} foo{% pluralize |
||||||
|
# %}{{ count }} fooss{% endtrans %} |
||||||
|
# something is called twice here. One time for the gettext value and |
||||||
|
# the other time for the n-parameter of the ngettext function. |
||||||
|
|
||||||
|
def __init__(self, environment): |
||||||
|
Extension.__init__(self, environment) |
||||||
|
environment.globals['_'] = _gettext_alias |
||||||
|
environment.extend( |
||||||
|
install_gettext_translations=self._install, |
||||||
|
install_null_translations=self._install_null, |
||||||
|
install_gettext_callables=self._install_callables, |
||||||
|
uninstall_gettext_translations=self._uninstall, |
||||||
|
extract_translations=self._extract, |
||||||
|
newstyle_gettext=False |
||||||
|
) |
||||||
|
|
||||||
|
def _install(self, translations, newstyle=None): |
||||||
|
gettext = getattr(translations, 'ugettext', None) |
||||||
|
if gettext is None: |
||||||
|
gettext = translations.gettext |
||||||
|
ngettext = getattr(translations, 'ungettext', None) |
||||||
|
if ngettext is None: |
||||||
|
ngettext = translations.ngettext |
||||||
|
self._install_callables(gettext, ngettext, newstyle) |
||||||
|
|
||||||
|
def _install_null(self, newstyle=None): |
||||||
|
self._install_callables( |
||||||
|
lambda x: x, |
||||||
|
lambda s, p, n: (n != 1 and (p,) or (s,))[0], |
||||||
|
newstyle |
||||||
|
) |
||||||
|
|
||||||
|
def _install_callables(self, gettext, ngettext, newstyle=None): |
||||||
|
if newstyle is not None: |
||||||
|
self.environment.newstyle_gettext = newstyle |
||||||
|
if self.environment.newstyle_gettext: |
||||||
|
gettext = _make_new_gettext(gettext) |
||||||
|
ngettext = _make_new_ngettext(ngettext) |
||||||
|
self.environment.globals.update( |
||||||
|
gettext=gettext, |
||||||
|
ngettext=ngettext |
||||||
|
) |
||||||
|
|
||||||
|
def _uninstall(self, translations): |
||||||
|
for key in 'gettext', 'ngettext': |
||||||
|
self.environment.globals.pop(key, None) |
||||||
|
|
||||||
|
def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS): |
||||||
|
if isinstance(source, string_types): |
||||||
|
source = self.environment.parse(source) |
||||||
|
return extract_from_ast(source, gettext_functions) |
||||||
|
|
||||||
|
def parse(self, parser): |
||||||
|
"""Parse a translatable tag.""" |
||||||
|
lineno = next(parser.stream).lineno |
||||||
|
num_called_num = False |
||||||
|
|
||||||
|
# find all the variables referenced. Additionally a variable can be |
||||||
|
# defined in the body of the trans block too, but this is checked at |
||||||
|
# a later state. |
||||||
|
plural_expr = None |
||||||
|
plural_expr_assignment = None |
||||||
|
variables = {} |
||||||
|
while parser.stream.current.type != 'block_end': |
||||||
|
if variables: |
||||||
|
parser.stream.expect('comma') |
||||||
|
|
||||||
|
# skip colon for python compatibility |
||||||
|
if parser.stream.skip_if('colon'): |
||||||
|
break |
||||||
|
|
||||||
|
name = parser.stream.expect('name') |
||||||
|
if name.value in variables: |
||||||
|
parser.fail('translatable variable %r defined twice.' % |
||||||
|
name.value, name.lineno, |
||||||
|
exc=TemplateAssertionError) |
||||||
|
|
||||||
|
# expressions |
||||||
|
if parser.stream.current.type == 'assign': |
||||||
|
next(parser.stream) |
||||||
|
variables[name.value] = var = parser.parse_expression() |
||||||
|
else: |
||||||
|
variables[name.value] = var = nodes.Name(name.value, 'load') |
||||||
|
|
||||||
|
if plural_expr is None: |
||||||
|
if isinstance(var, nodes.Call): |
||||||
|
plural_expr = nodes.Name('_trans', 'load') |
||||||
|
variables[name.value] = plural_expr |
||||||
|
plural_expr_assignment = nodes.Assign( |
||||||
|
nodes.Name('_trans', 'store'), var) |
||||||
|
else: |
||||||
|
plural_expr = var |
||||||
|
num_called_num = name.value == 'num' |
||||||
|
|
||||||
|
parser.stream.expect('block_end') |
||||||
|
|
||||||
|
plural = plural_names = None |
||||||
|
have_plural = False |
||||||
|
referenced = set() |
||||||
|
|
||||||
|
# now parse until endtrans or pluralize |
||||||
|
singular_names, singular = self._parse_block(parser, True) |
||||||
|
if singular_names: |
||||||
|
referenced.update(singular_names) |
||||||
|
if plural_expr is None: |
||||||
|
plural_expr = nodes.Name(singular_names[0], 'load') |
||||||
|
num_called_num = singular_names[0] == 'num' |
||||||
|
|
||||||
|
# if we have a pluralize block, we parse that too |
||||||
|
if parser.stream.current.test('name:pluralize'): |
||||||
|
have_plural = True |
||||||
|
next(parser.stream) |
||||||
|
if parser.stream.current.type != 'block_end': |
||||||
|
name = parser.stream.expect('name') |
||||||
|
if name.value not in variables: |
||||||
|
parser.fail('unknown variable %r for pluralization' % |
||||||
|
name.value, name.lineno, |
||||||
|
exc=TemplateAssertionError) |
||||||
|
plural_expr = variables[name.value] |
||||||
|
num_called_num = name.value == 'num' |
||||||
|
parser.stream.expect('block_end') |
||||||
|
plural_names, plural = self._parse_block(parser, False) |
||||||
|
next(parser.stream) |
||||||
|
referenced.update(plural_names) |
||||||
|
else: |
||||||
|
next(parser.stream) |
||||||
|
|
||||||
|
# register free names as simple name expressions |
||||||
|
for var in referenced: |
||||||
|
if var not in variables: |
||||||
|
variables[var] = nodes.Name(var, 'load') |
||||||
|
|
||||||
|
if not have_plural: |
||||||
|
plural_expr = None |
||||||
|
elif plural_expr is None: |
||||||
|
parser.fail('pluralize without variables', lineno) |
||||||
|
|
||||||
|
node = self._make_node(singular, plural, variables, plural_expr, |
||||||
|
bool(referenced), |
||||||
|
num_called_num and have_plural) |
||||||
|
node.set_lineno(lineno) |
||||||
|
if plural_expr_assignment is not None: |
||||||
|
return [plural_expr_assignment, node] |
||||||
|
else: |
||||||
|
return node |
||||||
|
|
||||||
|
def _parse_block(self, parser, allow_pluralize): |
||||||
|
"""Parse until the next block tag with a given name.""" |
||||||
|
referenced = [] |
||||||
|
buf = [] |
||||||
|
while 1: |
||||||
|
if parser.stream.current.type == 'data': |
||||||
|
buf.append(parser.stream.current.value.replace('%', '%%')) |
||||||
|
next(parser.stream) |
||||||
|
elif parser.stream.current.type == 'variable_begin': |
||||||
|
next(parser.stream) |
||||||
|
name = parser.stream.expect('name').value |
||||||
|
referenced.append(name) |
||||||
|
buf.append('%%(%s)s' % name) |
||||||
|
parser.stream.expect('variable_end') |
||||||
|
elif parser.stream.current.type == 'block_begin': |
||||||
|
next(parser.stream) |
||||||
|
if parser.stream.current.test('name:endtrans'): |
||||||
|
break |
||||||
|
elif parser.stream.current.test('name:pluralize'): |
||||||
|
if allow_pluralize: |
||||||
|
break |
||||||
|
parser.fail('a translatable section can have only one ' |
||||||
|
'pluralize section') |
||||||
|
parser.fail('control structures in translatable sections are ' |
||||||
|
'not allowed') |
||||||
|
elif parser.stream.eos: |
||||||
|
parser.fail('unclosed translation block') |
||||||
|
else: |
||||||
|
assert False, 'internal parser error' |
||||||
|
|
||||||
|
return referenced, concat(buf) |
||||||
|
|
||||||
|
def _make_node(self, singular, plural, variables, plural_expr, |
||||||
|
vars_referenced, num_called_num): |
||||||
|
"""Generates a useful node from the data provided.""" |
||||||
|
# no variables referenced? no need to escape for old style |
||||||
|
# gettext invocations only if there are vars. |
||||||
|
if not vars_referenced and not self.environment.newstyle_gettext: |
||||||
|
singular = singular.replace('%%', '%') |
||||||
|
if plural: |
||||||
|
plural = plural.replace('%%', '%') |
||||||
|
|
||||||
|
# singular only: |
||||||
|
if plural_expr is None: |
||||||
|
gettext = nodes.Name('gettext', 'load') |
||||||
|
node = nodes.Call(gettext, [nodes.Const(singular)], |
||||||
|
[], None, None) |
||||||
|
|
||||||
|
# singular and plural |
||||||
|
else: |
||||||
|
ngettext = nodes.Name('ngettext', 'load') |
||||||
|
node = nodes.Call(ngettext, [ |
||||||
|
nodes.Const(singular), |
||||||
|
nodes.Const(plural), |
||||||
|
plural_expr |
||||||
|
], [], None, None) |
||||||
|
|
||||||
|
# in case newstyle gettext is used, the method is powerful |
||||||
|
# enough to handle the variable expansion and autoescape |
||||||
|
# handling itself |
||||||
|
if self.environment.newstyle_gettext: |
||||||
|
for key, value in iteritems(variables): |
||||||
|
# the function adds that later anyways in case num was |
||||||
|
# called num, so just skip it. |
||||||
|
if num_called_num and key == 'num': |
||||||
|
continue |
||||||
|
node.kwargs.append(nodes.Keyword(key, value)) |
||||||
|
|
||||||
|
# otherwise do that here |
||||||
|
else: |
||||||
|
# mark the return value as safe if we are in an |
||||||
|
# environment with autoescaping turned on |
||||||
|
node = nodes.MarkSafeIfAutoescape(node) |
||||||
|
if variables: |
||||||
|
node = nodes.Mod(node, nodes.Dict([ |
||||||
|
nodes.Pair(nodes.Const(key), value) |
||||||
|
for key, value in variables.items() |
||||||
|
])) |
||||||
|
return nodes.Output([node]) |
||||||
|
|
||||||
|
|
||||||
|
class ExprStmtExtension(Extension): |
||||||
|
"""Adds a `do` tag to Jinja2 that works like the print statement just |
||||||
|
that it doesn't print the return value. |
||||||
|
""" |
||||||
|
tags = set(['do']) |
||||||
|
|
||||||
|
def parse(self, parser): |
||||||
|
node = nodes.ExprStmt(lineno=next(parser.stream).lineno) |
||||||
|
node.node = parser.parse_tuple() |
||||||
|
return node |
||||||
|
|
||||||
|
|
||||||
|
class LoopControlExtension(Extension): |
||||||
|
"""Adds break and continue to the template engine.""" |
||||||
|
tags = set(['break', 'continue']) |
||||||
|
|
||||||
|
def parse(self, parser): |
||||||
|
token = next(parser.stream) |
||||||
|
if token.value == 'break': |
||||||
|
return nodes.Break(lineno=token.lineno) |
||||||
|
return nodes.Continue(lineno=token.lineno) |
||||||
|
|
||||||
|
|
||||||
|
class WithExtension(Extension): |
||||||
|
pass |
||||||
|
|
||||||
|
|
||||||
|
class AutoEscapeExtension(Extension): |
||||||
|
pass |
||||||
|
|
||||||
|
|
||||||
|
def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, |
||||||
|
babel_style=True): |
||||||
|
"""Extract localizable strings from the given template node. Per |
||||||
|
default this function returns matches in babel style that means non string |
||||||
|
parameters as well as keyword arguments are returned as `None`. This |
||||||
|
allows Babel to figure out what you really meant if you are using |
||||||
|
gettext functions that allow keyword arguments for placeholder expansion. |
||||||
|
If you don't want that behavior set the `babel_style` parameter to `False` |
||||||
|
which causes only strings to be returned and parameters are always stored |
||||||
|
in tuples. As a consequence invalid gettext calls (calls without a single |
||||||
|
string parameter or string parameters after non-string parameters) are |
||||||
|
skipped. |
||||||
|
|
||||||
|
This example explains the behavior: |
||||||
|
|
||||||
|
>>> from jinja2 import Environment |
||||||
|
>>> env = Environment() |
||||||
|
>>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}') |
||||||
|
>>> list(extract_from_ast(node)) |
||||||
|
[(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))] |
||||||
|
>>> list(extract_from_ast(node, babel_style=False)) |
||||||
|
[(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))] |
||||||
|
|
||||||
|
For every string found this function yields a ``(lineno, function, |
||||||
|
message)`` tuple, where: |
||||||
|
|
||||||
|
* ``lineno`` is the number of the line on which the string was found, |
||||||
|
* ``function`` is the name of the ``gettext`` function used (if the |
||||||
|
string was extracted from embedded Python code), and |
||||||
|
* ``message`` is the string itself (a ``unicode`` object, or a tuple |
||||||
|
of ``unicode`` objects for functions with multiple string arguments). |
||||||
|
|
||||||
|
This extraction function operates on the AST and is because of that unable |
||||||
|
to extract any comments. For comment support you have to use the babel |
||||||
|
extraction interface or extract comments yourself. |
||||||
|
""" |
||||||
|
for node in node.find_all(nodes.Call): |
||||||
|
if not isinstance(node.node, nodes.Name) or \ |
||||||
|
node.node.name not in gettext_functions: |
||||||
|
continue |
||||||
|
|
||||||
|
strings = [] |
||||||
|
for arg in node.args: |
||||||
|
if isinstance(arg, nodes.Const) and \ |
||||||
|
isinstance(arg.value, string_types): |
||||||
|
strings.append(arg.value) |
||||||
|
else: |
||||||
|
strings.append(None) |
||||||
|
|
||||||
|
for arg in node.kwargs: |
||||||
|
strings.append(None) |
||||||
|
if node.dyn_args is not None: |
||||||
|
strings.append(None) |
||||||
|
if node.dyn_kwargs is not None: |
||||||
|
strings.append(None) |
||||||
|
|
||||||
|
if not babel_style: |
||||||
|
strings = tuple(x for x in strings if x is not None) |
||||||
|
if not strings: |
||||||
|
continue |
||||||
|
else: |
||||||
|
if len(strings) == 1: |
||||||
|
strings = strings[0] |
||||||
|
else: |
||||||
|
strings = tuple(strings) |
||||||
|
yield node.lineno, node.node.name, strings |
||||||
|
|
||||||
|
|
||||||
|
class _CommentFinder(object): |
||||||
|
"""Helper class to find comments in a token stream. Can only |
||||||
|
find comments for gettext calls forwards. Once the comment |
||||||
|
from line 4 is found, a comment for line 1 will not return a |
||||||
|
usable value. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, tokens, comment_tags): |
||||||
|
self.tokens = tokens |
||||||
|
self.comment_tags = comment_tags |
||||||
|
self.offset = 0 |
||||||
|
self.last_lineno = 0 |
||||||
|
|
||||||
|
def find_backwards(self, offset): |
||||||
|
try: |
||||||
|
for _, token_type, token_value in \ |
||||||
|
reversed(self.tokens[self.offset:offset]): |
||||||
|
if token_type in ('comment', 'linecomment'): |
||||||
|
try: |
||||||
|
prefix, comment = token_value.split(None, 1) |
||||||
|
except ValueError: |
||||||
|
continue |
||||||
|
if prefix in self.comment_tags: |
||||||
|
return [comment.rstrip()] |
||||||
|
return [] |
||||||
|
finally: |
||||||
|
self.offset = offset |
||||||
|
|
||||||
|
def find_comments(self, lineno): |
||||||
|
if not self.comment_tags or self.last_lineno > lineno: |
||||||
|
return [] |
||||||
|
for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset:]): |
||||||
|
if token_lineno > lineno: |
||||||
|
return self.find_backwards(self.offset + idx) |
||||||
|
return self.find_backwards(len(self.tokens)) |
||||||
|
|
||||||
|
|
||||||
|
def babel_extract(fileobj, keywords, comment_tags, options): |
||||||
|
"""Babel extraction method for Jinja templates. |
||||||
|
|
||||||
|
.. versionchanged:: 2.3 |
||||||
|
Basic support for translation comments was added. If `comment_tags` |
||||||
|
is now set to a list of keywords for extraction, the extractor will |
||||||
|
try to find the best preceeding comment that begins with one of the |
||||||
|
keywords. For best results, make sure to not have more than one |
||||||
|
gettext call in one line of code and the matching comment in the |
||||||
|
same line or the line before. |
||||||
|
|
||||||
|
.. versionchanged:: 2.5.1 |
||||||
|
The `newstyle_gettext` flag can be set to `True` to enable newstyle |
||||||
|
gettext calls. |
||||||
|
|
||||||
|
.. versionchanged:: 2.7 |
||||||
|
A `silent` option can now be provided. If set to `False` template |
||||||
|
syntax errors are propagated instead of being ignored. |
||||||
|
|
||||||
|
:param fileobj: the file-like object the messages should be extracted from |
||||||
|
:param keywords: a list of keywords (i.e. function names) that should be |
||||||
|
recognized as translation functions |
||||||
|
:param comment_tags: a list of translator tags to search for and include |
||||||
|
in the results. |
||||||
|
:param options: a dictionary of additional options (optional) |
||||||
|
:return: an iterator over ``(lineno, funcname, message, comments)`` tuples. |
||||||
|
(comments will be empty currently) |
||||||
|
""" |
||||||
|
extensions = set() |
||||||
|
for extension in options.get('extensions', '').split(','): |
||||||
|
extension = extension.strip() |
||||||
|
if not extension: |
||||||
|
continue |
||||||
|
extensions.add(import_string(extension)) |
||||||
|
if InternationalizationExtension not in extensions: |
||||||
|
extensions.add(InternationalizationExtension) |
||||||
|
|
||||||
|
def getbool(options, key, default=False): |
||||||
|
return options.get(key, str(default)).lower() in \ |
||||||
|
('1', 'on', 'yes', 'true') |
||||||
|
|
||||||
|
silent = getbool(options, 'silent', True) |
||||||
|
environment = Environment( |
||||||
|
options.get('block_start_string', BLOCK_START_STRING), |
||||||
|
options.get('block_end_string', BLOCK_END_STRING), |
||||||
|
options.get('variable_start_string', VARIABLE_START_STRING), |
||||||
|
options.get('variable_end_string', VARIABLE_END_STRING), |
||||||
|
options.get('comment_start_string', COMMENT_START_STRING), |
||||||
|
options.get('comment_end_string', COMMENT_END_STRING), |
||||||
|
options.get('line_statement_prefix') or LINE_STATEMENT_PREFIX, |
||||||
|
options.get('line_comment_prefix') or LINE_COMMENT_PREFIX, |
||||||
|
getbool(options, 'trim_blocks', TRIM_BLOCKS), |
||||||
|
getbool(options, 'lstrip_blocks', LSTRIP_BLOCKS), |
||||||
|
NEWLINE_SEQUENCE, |
||||||
|
getbool(options, 'keep_trailing_newline', KEEP_TRAILING_NEWLINE), |
||||||
|
frozenset(extensions), |
||||||
|
cache_size=0, |
||||||
|
auto_reload=False |
||||||
|
) |
||||||
|
|
||||||
|
if getbool(options, 'newstyle_gettext'): |
||||||
|
environment.newstyle_gettext = True |
||||||
|
|
||||||
|
source = fileobj.read().decode(options.get('encoding', 'utf-8')) |
||||||
|
try: |
||||||
|
node = environment.parse(source) |
||||||
|
tokens = list(environment.lex(environment.preprocess(source))) |
||||||
|
except TemplateSyntaxError as e: |
||||||
|
if not silent: |
||||||
|
raise |
||||||
|
# skip templates with syntax errors |
||||||
|
return |
||||||
|
|
||||||
|
finder = _CommentFinder(tokens, comment_tags) |
||||||
|
for lineno, func, message in extract_from_ast(node, keywords): |
||||||
|
yield lineno, func, message, finder.find_comments(lineno) |
||||||
|
|
||||||
|
|
||||||
|
#: nicer import names |
||||||
|
i18n = InternationalizationExtension |
||||||
|
do = ExprStmtExtension |
||||||
|
loopcontrols = LoopControlExtension |
||||||
|
with_ = WithExtension |
||||||
|
autoescape = AutoEscapeExtension |
File diff suppressed because it is too large
@@ -0,0 +1,273 @@
from jinja2.visitor import NodeVisitor
from jinja2._compat import iteritems


VAR_LOAD_PARAMETER = 'param'
VAR_LOAD_RESOLVE = 'resolve'
VAR_LOAD_ALIAS = 'alias'
VAR_LOAD_UNDEFINED = 'undefined'


def find_symbols(nodes, parent_symbols=None):
    sym = Symbols(parent=parent_symbols)
    visitor = FrameSymbolVisitor(sym)
    for node in nodes:
        visitor.visit(node)
    return sym


def symbols_for_node(node, parent_symbols=None):
    sym = Symbols(parent=parent_symbols)
    sym.analyze_node(node)
    return sym
||||||
|
|
||||||
|
|
||||||
|
class Symbols(object): |
||||||
|
|
||||||
|
def __init__(self, parent=None): |
||||||
|
if parent is None: |
||||||
|
self.level = 0 |
||||||
|
else: |
||||||
|
self.level = parent.level + 1 |
||||||
|
self.parent = parent |
||||||
|
self.refs = {} |
||||||
|
self.loads = {} |
||||||
|
self.stores = set() |
||||||
|
|
||||||
|
def analyze_node(self, node, **kwargs): |
||||||
|
visitor = RootVisitor(self) |
||||||
|
visitor.visit(node, **kwargs) |
||||||
|
|
||||||
|
def _define_ref(self, name, load=None): |
||||||
|
ident = 'l_%d_%s' % (self.level, name) |
||||||
|
self.refs[name] = ident |
||||||
|
if load is not None: |
||||||
|
self.loads[ident] = load |
||||||
|
return ident |
||||||
|
|
||||||
|
def find_load(self, target): |
||||||
|
if target in self.loads: |
||||||
|
return self.loads[target] |
||||||
|
if self.parent is not None: |
||||||
|
return self.parent.find_load(target) |
||||||
|
|
||||||
|
def find_ref(self, name): |
||||||
|
if name in self.refs: |
||||||
|
return self.refs[name] |
||||||
|
if self.parent is not None: |
||||||
|
return self.parent.find_ref(name) |
||||||
|
|
||||||
|
def ref(self, name): |
||||||
|
rv = self.find_ref(name) |
||||||
|
if rv is None: |
||||||
|
raise AssertionError('Tried to resolve a name to a reference that ' |
||||||
|
'was unknown to the frame (%r)' % name) |
||||||
|
return rv |
||||||
|
|
||||||
|
def copy(self): |
||||||
|
rv = object.__new__(self.__class__) |
||||||
|
rv.__dict__.update(self.__dict__) |
||||||
|
rv.refs = self.refs.copy() |
||||||
|
rv.loads = self.loads.copy() |
||||||
|
rv.stores = self.stores.copy() |
||||||
|
return rv |
||||||
|
|
||||||
|
def store(self, name): |
||||||
|
self.stores.add(name) |
||||||
|
|
||||||
|
# If we have not see the name referenced yet, we need to figure |
||||||
|
# out what to set it to. |
||||||
|
if name not in self.refs: |
||||||
|
# If there is a parent scope we check if the name has a |
||||||
|
# reference there. If it does it means we might have to alias |
||||||
|
# to a variable there. |
||||||
|
if self.parent is not None: |
||||||
|
outer_ref = self.parent.find_ref(name) |
||||||
|
if outer_ref is not None: |
||||||
|
self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref)) |
||||||
|
return |
||||||
|
|
||||||
|
# Otherwise we can just set it to undefined. |
||||||
|
self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None)) |
||||||
|
|
||||||
|
def declare_parameter(self, name): |
||||||
|
self.stores.add(name) |
||||||
|
return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None)) |
||||||
|
|
||||||
|
def load(self, name): |
||||||
|
target = self.find_ref(name) |
||||||
|
if target is None: |
||||||
|
self._define_ref(name, load=(VAR_LOAD_RESOLVE, name)) |
||||||
|
|
||||||
|
def branch_update(self, branch_symbols): |
||||||
|
stores = {} |
||||||
|
for branch in branch_symbols: |
||||||
|
for target in branch.stores: |
||||||
|
if target in self.stores: |
||||||
|
continue |
||||||
|
stores[target] = stores.get(target, 0) + 1 |
||||||
|
|
||||||
|
for sym in branch_symbols: |
||||||
|
self.refs.update(sym.refs) |
||||||
|
self.loads.update(sym.loads) |
||||||
|
self.stores.update(sym.stores) |
||||||
|
|
||||||
|
for name, branch_count in iteritems(stores): |
||||||
|
if branch_count == len(branch_symbols): |
||||||
|
continue |
||||||
|
target = self.find_ref(name) |
||||||
|
assert target is not None, 'should not happen' |
||||||
|
|
||||||
|
if self.parent is not None: |
||||||
|
outer_target = self.parent.find_ref(name) |
||||||
|
if outer_target is not None: |
||||||
|
self.loads[target] = (VAR_LOAD_ALIAS, outer_target) |
||||||
|
continue |
||||||
|
self.loads[target] = (VAR_LOAD_RESOLVE, name) |
||||||
|
|
||||||
|
def dump_stores(self): |
||||||
|
rv = {} |
||||||
|
node = self |
||||||
|
while node is not None: |
||||||
|
for name in node.stores: |
||||||
|
if name not in rv: |
||||||
|
rv[name] = self.find_ref(name) |
||||||
|
node = node.parent |
||||||
|
return rv |
||||||
|
|
||||||
|
def dump_param_targets(self): |
||||||
|
rv = set() |
||||||
|
node = self |
||||||
|
while node is not None: |
||||||
|
for target, (instr, _) in iteritems(self.loads): |
||||||
|
if instr == VAR_LOAD_PARAMETER: |
||||||
|
rv.add(target) |
||||||
|
node = node.parent |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
class RootVisitor(NodeVisitor): |
||||||
|
|
||||||
|
def __init__(self, symbols): |
||||||
|
self.sym_visitor = FrameSymbolVisitor(symbols) |
||||||
|
|
||||||
|
def _simple_visit(self, node, **kwargs): |
||||||
|
for child in node.iter_child_nodes(): |
||||||
|
self.sym_visitor.visit(child) |
||||||
|
|
||||||
|
visit_Template = visit_Block = visit_Macro = visit_FilterBlock = \ |
||||||
|
visit_Scope = visit_If = visit_ScopedEvalContextModifier = \ |
||||||
|
_simple_visit |
||||||
|
|
||||||
|
def visit_AssignBlock(self, node, **kwargs): |
||||||
|
for child in node.body: |
||||||
|
self.sym_visitor.visit(child) |
||||||
|
|
||||||
|
def visit_CallBlock(self, node, **kwargs): |
||||||
|
for child in node.iter_child_nodes(exclude=('call',)): |
||||||
|
self.sym_visitor.visit(child) |
||||||
|
|
||||||
|
def visit_For(self, node, for_branch='body', **kwargs): |
||||||
|
if for_branch == 'body': |
||||||
|
self.sym_visitor.visit(node.target, store_as_param=True) |
||||||
|
branch = node.body |
||||||
|
elif for_branch == 'else': |
||||||
|
branch = node.else_ |
||||||
|
elif for_branch == 'test': |
||||||
|
self.sym_visitor.visit(node.target, store_as_param=True) |
||||||
|
if node.test is not None: |
||||||
|
self.sym_visitor.visit(node.test) |
||||||
|
return |
||||||
|
else: |
||||||
|
raise RuntimeError('Unknown for branch') |
||||||
|
for item in branch or (): |
||||||
|
self.sym_visitor.visit(item) |
||||||
|
|
||||||
|
def visit_With(self, node, **kwargs): |
||||||
|
for target in node.targets: |
||||||
|
self.sym_visitor.visit(target) |
||||||
|
for child in node.body: |
||||||
|
self.sym_visitor.visit(child) |
||||||
|
|
||||||
|
def generic_visit(self, node, *args, **kwargs): |
||||||
|
raise NotImplementedError('Cannot find symbols for %r' % |
||||||
|
node.__class__.__name__) |
||||||
|
|
||||||
|
|
||||||
|
class FrameSymbolVisitor(NodeVisitor): |
||||||
|
"""A visitor for `Frame.inspect`.""" |
||||||
|
|
||||||
|
def __init__(self, symbols): |
||||||
|
self.symbols = symbols |
||||||
|
|
||||||
|
def visit_Name(self, node, store_as_param=False, **kwargs): |
||||||
|
"""All assignments to names go through this function.""" |
||||||
|
if store_as_param or node.ctx == 'param': |
||||||
|
self.symbols.declare_parameter(node.name) |
||||||
|
elif node.ctx == 'store': |
||||||
|
self.symbols.store(node.name) |
||||||
|
elif node.ctx == 'load': |
||||||
|
self.symbols.load(node.name) |
||||||
|
|
||||||
|
def visit_If(self, node, **kwargs): |
||||||
|
self.visit(node.test, **kwargs) |
||||||
|
|
||||||
|
original_symbols = self.symbols |
||||||
|
|
||||||
|
def inner_visit(nodes): |
||||||
|
self.symbols = rv = original_symbols.copy() |
||||||
|
for subnode in nodes: |
||||||
|
self.visit(subnode, **kwargs) |
||||||
|
self.symbols = original_symbols |
||||||
|
return rv |
||||||
|
|
||||||
|
body_symbols = inner_visit(node.body) |
||||||
|
else_symbols = inner_visit(node.else_ or ()) |
||||||
|
|
||||||
|
self.symbols.branch_update([body_symbols, else_symbols]) |
||||||
|
|
||||||
|
def visit_Macro(self, node, **kwargs): |
||||||
|
self.symbols.store(node.name) |
||||||
|
|
||||||
|
def visit_Import(self, node, **kwargs): |
||||||
|
self.generic_visit(node, **kwargs) |
||||||
|
self.symbols.store(node.target) |
||||||
|
|
||||||
|
def visit_FromImport(self, node, **kwargs): |
||||||
|
self.generic_visit(node, **kwargs) |
||||||
|
for name in node.names: |
||||||
|
if isinstance(name, tuple): |
||||||
|
self.symbols.store(name[1]) |
||||||
|
else: |
||||||
|
self.symbols.store(name) |
||||||
|
|
||||||
|
def visit_Assign(self, node, **kwargs): |
||||||
|
"""Visit assignments in the correct order.""" |
||||||
|
self.visit(node.node, **kwargs) |
||||||
|
self.visit(node.target, **kwargs) |
||||||
|
|
||||||
|
def visit_For(self, node, **kwargs): |
||||||
|
"""Visiting stops at for blocks. However the block sequence |
||||||
|
is visited as part of the outer scope. |
||||||
|
""" |
||||||
|
self.visit(node.iter, **kwargs) |
||||||
|
|
||||||
|
def visit_CallBlock(self, node, **kwargs): |
||||||
|
self.visit(node.call, **kwargs) |
||||||
|
|
||||||
|
def visit_FilterBlock(self, node, **kwargs): |
||||||
|
self.visit(node.filter, **kwargs) |
||||||
|
|
||||||
|
def visit_With(self, node, **kwargs): |
||||||
|
for target in node.values: |
||||||
|
self.visit(target) |
||||||
|
|
||||||
|
def visit_AssignBlock(self, node, **kwargs): |
||||||
|
"""Stop visiting at block assigns.""" |
||||||
|
self.visit(node.target, **kwargs) |
||||||
|
|
||||||
|
def visit_Scope(self, node, **kwargs): |
||||||
|
"""Stop visiting at scopes.""" |
||||||
|
|
||||||
|
def visit_Block(self, node, **kwargs): |
||||||
|
"""Stop visiting at blocks.""" |
@@ -0,0 +1,737 @@
# -*- coding: utf-8 -*-
"""
    jinja2.lexer
    ~~~~~~~~~~~~

    This module implements a Jinja / Python combination lexer. The
    `Lexer` class provided by this module is used to do some preprocessing
    for Jinja.

    On the one hand it filters out invalid operators like the bitshift
    operators we don't allow in templates. On the other hand it separates
    template code and python code in expressions.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
"""
import re
import sys

from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
from jinja2._compat import iteritems, implements_iterator, text_type, intern


# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')
||||||
|
|
||||||
|
def _make_name_re(): |
||||||
|
try: |
||||||
|
compile('föö', '<unknown>', 'eval') |
||||||
|
except SyntaxError: |
||||||
|
return re.compile(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b') |
||||||
|
|
||||||
|
import jinja2 |
||||||
|
from jinja2 import _stringdefs |
||||||
|
name_re = re.compile(r'[%s][%s]*' % (_stringdefs.xid_start, |
||||||
|
_stringdefs.xid_continue)) |
||||||
|
|
||||||
|
# Save some memory here |
||||||
|
sys.modules.pop('jinja2._stringdefs') |
||||||
|
del _stringdefs |
||||||
|
del jinja2._stringdefs |
||||||
|
|
||||||
|
return name_re |
||||||
|
|
||||||
|
# we use the unicode identifier rule if this python version is able |
||||||
|
# to handle unicode identifiers, otherwise the standard ASCII one. |
||||||
|
name_re = _make_name_re() |
||||||
|
del _make_name_re |
||||||
|
|
||||||
|
float_re = re.compile(r'(?<!\.)\d+\.\d+') |
||||||
|
newline_re = re.compile(r'(\r\n|\r|\n)') |
||||||
|
|
||||||
|
# intern the tokens and keep references to them
||||||
|
TOKEN_ADD = intern('add') |
||||||
|
TOKEN_ASSIGN = intern('assign') |
||||||
|
TOKEN_COLON = intern('colon') |
||||||
|
TOKEN_COMMA = intern('comma') |
||||||
|
TOKEN_DIV = intern('div') |
||||||
|
TOKEN_DOT = intern('dot') |
||||||
|
TOKEN_EQ = intern('eq') |
||||||
|
TOKEN_FLOORDIV = intern('floordiv') |
||||||
|
TOKEN_GT = intern('gt') |
||||||
|
TOKEN_GTEQ = intern('gteq') |
||||||
|
TOKEN_LBRACE = intern('lbrace') |
||||||
|
TOKEN_LBRACKET = intern('lbracket') |
||||||
|
TOKEN_LPAREN = intern('lparen') |
||||||
|
TOKEN_LT = intern('lt') |
||||||
|
TOKEN_LTEQ = intern('lteq') |
||||||
|
TOKEN_MOD = intern('mod') |
||||||
|
TOKEN_MUL = intern('mul') |
||||||
|
TOKEN_NE = intern('ne') |
||||||
|
TOKEN_PIPE = intern('pipe') |
||||||
|
TOKEN_POW = intern('pow') |
||||||
|
TOKEN_RBRACE = intern('rbrace') |
||||||
|
TOKEN_RBRACKET = intern('rbracket') |
||||||
|
TOKEN_RPAREN = intern('rparen') |
||||||
|
TOKEN_SEMICOLON = intern('semicolon') |
||||||
|
TOKEN_SUB = intern('sub') |
||||||
|
TOKEN_TILDE = intern('tilde') |
||||||
|
TOKEN_WHITESPACE = intern('whitespace') |
||||||
|
TOKEN_FLOAT = intern('float') |
||||||
|
TOKEN_INTEGER = intern('integer') |
||||||
|
TOKEN_NAME = intern('name') |
||||||
|
TOKEN_STRING = intern('string') |
||||||
|
TOKEN_OPERATOR = intern('operator') |
||||||
|
TOKEN_BLOCK_BEGIN = intern('block_begin') |
||||||
|
TOKEN_BLOCK_END = intern('block_end') |
||||||
|
TOKEN_VARIABLE_BEGIN = intern('variable_begin') |
||||||
|
TOKEN_VARIABLE_END = intern('variable_end') |
||||||
|
TOKEN_RAW_BEGIN = intern('raw_begin') |
||||||
|
TOKEN_RAW_END = intern('raw_end') |
||||||
|
TOKEN_COMMENT_BEGIN = intern('comment_begin') |
||||||
|
TOKEN_COMMENT_END = intern('comment_end') |
||||||
|
TOKEN_COMMENT = intern('comment') |
||||||
|
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin') |
||||||
|
TOKEN_LINESTATEMENT_END = intern('linestatement_end') |
||||||
|
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin') |
||||||
|
TOKEN_LINECOMMENT_END = intern('linecomment_end') |
||||||
|
TOKEN_LINECOMMENT = intern('linecomment') |
||||||
|
TOKEN_DATA = intern('data') |
||||||
|
TOKEN_INITIAL = intern('initial') |
||||||
|
TOKEN_EOF = intern('eof') |
||||||
|
|
||||||
|
# bind operators to token types |
||||||
|
operators = { |
||||||
|
'+': TOKEN_ADD, |
||||||
|
'-': TOKEN_SUB, |
||||||
|
'/': TOKEN_DIV, |
||||||
|
'//': TOKEN_FLOORDIV, |
||||||
|
'*': TOKEN_MUL, |
||||||
|
'%': TOKEN_MOD, |
||||||
|
'**': TOKEN_POW, |
||||||
|
'~': TOKEN_TILDE, |
||||||
|
'[': TOKEN_LBRACKET, |
||||||
|
']': TOKEN_RBRACKET, |
||||||
|
'(': TOKEN_LPAREN, |
||||||
|
')': TOKEN_RPAREN, |
||||||
|
'{': TOKEN_LBRACE, |
||||||
|
'}': TOKEN_RBRACE, |
||||||
|
'==': TOKEN_EQ, |
||||||
|
'!=': TOKEN_NE, |
||||||
|
'>': TOKEN_GT, |
||||||
|
'>=': TOKEN_GTEQ, |
||||||
|
'<': TOKEN_LT, |
||||||
|
'<=': TOKEN_LTEQ, |
||||||
|
'=': TOKEN_ASSIGN, |
||||||
|
'.': TOKEN_DOT, |
||||||
|
':': TOKEN_COLON, |
||||||
|
'|': TOKEN_PIPE, |
||||||
|
',': TOKEN_COMMA, |
||||||
|
';': TOKEN_SEMICOLON |
||||||
|
} |
||||||
|
|
||||||
|
reverse_operators = dict([(v, k) for k, v in iteritems(operators)]) |
||||||
|
assert len(operators) == len(reverse_operators), 'operators dropped' |
||||||
|
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in |
||||||
|
sorted(operators, key=lambda x: -len(x)))) |
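The length-based sort above matters because ``re`` alternation is first-match, not longest-match: multi-character operators must be listed before their one-character prefixes or they would never be lexed as a single token. A small demonstration of the difference, using nothing beyond the standard library::

    import re

    ops = ['*', '**', '/', '//']
    longest_first = re.compile(
        '|'.join(re.escape(x) for x in sorted(ops, key=lambda x: -len(x))))
    naive = re.compile('|'.join(re.escape(x) for x in ops))

    assert longest_first.match('**').group() == '**'
    # with the naive order the power operator is split into two '*' tokens
    assert naive.match('**').group() == '*'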
||||||
|
|
||||||
|
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
                            TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])


def _describe_token_type(token_type):
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    return {
        TOKEN_COMMENT_BEGIN:        'begin of comment',
        TOKEN_COMMENT_END:          'end of comment',
        TOKEN_COMMENT:              'comment',
        TOKEN_LINECOMMENT:          'comment',
        TOKEN_BLOCK_BEGIN:          'begin of statement block',
        TOKEN_BLOCK_END:            'end of statement block',
        TOKEN_VARIABLE_BEGIN:       'begin of print statement',
        TOKEN_VARIABLE_END:         'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN:  'begin of line statement',
        TOKEN_LINESTATEMENT_END:    'end of line statement',
        TOKEN_DATA:                 'template data / text',
        TOKEN_EOF:                  'end of template'
    }.get(token_type, token_type)


def describe_token(token):
    """Returns a description of the token."""
    if token.type == 'name':
        return token.value
    return _describe_token_type(token.type)


def describe_token_expr(expr):
    """Like `describe_token` but for token expressions."""
    if ':' in expr:
        type, value = expr.split(':', 1)
        if type == 'name':
            return value
    else:
        type = expr
    return _describe_token_type(type)


def count_newlines(value):
    """Count the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    """
    return len(newline_re.findall(value))
||||||
|
|
||||||
|
|
||||||
|
def compile_rules(environment): |
||||||
|
"""Compiles all the rules from the environment into a list of rules.""" |
||||||
|
e = re.escape |
||||||
|
rules = [ |
||||||
|
(len(environment.comment_start_string), 'comment', |
||||||
|
e(environment.comment_start_string)), |
||||||
|
(len(environment.block_start_string), 'block', |
||||||
|
e(environment.block_start_string)), |
||||||
|
(len(environment.variable_start_string), 'variable', |
||||||
|
e(environment.variable_start_string)) |
||||||
|
] |
||||||
|
|
||||||
|
if environment.line_statement_prefix is not None: |
||||||
|
rules.append((len(environment.line_statement_prefix), 'linestatement', |
||||||
|
r'^[ \t\v]*' + e(environment.line_statement_prefix))) |
||||||
|
if environment.line_comment_prefix is not None: |
||||||
|
rules.append((len(environment.line_comment_prefix), 'linecomment', |
||||||
|
r'(?:^|(?<=\S))[^\S\r\n]*' + |
||||||
|
e(environment.line_comment_prefix))) |
||||||
|
|
||||||
|
return [x[1:] for x in sorted(rules, reverse=True)] |
||||||
|
|
||||||
|
|
||||||
|
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        raise self.error_class(self.message, lineno, filename)
||||||
|
|
||||||
|
|
||||||
|
class Token(tuple):
    """Token class."""
    __slots__ = ()
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator |
||||||
|
class TokenStream(object): |
||||||
|
"""A token stream is an iterable that yields :class:`Token`\\s. The |
||||||
|
parser however does not iterate over it but calls :meth:`next` to go |
||||||
|
one token ahead. The current active token is stored as :attr:`current`. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, generator, name, filename): |
||||||
|
self._iter = iter(generator) |
||||||
|
self._pushed = deque() |
||||||
|
self.name = name |
||||||
|
self.filename = filename |
||||||
|
self.closed = False |
||||||
|
self.current = Token(1, TOKEN_INITIAL, '') |
||||||
|
next(self) |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
return TokenStreamIterator(self) |
||||||
|
|
||||||
|
def __bool__(self): |
||||||
|
return bool(self._pushed) or self.current.type is not TOKEN_EOF |
||||||
|
__nonzero__ = __bool__ # py2 |
||||||
|
|
||||||
|
eos = property(lambda x: not x, doc="Are we at the end of the stream?") |
||||||
|
|
||||||
|
def push(self, token): |
||||||
|
"""Push a token back to the stream.""" |
||||||
|
self._pushed.append(token) |
||||||
|
|
||||||
|
def look(self): |
||||||
|
"""Look at the next token.""" |
||||||
|
old_token = next(self) |
||||||
|
result = self.current |
||||||
|
self.push(result) |
||||||
|
self.current = old_token |
||||||
|
return result |
||||||
|
|
||||||
|
def skip(self, n=1): |
||||||
|
"""Got n tokens ahead.""" |
||||||
|
for x in range(n): |
||||||
|
next(self) |
||||||
|
|
||||||
|
def next_if(self, expr): |
||||||
|
"""Perform the token test and return the token if it matched. |
||||||
|
Otherwise the return value is `None`. |
||||||
|
""" |
||||||
|
if self.current.test(expr): |
||||||
|
return next(self) |
||||||
|
|
||||||
|
def skip_if(self, expr): |
||||||
|
"""Like :meth:`next_if` but only returns `True` or `False`.""" |
||||||
|
return self.next_if(expr) is not None |
||||||
|
|
||||||
|
def __next__(self): |
||||||
|
"""Go one token ahead and return the old one""" |
||||||
|
rv = self.current |
||||||
|
if self._pushed: |
||||||
|
self.current = self._pushed.popleft() |
||||||
|
elif self.current.type is not TOKEN_EOF: |
||||||
|
try: |
||||||
|
self.current = next(self._iter) |
||||||
|
except StopIteration: |
||||||
|
self.close() |
||||||
|
return rv |
||||||
|
|
||||||
|
def close(self): |
||||||
|
"""Close the stream.""" |
||||||
|
self.current = Token(self.current.lineno, TOKEN_EOF, '') |
||||||
|
self._iter = None |
||||||
|
self.closed = True |
||||||
|
|
||||||
|
def expect(self, expr): |
||||||
|
"""Expect a given token type and return it. This accepts the same |
||||||
|
argument as :meth:`jinja2.lexer.Token.test`. |
||||||
|
""" |
||||||
|
if not self.current.test(expr): |
||||||
|
expr = describe_token_expr(expr) |
||||||
|
if self.current.type is TOKEN_EOF: |
||||||
|
raise TemplateSyntaxError('unexpected end of template, ' |
||||||
|
'expected %r.' % expr, |
||||||
|
self.current.lineno, |
||||||
|
self.name, self.filename) |
||||||
|
raise TemplateSyntaxError("expected token %r, got %r" % |
||||||
|
(expr, describe_token(self.current)), |
||||||
|
self.current.lineno, |
||||||
|
self.name, self.filename) |
||||||
|
try: |
||||||
|
return self.current |
||||||
|
finally: |
||||||
|
next(self) |
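As a rough usage sketch, a ``TokenStream`` obtained via ``get_lexer()``/``tokenize()`` from this module can be stepped through with ``expect`` and ``skip_if``; the template source and names below are only illustrative::

    from jinja2 import Environment
    from jinja2.lexer import get_lexer

    env = Environment()
    stream = get_lexer(env).tokenize(u'{{ user.name }}', name='demo')

    stream.expect('variable_begin')        # raises TemplateSyntaxError on mismatch
    name_tok = stream.expect('name:user')  # 'type:value' expressions also work
    stream.skip_if('dot')                  # like expect, but returns a bool
    print(name_tok.value, stream.current)  # 'user' and the token after the dot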
||||||
|
|
||||||
|
|
||||||
|
def get_lexer(environment):
    """Return a lexer which is probably cached."""
    key = (environment.block_start_string,
           environment.block_end_string,
           environment.variable_start_string,
           environment.variable_end_string,
           environment.comment_start_string,
           environment.comment_end_string,
           environment.line_statement_prefix,
           environment.line_comment_prefix,
           environment.trim_blocks,
           environment.lstrip_blocks,
           environment.newline_sequence,
           environment.keep_trailing_newline)
    lexer = _lexer_cache.get(key)
    if lexer is None:
        lexer = Lexer(environment)
        _lexer_cache[key] = lexer
    return lexer
||||||
|
|
||||||
|
|
||||||
|
class Lexer(object): |
||||||
|
"""Class that implements a lexer for a given environment. Automatically |
||||||
|
created by the environment class, usually you don't have to do that. |
||||||
|
|
||||||
|
Note that the lexer is not automatically bound to an environment. |
||||||
|
Multiple environments can share the same lexer. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, environment): |
||||||
|
# shortcuts |
||||||
|
c = lambda x: re.compile(x, re.M | re.S) |
||||||
|
e = re.escape |
||||||
|
|
||||||
|
# lexing rules for tags |
||||||
|
tag_rules = [ |
||||||
|
(whitespace_re, TOKEN_WHITESPACE, None), |
||||||
|
(float_re, TOKEN_FLOAT, None), |
||||||
|
(integer_re, TOKEN_INTEGER, None), |
||||||
|
(name_re, TOKEN_NAME, None), |
||||||
|
(string_re, TOKEN_STRING, None), |
||||||
|
(operator_re, TOKEN_OPERATOR, None) |
||||||
|
] |
||||||
|
|
||||||
|
# assemble the root lexing rule. because "|" is ungreedy |
||||||
|
# we have to sort by length so that the lexer continues working |
||||||
|
# as expected when we have parsing rules like <% for block and |
||||||
|
# <%= for variables. (if someone wants asp like syntax) |
||||||
|
# variables are just part of the rules if variable processing |
||||||
|
# is required. |
||||||
|
root_tag_rules = compile_rules(environment) |
||||||
|
|
||||||
|
# block suffix if trimming is enabled |
||||||
|
block_suffix_re = environment.trim_blocks and '\\n?' or '' |
||||||
|
|
||||||
|
# strip leading spaces if lstrip_blocks is enabled |
||||||
|
prefix_re = {} |
||||||
|
if environment.lstrip_blocks: |
||||||
|
# use '{%+' to manually disable lstrip_blocks behavior |
||||||
|
no_lstrip_re = e('+') |
||||||
|
# detect overlap between block and variable or comment strings |
||||||
|
block_diff = c(r'^%s(.*)' % e(environment.block_start_string)) |
||||||
|
# make sure we don't mistake a block for a variable or a comment |
||||||
|
m = block_diff.match(environment.comment_start_string) |
||||||
|
no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' |
||||||
|
m = block_diff.match(environment.variable_start_string) |
||||||
|
no_lstrip_re += m and r'|%s' % e(m.group(1)) or '' |
||||||
|
|
||||||
|
# detect overlap between comment and variable strings |
||||||
|
comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string)) |
||||||
|
m = comment_diff.match(environment.variable_start_string) |
||||||
|
no_variable_re = m and r'(?!%s)' % e(m.group(1)) or '' |
||||||
|
|
||||||
|
lstrip_re = r'^[ \t]*' |
||||||
|
block_prefix_re = r'%s%s(?!%s)|%s\+?' % ( |
||||||
|
lstrip_re, |
||||||
|
e(environment.block_start_string), |
||||||
|
no_lstrip_re, |
||||||
|
e(environment.block_start_string), |
||||||
|
) |
||||||
|
comment_prefix_re = r'%s%s%s|%s\+?' % ( |
||||||
|
lstrip_re, |
||||||
|
e(environment.comment_start_string), |
||||||
|
no_variable_re, |
||||||
|
e(environment.comment_start_string), |
||||||
|
) |
||||||
|
prefix_re['block'] = block_prefix_re |
||||||
|
prefix_re['comment'] = comment_prefix_re |
||||||
|
else: |
||||||
|
block_prefix_re = '%s' % e(environment.block_start_string) |
||||||
|
|
||||||
|
self.newline_sequence = environment.newline_sequence |
||||||
|
self.keep_trailing_newline = environment.keep_trailing_newline |
||||||
|
|
||||||
|
# global lexing rules |
||||||
|
self.rules = { |
||||||
|
'root': [ |
||||||
|
# directives |
||||||
|
(c('(.*?)(?:%s)' % '|'.join( |
||||||
|
[r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % ( |
||||||
|
e(environment.block_start_string), |
||||||
|
block_prefix_re, |
||||||
|
e(environment.block_end_string), |
||||||
|
e(environment.block_end_string) |
||||||
|
)] + [ |
||||||
|
r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n,r)) |
||||||
|
for n, r in root_tag_rules |
||||||
|
])), (TOKEN_DATA, '#bygroup'), '#bygroup'), |
||||||
|
# data |
||||||
|
(c('.+'), TOKEN_DATA, None) |
||||||
|
], |
||||||
|
# comments |
||||||
|
TOKEN_COMMENT_BEGIN: [ |
||||||
|
(c(r'(.*?)((?:\-%s\s*|%s)%s)' % ( |
||||||
|
e(environment.comment_end_string), |
||||||
|
e(environment.comment_end_string), |
||||||
|
block_suffix_re |
||||||
|
)), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'), |
||||||
|
(c('(.)'), (Failure('Missing end of comment tag'),), None) |
||||||
|
], |
||||||
|
# blocks |
||||||
|
TOKEN_BLOCK_BEGIN: [ |
||||||
|
(c(r'(?:\-%s\s*|%s)%s' % ( |
||||||
|
e(environment.block_end_string), |
||||||
|
e(environment.block_end_string), |
||||||
|
block_suffix_re |
||||||
|
)), TOKEN_BLOCK_END, '#pop'), |
||||||
|
] + tag_rules, |
||||||
|
# variables |
||||||
|
TOKEN_VARIABLE_BEGIN: [ |
||||||
|
(c(r'\-%s\s*|%s' % ( |
||||||
|
e(environment.variable_end_string), |
||||||
|
e(environment.variable_end_string) |
||||||
|
)), TOKEN_VARIABLE_END, '#pop') |
||||||
|
] + tag_rules, |
||||||
|
# raw block |
||||||
|
TOKEN_RAW_BEGIN: [ |
||||||
|
(c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % ( |
||||||
|
e(environment.block_start_string), |
||||||
|
block_prefix_re, |
||||||
|
e(environment.block_end_string), |
||||||
|
e(environment.block_end_string), |
||||||
|
block_suffix_re |
||||||
|
)), (TOKEN_DATA, TOKEN_RAW_END), '#pop'), |
||||||
|
(c('(.)'), (Failure('Missing end of raw directive'),), None) |
||||||
|
], |
||||||
|
# line statements |
||||||
|
TOKEN_LINESTATEMENT_BEGIN: [ |
||||||
|
(c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop') |
||||||
|
] + tag_rules, |
||||||
|
# line comments |
||||||
|
TOKEN_LINECOMMENT_BEGIN: [ |
||||||
|
(c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT, |
||||||
|
TOKEN_LINECOMMENT_END), '#pop') |
||||||
|
] |
||||||
|
} |
||||||
|
|
||||||
|
def _normalize_newlines(self, value): |
||||||
|
"""Called for strings and template data to normalize it to unicode.""" |
||||||
|
return newline_re.sub(self.newline_sequence, value) |
||||||
|
|
||||||
|
def tokenize(self, source, name=None, filename=None, state=None): |
||||||
|
"""Calls tokeniter + tokenize and wraps it in a token stream. |
||||||
|
""" |
||||||
|
stream = self.tokeniter(source, name, filename, state) |
||||||
|
return TokenStream(self.wrap(stream, name, filename), name, filename) |
||||||
|
|
||||||
|
def wrap(self, stream, name=None, filename=None): |
||||||
|
"""This is called with the stream as returned by `tokenize` and wraps |
||||||
|
every token in a :class:`Token` and converts the value. |
||||||
|
""" |
||||||
|
for lineno, token, value in stream: |
||||||
|
if token in ignored_tokens: |
||||||
|
continue |
||||||
|
elif token == 'linestatement_begin': |
||||||
|
token = 'block_begin' |
||||||
|
elif token == 'linestatement_end': |
||||||
|
token = 'block_end' |
||||||
|
# we are not interested in those tokens in the parser |
||||||
|
elif token in ('raw_begin', 'raw_end'): |
||||||
|
continue |
||||||
|
elif token == 'data': |
||||||
|
value = self._normalize_newlines(value) |
||||||
|
elif token == 'keyword': |
||||||
|
token = value |
||||||
|
elif token == 'name': |
||||||
|
value = str(value) |
||||||
|
elif token == 'string': |
||||||
|
# try to unescape string |
||||||
|
try: |
||||||
|
value = self._normalize_newlines(value[1:-1]) \ |
||||||
|
.encode('ascii', 'backslashreplace') \ |
||||||
|
.decode('unicode-escape') |
||||||
|
except Exception as e: |
||||||
|
msg = str(e).split(':')[-1].strip() |
||||||
|
raise TemplateSyntaxError(msg, lineno, name, filename) |
||||||
|
elif token == 'integer': |
||||||
|
value = int(value) |
||||||
|
elif token == 'float': |
||||||
|
value = float(value) |
||||||
|
elif token == 'operator': |
||||||
|
token = operators[value] |
||||||
|
yield Token(lineno, token, value) |
||||||
|
|
||||||
|
def tokeniter(self, source, name, filename=None, state=None): |
||||||
|
"""This method tokenizes the text and returns the tokens in a |
||||||
|
generator. Use this method if you just want to tokenize a template. |
||||||
|
""" |
||||||
|
source = text_type(source) |
||||||
|
lines = source.splitlines() |
||||||
|
if self.keep_trailing_newline and source: |
||||||
|
for newline in ('\r\n', '\r', '\n'): |
||||||
|
if source.endswith(newline): |
||||||
|
lines.append('') |
||||||
|
break |
||||||
|
source = '\n'.join(lines) |
||||||
|
pos = 0 |
||||||
|
lineno = 1 |
||||||
|
stack = ['root'] |
||||||
|
if state is not None and state != 'root': |
||||||
|
assert state in ('variable', 'block'), 'invalid state' |
||||||
|
stack.append(state + '_begin') |
||||||
|
else: |
||||||
|
state = 'root' |
||||||
|
statetokens = self.rules[stack[-1]] |
||||||
|
source_length = len(source) |
||||||
|
|
||||||
|
balancing_stack = [] |
||||||
|
|
||||||
|
while 1: |
||||||
|
# tokenizer loop |
||||||
|
for regex, tokens, new_state in statetokens: |
||||||
|
m = regex.match(source, pos) |
||||||
|
# if no match we try again with the next rule |
||||||
|
if m is None: |
||||||
|
continue |
||||||
|
|
||||||
|
# we only match blocks and variables if braces / parentheses |
||||||
|
# are balanced. continue parsing with the lower rule which |
||||||
|
# is the operator rule. do this only if the end tags look |
||||||
|
# like operators |
||||||
|
if balancing_stack and \ |
||||||
|
tokens in ('variable_end', 'block_end', |
||||||
|
'linestatement_end'): |
||||||
|
continue |
||||||
|
|
||||||
|
# tuples support more options |
||||||
|
if isinstance(tokens, tuple): |
||||||
|
for idx, token in enumerate(tokens): |
||||||
|
# failure group |
||||||
|
if token.__class__ is Failure: |
||||||
|
raise token(lineno, filename) |
||||||
|
# bygroup is a bit more complex, in that case we |
||||||
|
# yield for the current token the first named |
||||||
|
# group that matched |
||||||
|
elif token == '#bygroup': |
||||||
|
for key, value in iteritems(m.groupdict()): |
||||||
|
if value is not None: |
||||||
|
yield lineno, key, value |
||||||
|
lineno += value.count('\n') |
||||||
|
break |
||||||
|
else: |
||||||
|
raise RuntimeError('%r wanted to resolve ' |
||||||
|
'the token dynamically' |
||||||
|
' but no group matched' |
||||||
|
% regex) |
||||||
|
# normal group |
||||||
|
else: |
||||||
|
data = m.group(idx + 1) |
||||||
|
if data or token not in ignore_if_empty: |
||||||
|
yield lineno, token, data |
||||||
|
lineno += data.count('\n') |
||||||
|
|
||||||
|
# strings as tokens are just yielded as-is.
||||||
|
else: |
||||||
|
data = m.group() |
||||||
|
# update brace/parentheses balance |
||||||
|
if tokens == 'operator': |
||||||
|
if data == '{': |
||||||
|
balancing_stack.append('}') |
||||||
|
elif data == '(': |
||||||
|
balancing_stack.append(')') |
||||||
|
elif data == '[': |
||||||
|
balancing_stack.append(']') |
||||||
|
elif data in ('}', ')', ']'): |
||||||
|
if not balancing_stack: |
||||||
|
raise TemplateSyntaxError('unexpected \'%s\'' % |
||||||
|
data, lineno, name, |
||||||
|
filename) |
||||||
|
expected_op = balancing_stack.pop() |
||||||
|
if expected_op != data: |
||||||
|
raise TemplateSyntaxError('unexpected \'%s\', ' |
||||||
|
'expected \'%s\'' % |
||||||
|
(data, expected_op), |
||||||
|
lineno, name, |
||||||
|
filename) |
||||||
|
# yield items |
||||||
|
if data or tokens not in ignore_if_empty: |
||||||
|
yield lineno, tokens, data |
||||||
|
lineno += data.count('\n') |
||||||
|
|
||||||
|
# fetch new position into new variable so that we can check |
||||||
|
# if there is an internal parsing error which would result
||||||
|
# in an infinite loop |
||||||
|
pos2 = m.end() |
||||||
|
|
||||||
|
# handle state changes |
||||||
|
if new_state is not None: |
||||||
|
# remove the uppermost state |
||||||
|
if new_state == '#pop': |
||||||
|
stack.pop() |
||||||
|
# resolve the new state by group checking |
||||||
|
elif new_state == '#bygroup': |
||||||
|
for key, value in iteritems(m.groupdict()): |
||||||
|
if value is not None: |
||||||
|
stack.append(key) |
||||||
|
break |
||||||
|
else: |
||||||
|
raise RuntimeError('%r wanted to resolve the ' |
||||||
|
'new state dynamically but' |
||||||
|
' no group matched' % |
||||||
|
regex) |
||||||
|
# direct state name given |
||||||
|
else: |
||||||
|
stack.append(new_state) |
||||||
|
statetokens = self.rules[stack[-1]] |
||||||
|
# we are still at the same position and no stack change. |
||||||
|
# this means a loop without break condition, avoid that and |
||||||
|
# raise error |
||||||
|
elif pos2 == pos: |
||||||
|
raise RuntimeError('%r yielded empty string without ' |
||||||
|
'stack change' % regex) |
||||||
|
# publish the new position and start over
||||||
|
pos = pos2 |
||||||
|
break |
||||||
|
# if loop terminated without break we haven't found a single match |
||||||
|
# either we are at the end of the file or we have a problem |
||||||
|
else: |
||||||
|
# end of text |
||||||
|
if pos >= source_length: |
||||||
|
return |
||||||
|
# something went wrong |
||||||
|
raise TemplateSyntaxError('unexpected char %r at %d' % |
||||||
|
(source[pos], pos), lineno, |
||||||
|
name, filename) |
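To see what ``tokeniter`` produces for a given template, the public ``Environment.lex()`` wrapper can be used (it calls this lexer for you); the output shown in the comments is approximate::

    from jinja2 import Environment

    env = Environment()
    for lineno, token_type, value in env.lex(u'Hello {{ name }}!'):
        print(lineno, token_type, repr(value))
    # prints something like:
    # 1 data 'Hello '
    # 1 variable_begin '{{'
    # 1 whitespace ' '
    # 1 name 'name'
    # 1 whitespace ' '
    # 1 variable_end '}}'
    # 1 data '!'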
@@ -0,0 +1,481 @@
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.loaders |
||||||
|
~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Jinja loader classes. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import os |
||||||
|
import sys |
||||||
|
import weakref |
||||||
|
from types import ModuleType |
||||||
|
from os import path |
||||||
|
from hashlib import sha1 |
||||||
|
from jinja2.exceptions import TemplateNotFound |
||||||
|
from jinja2.utils import open_if_exists, internalcode |
||||||
|
from jinja2._compat import string_types, iteritems |
||||||
|
|
||||||
|
|
||||||
|
def split_template_path(template):
    """Split a path into segments and perform a sanity check.  If it detects
    '..' in the path it will raise a `TemplateNotFound` error.
    """
    pieces = []
    for piece in template.split('/'):
        if path.sep in piece \
           or (path.altsep and path.altsep in piece) or \
           piece == path.pardir:
            raise TemplateNotFound(template)
        elif piece and piece != '.':
            pieces.append(piece)
    return pieces
||||||
|
|
||||||
|
|
||||||
|
class BaseLoader(object): |
||||||
|
"""Baseclass for all loaders. Subclass this and override `get_source` to |
||||||
|
implement a custom loading mechanism. The environment provides a |
||||||
|
`get_template` method that calls the loader's `load` method to get the |
||||||
|
:class:`Template` object. |
||||||
|
|
||||||
|
A very basic example for a loader that looks up templates on the file |
||||||
|
system could look like this:: |
||||||
|
|
||||||
|
from jinja2 import BaseLoader, TemplateNotFound |
||||||
|
from os.path import join, exists, getmtime |
||||||
|
|
||||||
|
class MyLoader(BaseLoader): |
||||||
|
|
||||||
|
def __init__(self, path): |
||||||
|
self.path = path |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
path = join(self.path, template) |
||||||
|
if not exists(path): |
||||||
|
raise TemplateNotFound(template) |
||||||
|
mtime = getmtime(path) |
||||||
|
with file(path) as f: |
||||||
|
source = f.read().decode('utf-8') |
||||||
|
return source, path, lambda: mtime == getmtime(path) |
||||||
|
""" |
||||||
|
|
||||||
|
#: if set to `False` it indicates that the loader cannot provide access |
||||||
|
#: to the source of templates. |
||||||
|
#: |
||||||
|
#: .. versionadded:: 2.4 |
||||||
|
has_source_access = True |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
"""Get the template source, filename and reload helper for a template. |
||||||
|
It's passed the environment and template name and has to return a |
||||||
|
tuple in the form ``(source, filename, uptodate)`` or raise a |
||||||
|
`TemplateNotFound` error if it can't locate the template. |
||||||
|
|
||||||
|
The source part of the returned tuple must be the source of the |
||||||
|
template as a unicode string or an ASCII bytestring. The filename should
||||||
|
be the name of the file on the filesystem if it was loaded from there, |
||||||
|
otherwise `None`. The filename is used by python for the tracebacks |
||||||
|
if no loader extension is used. |
||||||
|
|
||||||
|
The last item in the tuple is the `uptodate` function. If auto |
||||||
|
reloading is enabled it's always called to check if the template |
||||||
|
changed. No arguments are passed so the function must store the |
||||||
|
old state somewhere (for example in a closure). If it returns `False` |
||||||
|
the template will be reloaded. |
||||||
|
""" |
||||||
|
if not self.has_source_access: |
||||||
|
raise RuntimeError('%s cannot provide access to the source' % |
||||||
|
self.__class__.__name__) |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
"""Iterates over all templates. If the loader does not support that |
||||||
|
it should raise a :exc:`TypeError` which is the default behavior. |
||||||
|
""" |
||||||
|
raise TypeError('this loader cannot iterate over all templates') |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def load(self, environment, name, globals=None): |
||||||
|
"""Loads a template. This method looks up the template in the cache |
||||||
|
or loads one by calling :meth:`get_source`. Subclasses should not |
||||||
|
override this method as loaders working on collections of other |
||||||
|
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`) |
||||||
|
will not call this method but `get_source` directly. |
||||||
|
""" |
||||||
|
code = None |
||||||
|
if globals is None: |
||||||
|
globals = {} |
||||||
|
|
||||||
|
# first we try to get the source for this template together |
||||||
|
# with the filename and the uptodate function. |
||||||
|
source, filename, uptodate = self.get_source(environment, name) |
||||||
|
|
||||||
|
# try to load the code from the bytecode cache if there is a |
||||||
|
# bytecode cache configured. |
||||||
|
bcc = environment.bytecode_cache |
||||||
|
if bcc is not None: |
||||||
|
bucket = bcc.get_bucket(environment, name, filename, source) |
||||||
|
code = bucket.code |
||||||
|
|
||||||
|
# if we don't have code so far (not cached, no longer up to |
||||||
|
# date) etc. we compile the template |
||||||
|
if code is None: |
||||||
|
code = environment.compile(source, name, filename) |
||||||
|
|
||||||
|
# if the bytecode cache is available and the bucket doesn't |
||||||
|
# have a code so far, we give the bucket the new code and put |
||||||
|
# it back to the bytecode cache. |
||||||
|
if bcc is not None and bucket.code is None: |
||||||
|
bucket.code = code |
||||||
|
bcc.set_bucket(bucket) |
||||||
|
|
||||||
|
return environment.template_class.from_code(environment, code, |
||||||
|
globals, uptodate) |
||||||
|
|
||||||
|
|
||||||
|
class FileSystemLoader(BaseLoader): |
||||||
|
"""Loads templates from the file system. This loader can find templates |
||||||
|
in folders on the file system and is the preferred way to load them. |
||||||
|
|
||||||
|
The loader takes the path to the templates as string, or if multiple |
||||||
|
locations are wanted a list of them which is then looked up in the |
||||||
|
given order:: |
||||||
|
|
||||||
|
>>> loader = FileSystemLoader('/path/to/templates') |
||||||
|
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path']) |
||||||
|
|
||||||
|
Per default the template encoding is ``'utf-8'`` which can be changed |
||||||
|
by setting the `encoding` parameter to something else. |
||||||
|
|
||||||
|
To follow symbolic links, set the *followlinks* parameter to ``True``:: |
||||||
|
|
||||||
|
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True) |
||||||
|
|
||||||
|
.. versionchanged:: 2.8+ |
||||||
|
The *followlinks* parameter was added. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, searchpath, encoding='utf-8', followlinks=False): |
||||||
|
if isinstance(searchpath, string_types): |
||||||
|
searchpath = [searchpath] |
||||||
|
self.searchpath = list(searchpath) |
||||||
|
self.encoding = encoding |
||||||
|
self.followlinks = followlinks |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
pieces = split_template_path(template) |
||||||
|
for searchpath in self.searchpath: |
||||||
|
filename = path.join(searchpath, *pieces) |
||||||
|
f = open_if_exists(filename) |
||||||
|
if f is None: |
||||||
|
continue |
||||||
|
try: |
||||||
|
contents = f.read().decode(self.encoding) |
||||||
|
finally: |
||||||
|
f.close() |
||||||
|
|
||||||
|
mtime = path.getmtime(filename) |
||||||
|
|
||||||
|
def uptodate(): |
||||||
|
try: |
||||||
|
return path.getmtime(filename) == mtime |
||||||
|
except OSError: |
||||||
|
return False |
||||||
|
return contents, filename, uptodate |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
found = set() |
||||||
|
for searchpath in self.searchpath: |
||||||
|
walk_dir = os.walk(searchpath, followlinks=self.followlinks) |
||||||
|
for dirpath, dirnames, filenames in walk_dir: |
||||||
|
for filename in filenames: |
||||||
|
template = os.path.join(dirpath, filename) \ |
||||||
|
[len(searchpath):].strip(os.path.sep) \ |
||||||
|
.replace(os.path.sep, '/') |
||||||
|
if template[:2] == './': |
||||||
|
template = template[2:] |
||||||
|
if template not in found: |
||||||
|
found.add(template) |
||||||
|
return sorted(found) |
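A typical wiring of ``FileSystemLoader`` with an ``Environment``; the directory names are placeholders, and the ``uptodate`` closure returned by ``get_source`` is what drives auto-reloading::

    from jinja2 import Environment, FileSystemLoader

    # search './templates' first, then './overrides' (placeholder paths)
    env = Environment(loader=FileSystemLoader(['./templates', './overrides']),
                      auto_reload=True)
    template = env.get_template('index.html')
    source, filename, uptodate = env.loader.get_source(env, 'index.html')
    print(filename, uptodate())   # True until the file's mtime changes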
||||||
|
|
||||||
|
|
||||||
|
class PackageLoader(BaseLoader): |
||||||
|
"""Load templates from python eggs or packages. It is constructed with |
||||||
|
the name of the python package and the path to the templates in that |
||||||
|
package:: |
||||||
|
|
||||||
|
loader = PackageLoader('mypackage', 'views') |
||||||
|
|
||||||
|
If the package path is not given, ``'templates'`` is assumed. |
||||||
|
|
||||||
|
Per default the template encoding is ``'utf-8'`` which can be changed |
||||||
|
by setting the `encoding` parameter to something else. Due to the nature |
||||||
|
of eggs it's only possible to reload templates if the package was loaded |
||||||
|
from the file system and not a zip file. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, package_name, package_path='templates', |
||||||
|
encoding='utf-8'): |
||||||
|
from pkg_resources import DefaultProvider, ResourceManager, \ |
||||||
|
get_provider |
||||||
|
provider = get_provider(package_name) |
||||||
|
self.encoding = encoding |
||||||
|
self.manager = ResourceManager() |
||||||
|
self.filesystem_bound = isinstance(provider, DefaultProvider) |
||||||
|
self.provider = provider |
||||||
|
self.package_path = package_path |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
pieces = split_template_path(template) |
||||||
|
p = '/'.join((self.package_path,) + tuple(pieces)) |
||||||
|
if not self.provider.has_resource(p): |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
filename = uptodate = None |
||||||
|
if self.filesystem_bound: |
||||||
|
filename = self.provider.get_resource_filename(self.manager, p) |
||||||
|
mtime = path.getmtime(filename) |
||||||
|
def uptodate(): |
||||||
|
try: |
||||||
|
return path.getmtime(filename) == mtime |
||||||
|
except OSError: |
||||||
|
return False |
||||||
|
|
||||||
|
source = self.provider.get_resource_string(self.manager, p) |
||||||
|
return source.decode(self.encoding), filename, uptodate |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
path = self.package_path |
||||||
|
if path[:2] == './': |
||||||
|
path = path[2:] |
||||||
|
elif path == '.': |
||||||
|
path = '' |
||||||
|
offset = len(path) |
||||||
|
results = [] |
||||||
|
def _walk(path): |
||||||
|
for filename in self.provider.resource_listdir(path): |
||||||
|
fullname = path + '/' + filename |
||||||
|
if self.provider.resource_isdir(fullname): |
||||||
|
_walk(fullname) |
||||||
|
else: |
||||||
|
results.append(fullname[offset:].lstrip('/')) |
||||||
|
_walk(path) |
||||||
|
results.sort() |
||||||
|
return results |
||||||
|
|
||||||
|
|
||||||
|
class DictLoader(BaseLoader): |
||||||
|
"""Loads a template from a python dict. It's passed a dict of unicode |
||||||
|
strings bound to template names. This loader is useful for unittesting: |
||||||
|
|
||||||
|
>>> loader = DictLoader({'index.html': 'source here'}) |
||||||
|
|
||||||
|
Because auto reloading is rarely useful this is disabled per default. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, mapping): |
||||||
|
self.mapping = mapping |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
if template in self.mapping: |
||||||
|
source = self.mapping[template] |
||||||
|
return source, None, lambda: source == self.mapping.get(template) |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
return sorted(self.mapping) |
||||||
|
|
||||||
|
|
||||||
|
class FunctionLoader(BaseLoader): |
||||||
|
"""A loader that is passed a function which does the loading. The |
||||||
|
function receives the name of the template and has to return either |
||||||
|
an unicode string with the template source, a tuple in the form ``(source, |
||||||
|
filename, uptodatefunc)`` or `None` if the template does not exist. |
||||||
|
|
||||||
|
>>> def load_template(name): |
||||||
|
... if name == 'index.html': |
||||||
|
... return '...' |
||||||
|
... |
||||||
|
>>> loader = FunctionLoader(load_template) |
||||||
|
|
||||||
|
The `uptodatefunc` is a function that is called if autoreload is enabled |
||||||
|
and has to return `True` if the template is still up to date. For more |
||||||
|
details have a look at :meth:`BaseLoader.get_source` which has the same |
||||||
|
return value. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, load_func): |
||||||
|
self.load_func = load_func |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
rv = self.load_func(template) |
||||||
|
if rv is None: |
||||||
|
raise TemplateNotFound(template) |
||||||
|
elif isinstance(rv, string_types): |
||||||
|
return rv, None, None |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
class PrefixLoader(BaseLoader): |
||||||
|
"""A loader that is passed a dict of loaders where each loader is bound |
||||||
|
to a prefix. The prefix is delimited from the template by a slash per |
||||||
|
default, which can be changed by setting the `delimiter` argument to |
||||||
|
something else:: |
||||||
|
|
||||||
|
loader = PrefixLoader({ |
||||||
|
'app1': PackageLoader('mypackage.app1'), |
||||||
|
'app2': PackageLoader('mypackage.app2') |
||||||
|
}) |
||||||
|
|
||||||
|
By loading ``'app1/index.html'`` the file from the app1 package is loaded, |
||||||
|
by loading ``'app2/index.html'`` the file from the second. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, mapping, delimiter='/'): |
||||||
|
self.mapping = mapping |
||||||
|
self.delimiter = delimiter |
||||||
|
|
||||||
|
def get_loader(self, template): |
||||||
|
try: |
||||||
|
prefix, name = template.split(self.delimiter, 1) |
||||||
|
loader = self.mapping[prefix] |
||||||
|
except (ValueError, KeyError): |
||||||
|
raise TemplateNotFound(template) |
||||||
|
return loader, name |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
loader, name = self.get_loader(template) |
||||||
|
try: |
||||||
|
return loader.get_source(environment, name) |
||||||
|
except TemplateNotFound: |
||||||
|
# re-raise the exception with the correct filename here. |
||||||
|
# (the one that includes the prefix) |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def load(self, environment, name, globals=None): |
||||||
|
loader, local_name = self.get_loader(name) |
||||||
|
try: |
||||||
|
return loader.load(environment, local_name, globals) |
||||||
|
except TemplateNotFound: |
||||||
|
# re-raise the exception with the correct filename here. |
||||||
|
# (the one that includes the prefix) |
||||||
|
raise TemplateNotFound(name) |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
result = [] |
||||||
|
for prefix, loader in iteritems(self.mapping): |
||||||
|
for template in loader.list_templates(): |
||||||
|
result.append(prefix + self.delimiter + template) |
||||||
|
return result |
||||||
|
|
||||||
|
|
||||||
|
class ChoiceLoader(BaseLoader): |
||||||
|
"""This loader works like the `PrefixLoader` just that no prefix is |
||||||
|
specified. If a template could not be found by one loader the next one |
||||||
|
is tried. |
||||||
|
|
||||||
|
>>> loader = ChoiceLoader([ |
||||||
|
... FileSystemLoader('/path/to/user/templates'), |
||||||
|
... FileSystemLoader('/path/to/system/templates') |
||||||
|
... ]) |
||||||
|
|
||||||
|
This is useful if you want to allow users to override builtin templates |
||||||
|
from a different location. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, loaders): |
||||||
|
self.loaders = loaders |
||||||
|
|
||||||
|
def get_source(self, environment, template): |
||||||
|
for loader in self.loaders: |
||||||
|
try: |
||||||
|
return loader.get_source(environment, template) |
||||||
|
except TemplateNotFound: |
||||||
|
pass |
||||||
|
raise TemplateNotFound(template) |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def load(self, environment, name, globals=None): |
||||||
|
for loader in self.loaders: |
||||||
|
try: |
||||||
|
return loader.load(environment, name, globals) |
||||||
|
except TemplateNotFound: |
||||||
|
pass |
||||||
|
raise TemplateNotFound(name) |
||||||
|
|
||||||
|
def list_templates(self): |
||||||
|
found = set() |
||||||
|
for loader in self.loaders: |
||||||
|
found.update(loader.list_templates()) |
||||||
|
return sorted(found) |
||||||
|
|
||||||
|
|
||||||
|
class _TemplateModule(ModuleType): |
||||||
|
"""Like a normal module but with support for weak references""" |
||||||
|
|
||||||
|
|
||||||
|
class ModuleLoader(BaseLoader): |
||||||
|
"""This loader loads templates from precompiled templates. |
||||||
|
|
||||||
|
Example usage: |
||||||
|
|
||||||
|
>>> loader = ChoiceLoader([ |
||||||
|
... ModuleLoader('/path/to/compiled/templates'), |
||||||
|
... FileSystemLoader('/path/to/templates') |
||||||
|
... ]) |
||||||
|
|
||||||
|
Templates can be precompiled with :meth:`Environment.compile_templates`. |
||||||
|
""" |
||||||
|
|
||||||
|
has_source_access = False |
||||||
|
|
||||||
|
def __init__(self, path): |
||||||
|
package_name = '_jinja2_module_templates_%x' % id(self) |
||||||
|
|
||||||
|
# create a fake module that looks for the templates in the |
||||||
|
# path given. |
||||||
|
mod = _TemplateModule(package_name) |
||||||
|
if isinstance(path, string_types): |
||||||
|
path = [path] |
||||||
|
else: |
||||||
|
path = list(path) |
||||||
|
mod.__path__ = path |
||||||
|
|
||||||
|
sys.modules[package_name] = weakref.proxy(mod, |
||||||
|
lambda x: sys.modules.pop(package_name, None)) |
||||||
|
|
||||||
|
# the only strong reference, the sys.modules entry is weak |
||||||
|
# so that the garbage collector can remove it once the |
||||||
|
# loader that created it goes out of business. |
||||||
|
self.module = mod |
||||||
|
self.package_name = package_name |
||||||
|
|
||||||
|
@staticmethod |
||||||
|
def get_template_key(name): |
||||||
|
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest() |
||||||
|
|
||||||
|
@staticmethod |
||||||
|
def get_module_filename(name): |
||||||
|
return ModuleLoader.get_template_key(name) + '.py' |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def load(self, environment, name, globals=None): |
||||||
|
key = self.get_template_key(name) |
||||||
|
module = '%s.%s' % (self.package_name, key) |
||||||
|
mod = getattr(self.module, module, None) |
||||||
|
if mod is None: |
||||||
|
try: |
||||||
|
mod = __import__(module, None, None, ['root']) |
||||||
|
except ImportError: |
||||||
|
raise TemplateNotFound(name) |
||||||
|
|
||||||
|
# remove the entry from sys.modules, we only want the attribute |
||||||
|
# on the module object we have stored on the loader. |
||||||
|
sys.modules.pop(module, None) |
||||||
|
|
||||||
|
return environment.template_class.from_module_dict( |
||||||
|
environment, mod.__dict__, globals) |
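A sketch of the precompilation workflow ``ModuleLoader`` is meant for, with placeholder paths: templates are compiled once with ``Environment.compile_templates`` and then served from the compiled modules without needing the original sources::

    from jinja2 import Environment, FileSystemLoader, ModuleLoader

    src_env = Environment(loader=FileSystemLoader('./templates'))
    # zip=None writes plain .py modules into the target directory
    src_env.compile_templates('./compiled', zip=None)

    fast_env = Environment(loader=ModuleLoader('./compiled'))
    print(fast_env.get_template('index.html').render(name='World'))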
@@ -0,0 +1,106 @@
# -*- coding: utf-8 -*-
"""
    jinja2.meta
    ~~~~~~~~~~~

    This module implements various functions that expose information about
    templates that might be interesting for various kinds of applications.

    :copyright: (c) 2017 by the Jinja Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types, iteritems


class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection."""

    def __init__(self, environment):
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        self.undeclared_identifiers = set()

    def write(self, x):
        """Don't write."""

    def enter_frame(self, frame):
        """Remember all undeclared identifiers."""
        CodeGenerator.enter_frame(self, frame)
        for _, (action, param) in iteritems(frame.symbols.loads):
            if action == 'resolve':
                self.undeclared_identifiers.add(param)
||||||
|
|
||||||
|
|
||||||
|
def find_undeclared_variables(ast): |
||||||
|
"""Returns a set of all variables in the AST that will be looked up from |
||||||
|
the context at runtime. Because at compile time it's not known which |
||||||
|
variables will be used depending on the path the execution takes at |
||||||
|
runtime, all variables are returned. |
||||||
|
|
||||||
|
>>> from jinja2 import Environment, meta |
||||||
|
>>> env = Environment() |
||||||
|
>>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}') |
||||||
|
>>> meta.find_undeclared_variables(ast) == set(['bar']) |
||||||
|
True |
||||||
|
|
||||||
|
.. admonition:: Implementation |
||||||
|
|
||||||
|
Internally the code generator is used for finding undeclared variables. |
||||||
|
This is good to know because the code generator might raise a |
||||||
|
:exc:`TemplateAssertionError` during compilation and as a matter of |
||||||
|
fact this function can currently raise that exception as well. |
||||||
|
""" |
||||||
|
codegen = TrackingCodeGenerator(ast.environment) |
||||||
|
codegen.visit(ast) |
||||||
|
return codegen.undeclared_identifiers |
||||||
|
|
||||||
|
|
||||||
|
def find_referenced_templates(ast): |
||||||
|
"""Finds all the referenced templates from the AST. This will return an |
||||||
|
iterator over all the hardcoded template extensions, inclusions and |
||||||
|
imports. If dynamic inheritance or inclusion is used, `None` will be |
||||||
|
yielded. |
||||||
|
|
||||||
|
>>> from jinja2 import Environment, meta |
||||||
|
>>> env = Environment() |
||||||
|
>>> ast = env.parse('{% extends "layout.html" %}{% include helper %}') |
||||||
|
>>> list(meta.find_referenced_templates(ast)) |
||||||
|
['layout.html', None] |
||||||
|
|
||||||
|
This function is useful for dependency tracking. For example if you want |
||||||
|
to rebuild parts of the website after a layout template has changed. |
||||||
|
""" |
||||||
|
for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import, |
||||||
|
nodes.Include)): |
||||||
|
if not isinstance(node.template, nodes.Const): |
||||||
|
# a tuple with some non consts in there |
||||||
|
if isinstance(node.template, (nodes.Tuple, nodes.List)): |
||||||
|
for template_name in node.template.items: |
||||||
|
# something const, only yield the strings and ignore |
||||||
|
# non-string consts that really just make no sense |
||||||
|
if isinstance(template_name, nodes.Const): |
||||||
|
if isinstance(template_name.value, string_types): |
||||||
|
yield template_name.value |
||||||
|
# something dynamic in there |
||||||
|
else: |
||||||
|
yield None |
||||||
|
# something dynamic we don't know about here |
||||||
|
else: |
||||||
|
yield None |
||||||
|
continue |
||||||
|
# constant is a basestring, direct template name |
||||||
|
if isinstance(node.template.value, string_types): |
||||||
|
yield node.template.value |
||||||
|
# a tuple or list (latter *should* not happen) made of consts, |
||||||
|
# yield the consts that are strings. We could warn here for |
||||||
|
# non string values |
||||||
|
elif isinstance(node, nodes.Include) and \ |
||||||
|
isinstance(node.template.value, (tuple, list)): |
||||||
|
for template_name in node.template.value: |
||||||
|
if isinstance(template_name, string_types): |
||||||
|
yield template_name |
||||||
|
# something else we don't care about, we could warn here |
||||||
|
else: |
||||||
|
yield None |
@@ -0,0 +1,939 @@
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.nodes |
||||||
|
~~~~~~~~~~~~ |
||||||
|
|
||||||
|
This module implements additional nodes derived from the ast base node. |
||||||
|
|
||||||
|
It also provides some node tree helper functions like `in_lineno` and |
||||||
|
`get_nodes` used by the parser and translator in order to normalize |
||||||
|
python and jinja nodes. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import types |
||||||
|
import operator |
||||||
|
|
||||||
|
from collections import deque |
||||||
|
from jinja2.utils import Markup |
||||||
|
from jinja2._compat import izip, with_metaclass, text_type, PY2 |
||||||
|
|
||||||
|
|
||||||
|
#: the types we support for context functions |
||||||
|
_context_function_types = (types.FunctionType, types.MethodType) |
||||||
|
|
||||||
|
|
||||||
|
_binop_to_func = { |
||||||
|
'*': operator.mul, |
||||||
|
'/': operator.truediv, |
||||||
|
'//': operator.floordiv, |
||||||
|
'**': operator.pow, |
||||||
|
'%': operator.mod, |
||||||
|
'+': operator.add, |
||||||
|
'-': operator.sub |
||||||
|
} |
||||||
|
|
||||||
|
_uaop_to_func = { |
||||||
|
'not': operator.not_, |
||||||
|
'+': operator.pos, |
||||||
|
'-': operator.neg |
||||||
|
} |
||||||
|
|
||||||
|
_cmpop_to_func = { |
||||||
|
'eq': operator.eq, |
||||||
|
'ne': operator.ne, |
||||||
|
'gt': operator.gt, |
||||||
|
'gteq': operator.ge, |
||||||
|
'lt': operator.lt, |
||||||
|
'lteq': operator.le, |
||||||
|
'in': lambda a, b: a in b, |
||||||
|
'notin': lambda a, b: a not in b |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
class Impossible(Exception): |
||||||
|
"""Raised if the node could not perform a requested action.""" |
||||||
|
|
||||||
|
|
||||||
|
class NodeType(type): |
||||||
|
"""A metaclass for nodes that handles the field and attribute |
||||||
|
inheritance. fields and attributes from the parent class are |
||||||
|
automatically forwarded to the child.""" |
||||||
|
|
||||||
|
def __new__(cls, name, bases, d): |
||||||
|
for attr in 'fields', 'attributes': |
||||||
|
storage = [] |
||||||
|
storage.extend(getattr(bases[0], attr, ())) |
||||||
|
storage.extend(d.get(attr, ())) |
||||||
|
assert len(bases) == 1, 'multiple inheritance not allowed' |
||||||
|
assert len(storage) == len(set(storage)), 'layout conflict' |
||||||
|
d[attr] = tuple(storage) |
||||||
|
d.setdefault('abstract', False) |
||||||
|
return type.__new__(cls, name, bases, d) |
||||||
|
|
||||||
|
|
||||||
|
class EvalContext(object): |
||||||
|
"""Holds evaluation time information. Custom attributes can be attached |
||||||
|
to it in extensions. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, environment, template_name=None): |
||||||
|
self.environment = environment |
||||||
|
if callable(environment.autoescape): |
||||||
|
self.autoescape = environment.autoescape(template_name) |
||||||
|
else: |
||||||
|
self.autoescape = environment.autoescape |
||||||
|
self.volatile = False |
||||||
|
|
||||||
|
def save(self): |
||||||
|
return self.__dict__.copy() |
||||||
|
|
||||||
|
def revert(self, old): |
||||||
|
self.__dict__.clear() |
||||||
|
self.__dict__.update(old) |
||||||
|
|
||||||
|
|
||||||
|
def get_eval_context(node, ctx): |
||||||
|
if ctx is None: |
||||||
|
if node.environment is None: |
||||||
|
raise RuntimeError('if no eval context is passed, the ' |
||||||
|
'node must have an attached ' |
||||||
|
'environment.') |
||||||
|
return EvalContext(node.environment) |
||||||
|
return ctx |
||||||
|
|
||||||
|
|
||||||
|
class Node(with_metaclass(NodeType, object)): |
||||||
|
"""Baseclass for all Jinja2 nodes. There are a number of nodes available |
||||||
|
of different types. There are four major types: |
||||||
|
|
||||||
|
- :class:`Stmt`: statements |
||||||
|
- :class:`Expr`: expressions |
||||||
|
- :class:`Helper`: helper nodes |
||||||
|
- :class:`Template`: the outermost wrapper node |
||||||
|
|
||||||
|
All nodes have fields and attributes. Fields may be other nodes, lists, |
||||||
|
or arbitrary values. Fields are passed to the constructor as regular |
||||||
|
positional arguments, attributes as keyword arguments. Each node has |
||||||
|
two attributes: `lineno` (the line number of the node) and `environment`. |
||||||
|
The `environment` attribute is set at the end of the parsing process for |
||||||
|
all nodes automatically. |
||||||
|
""" |
||||||
|
fields = () |
||||||
|
attributes = ('lineno', 'environment') |
||||||
|
abstract = True |
||||||
|
|
||||||
|
def __init__(self, *fields, **attributes): |
||||||
|
if self.abstract: |
||||||
|
raise TypeError('abstract nodes are not instantiable')
||||||
|
if fields: |
||||||
|
if len(fields) != len(self.fields): |
||||||
|
if not self.fields: |
||||||
|
raise TypeError('%r takes 0 arguments' % |
||||||
|
self.__class__.__name__) |
||||||
|
raise TypeError('%r takes 0 or %d argument%s' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
len(self.fields), |
||||||
|
len(self.fields) != 1 and 's' or '' |
||||||
|
)) |
||||||
|
for name, arg in izip(self.fields, fields): |
||||||
|
setattr(self, name, arg) |
||||||
|
for attr in self.attributes: |
||||||
|
setattr(self, attr, attributes.pop(attr, None)) |
||||||
|
if attributes: |
||||||
|
raise TypeError('unknown attribute %r' % |
||||||
|
next(iter(attributes))) |
||||||
|
|
||||||
|
def iter_fields(self, exclude=None, only=None): |
||||||
|
"""This method iterates over all fields that are defined and yields |
||||||
|
``(key, value)`` tuples. Per default all fields are returned, but |
||||||
|
it's possible to limit that to some fields by providing the `only` |
||||||
|
parameter or to exclude some using the `exclude` parameter. Both |
||||||
|
should be sets or tuples of field names. |
||||||
|
""" |
||||||
|
for name in self.fields: |
||||||
|
if (exclude is only is None) or \ |
||||||
|
(exclude is not None and name not in exclude) or \ |
||||||
|
(only is not None and name in only): |
||||||
|
try: |
||||||
|
yield name, getattr(self, name) |
||||||
|
except AttributeError: |
||||||
|
pass |
||||||
|
|
||||||
|
def iter_child_nodes(self, exclude=None, only=None): |
||||||
|
"""Iterates over all direct child nodes of the node. This iterates |
||||||
|
over all fields and yields the values if they are nodes. If the value
||||||
|
of a field is a list all the nodes in that list are returned. |
||||||
|
""" |
||||||
|
for field, item in self.iter_fields(exclude, only): |
||||||
|
if isinstance(item, list): |
||||||
|
for n in item: |
||||||
|
if isinstance(n, Node): |
||||||
|
yield n |
||||||
|
elif isinstance(item, Node): |
||||||
|
yield item |
||||||
|
|
||||||
|
def find(self, node_type): |
||||||
|
"""Find the first node of a given type. If no such node exists the |
||||||
|
return value is `None`. |
||||||
|
""" |
||||||
|
for result in self.find_all(node_type): |
||||||
|
return result |
||||||
|
|
||||||
|
def find_all(self, node_type): |
||||||
|
"""Find all the nodes of a given type. If the type is a tuple, |
||||||
|
the check is performed for any of the tuple items. |
||||||
|
""" |
||||||
|
for child in self.iter_child_nodes(): |
||||||
|
if isinstance(child, node_type): |
||||||
|
yield child |
||||||
|
for result in child.find_all(node_type): |
||||||
|
yield result |
||||||
|
|
||||||
|
def set_ctx(self, ctx): |
||||||
|
"""Reset the context of a node and all child nodes. Per default the |
||||||
|
parser will generate all nodes with a 'load' context, as it's the
||||||
|
most common one. This method is used in the parser to set assignment |
||||||
|
targets and other nodes to a store context. |
||||||
|
""" |
||||||
|
todo = deque([self]) |
||||||
|
while todo: |
||||||
|
node = todo.popleft() |
||||||
|
if 'ctx' in node.fields: |
||||||
|
node.ctx = ctx |
||||||
|
todo.extend(node.iter_child_nodes()) |
||||||
|
return self |
||||||
|
|
||||||
|
def set_lineno(self, lineno, override=False): |
||||||
|
"""Set the line numbers of the node and children.""" |
||||||
|
todo = deque([self]) |
||||||
|
while todo: |
||||||
|
node = todo.popleft() |
||||||
|
if 'lineno' in node.attributes: |
||||||
|
if node.lineno is None or override: |
||||||
|
node.lineno = lineno |
||||||
|
todo.extend(node.iter_child_nodes()) |
||||||
|
return self |
||||||
|
|
||||||
|
def set_environment(self, environment): |
||||||
|
"""Set the environment for all nodes.""" |
||||||
|
todo = deque([self]) |
||||||
|
while todo: |
||||||
|
node = todo.popleft() |
||||||
|
node.environment = environment |
||||||
|
todo.extend(node.iter_child_nodes()) |
||||||
|
return self |
||||||
|
|
||||||
|
def __eq__(self, other): |
||||||
|
return type(self) is type(other) and \ |
||||||
|
tuple(self.iter_fields()) == tuple(other.iter_fields()) |
||||||
|
|
||||||
|
def __ne__(self, other): |
||||||
|
return not self.__eq__(other) |
||||||
|
|
||||||
|
# Restore Python 2 hashing behavior on Python 3 |
||||||
|
__hash__ = object.__hash__ |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '%s(%s)' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
', '.join('%s=%r' % (arg, getattr(self, arg, None)) for |
||||||
|
arg in self.fields) |
||||||
|
) |
||||||
|
|
||||||
|
def dump(self): |
||||||
|
def _dump(node): |
||||||
|
if not isinstance(node, Node): |
||||||
|
buf.append(repr(node)) |
||||||
|
return |
||||||
|
|
||||||
|
buf.append('nodes.%s(' % node.__class__.__name__) |
||||||
|
if not node.fields: |
||||||
|
buf.append(')') |
||||||
|
return |
||||||
|
for idx, field in enumerate(node.fields): |
||||||
|
if idx: |
||||||
|
buf.append(', ') |
||||||
|
value = getattr(node, field) |
||||||
|
if isinstance(value, list): |
||||||
|
buf.append('[') |
||||||
|
for idx, item in enumerate(value): |
||||||
|
if idx: |
||||||
|
buf.append(', ') |
||||||
|
_dump(item) |
||||||
|
buf.append(']') |
||||||
|
else: |
||||||
|
_dump(value) |
||||||
|
buf.append(')') |
||||||
|
buf = [] |
||||||
|
_dump(self) |
||||||
|
return ''.join(buf) |
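
# --- Illustrative sketch, not part of the upstream Jinja2 module: fields are
# passed positionally and attributes (such as `lineno`) as keywords; dump()
# renders the tree and as_const() folds it once an environment is attached.
# The helper name `_example_node_basics` is purely illustrative.
def _example_node_basics():
    from jinja2 import Environment

    node = Add(Const(1), Const(2), lineno=1)
    node.set_environment(Environment())   # as_const() needs an environment

    print(node.dump())           # nodes.Add(nodes.Const(1), nodes.Const(2))
    assert node.as_const() == 3  # folded with the plain + operator
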
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Stmt(Node): |
||||||
|
"""Base node for all statements.""" |
||||||
|
abstract = True |
||||||
|
|
||||||
|
|
||||||
|
class Helper(Node): |
||||||
|
"""Nodes that exist in a specific context only.""" |
||||||
|
abstract = True |
||||||
|
|
||||||
|
|
||||||
|
class Template(Node): |
||||||
|
"""Node that represents a template. This must be the outermost node that |
||||||
|
is passed to the compiler. |
||||||
|
""" |
||||||
|
fields = ('body',) |
||||||
|
|
||||||
|
|
||||||
|
class Output(Stmt): |
||||||
|
"""A node that holds multiple expressions which are then printed out. |
||||||
|
This is used both for the `print` statement and the regular template data. |
||||||
|
""" |
||||||
|
fields = ('nodes',) |
||||||
|
|
||||||
|
|
||||||
|
class Extends(Stmt): |
||||||
|
"""Represents an extends statement.""" |
||||||
|
fields = ('template',) |
||||||
|
|
||||||
|
|
||||||
|
class For(Stmt): |
||||||
|
"""The for loop. `target` is the target for the iteration (usually a |
||||||
|
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list |
||||||
|
of nodes that are used as loop-body, and `else_` a list of nodes for the |
||||||
|
`else` block. If no else node exists it has to be an empty list. |
||||||
|
|
||||||
|
For filtered nodes an expression can be stored as `test`, otherwise `None`. |
||||||
|
""" |
||||||
|
fields = ('target', 'iter', 'body', 'else_', 'test', 'recursive') |
||||||
|
|
||||||
|
|
||||||
|
class If(Stmt): |
||||||
|
"""If `test` is true, `body` is rendered, else `else_`.""" |
||||||
|
fields = ('test', 'body', 'else_') |
||||||
|
|
||||||
|
|
||||||
|
class Macro(Stmt): |
||||||
|
"""A macro definition. `name` is the name of the macro, `args` a list of |
||||||
|
arguments and `defaults` a list of defaults if there are any. `body` is |
||||||
|
a list of nodes for the macro body. |
||||||
|
""" |
||||||
|
fields = ('name', 'args', 'defaults', 'body') |
||||||
|
|
||||||
|
|
||||||
|
class CallBlock(Stmt): |
||||||
|
"""Like a macro without a name but a call instead. `call` is called with |
||||||
|
the unnamed macro as `caller` argument this node holds. |
||||||
|
""" |
||||||
|
fields = ('call', 'args', 'defaults', 'body') |
||||||
|
|
||||||
|
|
||||||
|
class FilterBlock(Stmt): |
||||||
|
"""Node for filter sections.""" |
||||||
|
fields = ('body', 'filter') |
||||||
|
|
||||||
|
|
||||||
|
class With(Stmt): |
||||||
|
"""Specific node for with statements. In older versions of Jinja the |
||||||
|
with statement was implemented on top of the `Scope` node instead.
||||||
|
|
||||||
|
.. versionadded:: 2.9.3 |
||||||
|
""" |
||||||
|
fields = ('targets', 'values', 'body') |
||||||
|
|
||||||
|
|
||||||
|
class Block(Stmt): |
||||||
|
"""A node that represents a block.""" |
||||||
|
fields = ('name', 'body', 'scoped') |
||||||
|
|
||||||
|
|
||||||
|
class Include(Stmt): |
||||||
|
"""A node that represents the include tag.""" |
||||||
|
fields = ('template', 'with_context', 'ignore_missing') |
||||||
|
|
||||||
|
|
||||||
|
class Import(Stmt): |
||||||
|
"""A node that represents the import tag.""" |
||||||
|
fields = ('template', 'target', 'with_context') |
||||||
|
|
||||||
|
|
||||||
|
class FromImport(Stmt): |
||||||
|
"""A node that represents the from import tag. It's important to not |
||||||
|
pass unsafe names to the name attribute. The compiler translates the |
||||||
|
attribute lookups directly into getattr calls and does *not* use the |
||||||
|
subscript callback of the interface. As exported variables may not |
||||||
|
start with double underscores (which the parser asserts) this is not a |
||||||
|
problem for regular Jinja code, but if this node is used in an extension |
||||||
|
extra care must be taken. |
||||||
|
|
||||||
|
The list of names may contain tuples if aliases are wanted. |
||||||
|
""" |
||||||
|
fields = ('template', 'names', 'with_context') |
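
# --- Illustrative sketch, not part of the upstream Jinja2 module: parsing a
# from-import shows how plain names and ``as`` aliases end up in the `names`
# field. The helper name `_example_from_import` is purely illustrative.
def _example_from_import():
    from jinja2 import Environment

    tmpl = Environment().parse(
        "{% from 'forms.html' import input, textarea as ta %}")
    node = tmpl.find(FromImport)
    assert node.names == ['input', ('textarea', 'ta')]
    assert node.with_context is False
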
||||||
|
|
||||||
|
|
||||||
|
class ExprStmt(Stmt): |
||||||
|
"""A statement that evaluates an expression and discards the result.""" |
||||||
|
fields = ('node',) |
||||||
|
|
||||||
|
|
||||||
|
class Assign(Stmt): |
||||||
|
"""Assigns an expression to a target.""" |
||||||
|
fields = ('target', 'node') |
||||||
|
|
||||||
|
|
||||||
|
class AssignBlock(Stmt): |
||||||
|
"""Assigns a block to a target.""" |
||||||
|
fields = ('target', 'body') |
||||||
|
|
||||||
|
|
||||||
|
class Expr(Node): |
||||||
|
"""Baseclass for all expressions.""" |
||||||
|
abstract = True |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
"""Return the value of the expression as constant or raise |
||||||
|
:exc:`Impossible` if this was not possible. |
||||||
|
|
||||||
|
An :class:`EvalContext` can be provided; if none is given
||||||
|
a default context is created which requires the nodes to have |
||||||
|
an attached environment. |
||||||
|
|
||||||
|
.. versionchanged:: 2.4 |
||||||
|
the `eval_ctx` parameter was added. |
||||||
|
""" |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
def can_assign(self): |
||||||
|
"""Check if it's possible to assign something to this node.""" |
||||||
|
return False |
||||||
|
|
||||||
|
|
||||||
|
class BinExpr(Expr): |
||||||
|
"""Baseclass for all binary expressions.""" |
||||||
|
fields = ('left', 'right') |
||||||
|
operator = None |
||||||
|
abstract = True |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
# intercepted operators cannot be folded at compile time |
||||||
|
if self.environment.sandboxed and \ |
||||||
|
self.operator in self.environment.intercepted_binops: |
||||||
|
raise Impossible() |
||||||
|
f = _binop_to_func[self.operator] |
||||||
|
try: |
||||||
|
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx)) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
|
||||||
|
class UnaryExpr(Expr): |
||||||
|
"""Baseclass for all unary expressions.""" |
||||||
|
fields = ('node',) |
||||||
|
operator = None |
||||||
|
abstract = True |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
# intercepted operators cannot be folded at compile time |
||||||
|
if self.environment.sandboxed and \ |
||||||
|
self.operator in self.environment.intercepted_unops: |
||||||
|
raise Impossible() |
||||||
|
f = _uaop_to_func[self.operator] |
||||||
|
try: |
||||||
|
return f(self.node.as_const(eval_ctx)) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
|
||||||
|
class Name(Expr): |
||||||
|
"""Looks up a name or stores a value in a name. |
||||||
|
The `ctx` of the node can be one of the following values: |
||||||
|
|
||||||
|
- `store`: store a value in the name |
||||||
|
- `load`: load that name |
||||||
|
- `param`: like `store` but used when the name is defined as a function parameter.
||||||
|
""" |
||||||
|
fields = ('name', 'ctx') |
||||||
|
|
||||||
|
def can_assign(self): |
||||||
|
return self.name not in ('true', 'false', 'none', |
||||||
|
'True', 'False', 'None') |
||||||
|
|
||||||
|
|
||||||
|
class Literal(Expr): |
||||||
|
"""Baseclass for literals.""" |
||||||
|
abstract = True |
||||||
|
|
||||||
|
|
||||||
|
class Const(Literal): |
||||||
|
"""All constant values. The parser will return this node for simple |
||||||
|
constants such as ``42`` or ``"foo"`` but it can be used to store more |
||||||
|
complex values such as lists too. Only constants with a safe |
||||||
|
representation (objects where ``eval(repr(x)) == x`` is true) may be stored this way.
||||||
|
""" |
||||||
|
fields = ('value',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
rv = self.value |
||||||
|
if PY2 and type(rv) is text_type and \ |
||||||
|
self.environment.policies['compiler.ascii_str']: |
||||||
|
try: |
||||||
|
rv = rv.encode('ascii') |
||||||
|
except UnicodeError: |
||||||
|
pass |
||||||
|
return rv |
||||||
|
|
||||||
|
@classmethod |
||||||
|
def from_untrusted(cls, value, lineno=None, environment=None): |
||||||
|
"""Return a const object if the value is representable as |
||||||
|
constant value in the generated code, otherwise it will raise |
||||||
|
an `Impossible` exception. |
||||||
|
""" |
||||||
|
from .compiler import has_safe_repr |
||||||
|
if not has_safe_repr(value): |
||||||
|
raise Impossible() |
||||||
|
return cls(value, lineno=lineno, environment=environment) |
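
# --- Illustrative sketch, not part of the upstream Jinja2 module: values with
# a safe repr become Const nodes, anything else raises Impossible so the
# optimizer simply keeps the original node. The helper name
# `_example_const_from_untrusted` is purely illustrative.
def _example_const_from_untrusted():
    from jinja2 import Environment

    env = Environment()
    node = Const.from_untrusted([1, 2, 3], lineno=1, environment=env)
    assert node.as_const() == [1, 2, 3]

    try:
        Const.from_untrusted(object(), lineno=1, environment=env)
    except Impossible:
        pass    # objects without a safe repr cannot be written into the code
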
||||||
|
|
||||||
|
|
||||||
|
class TemplateData(Literal): |
||||||
|
"""A constant template string.""" |
||||||
|
fields = ('data',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
if eval_ctx.volatile: |
||||||
|
raise Impossible() |
||||||
|
if eval_ctx.autoescape: |
||||||
|
return Markup(self.data) |
||||||
|
return self.data |
||||||
|
|
||||||
|
|
||||||
|
class Tuple(Literal): |
||||||
|
"""For loop unpacking and some other things like multiple arguments |
||||||
|
for subscripts. As with :class:`Name`, `ctx` specifies whether the tuple
is used for loading the names or storing them.
||||||
|
""" |
||||||
|
fields = ('items', 'ctx') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return tuple(x.as_const(eval_ctx) for x in self.items) |
||||||
|
|
||||||
|
def can_assign(self): |
||||||
|
for item in self.items: |
||||||
|
if not item.can_assign(): |
||||||
|
return False |
||||||
|
return True |
||||||
|
|
||||||
|
|
||||||
|
class List(Literal): |
||||||
|
"""Any list literal such as ``[1, 2, 3]``""" |
||||||
|
fields = ('items',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return [x.as_const(eval_ctx) for x in self.items] |
||||||
|
|
||||||
|
|
||||||
|
class Dict(Literal): |
||||||
|
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of |
||||||
|
:class:`Pair` nodes. |
||||||
|
""" |
||||||
|
fields = ('items',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return dict(x.as_const(eval_ctx) for x in self.items) |
||||||
|
|
||||||
|
|
||||||
|
class Pair(Helper): |
||||||
|
"""A key, value pair for dicts.""" |
||||||
|
fields = ('key', 'value') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx) |
||||||
|
|
||||||
|
|
||||||
|
class Keyword(Helper): |
||||||
|
"""A key, value pair for keyword arguments where key is a string.""" |
||||||
|
fields = ('key', 'value') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return self.key, self.value.as_const(eval_ctx) |
||||||
|
|
||||||
|
|
||||||
|
class CondExpr(Expr): |
||||||
|
"""A conditional expression (inline if expression). (``{{ |
||||||
|
foo if bar else baz }}``) |
||||||
|
""" |
||||||
|
fields = ('test', 'expr1', 'expr2') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
if self.test.as_const(eval_ctx): |
||||||
|
return self.expr1.as_const(eval_ctx) |
||||||
|
|
||||||
|
# if we evaluate to an undefined object, we better do that at runtime |
||||||
|
if self.expr2 is None: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
return self.expr2.as_const(eval_ctx) |
||||||
|
|
||||||
|
|
||||||
|
class Filter(Expr): |
||||||
|
"""This node applies a filter on an expression. `name` is the name of |
||||||
|
the filter, the rest of the fields are the same as for :class:`Call`. |
||||||
|
|
||||||
|
If the `node` of a filter is `None` the contents of the last buffer are |
||||||
|
filtered. Buffers are created by macros and filter blocks. |
||||||
|
""" |
||||||
|
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
if eval_ctx.volatile or self.node is None: |
||||||
|
raise Impossible() |
||||||
|
# we have to be careful here because we call filter_ below. |
||||||
|
# if this variable would be called filter, 2to3 would wrap the |
||||||
|
# call in a list because it is assuming we are talking about the
||||||
|
# builtin filter function here which no longer returns a list in |
||||||
|
# python 3. because of that, do not rename filter_ to filter! |
||||||
|
filter_ = self.environment.filters.get(self.name) |
||||||
|
if filter_ is None or getattr(filter_, 'contextfilter', False): |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
# We cannot constant fold async filters, so we need to make sure
||||||
|
# to not go down this path. |
||||||
|
if eval_ctx.environment.is_async and \ |
||||||
|
getattr(filter_, 'asyncfiltervariant', False): |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
obj = self.node.as_const(eval_ctx) |
||||||
|
args = [obj] + [x.as_const(eval_ctx) for x in self.args] |
||||||
|
if getattr(filter_, 'evalcontextfilter', False): |
||||||
|
args.insert(0, eval_ctx) |
||||||
|
elif getattr(filter_, 'environmentfilter', False): |
||||||
|
args.insert(0, self.environment) |
||||||
|
kwargs = dict(x.as_const(eval_ctx) for x in self.kwargs) |
||||||
|
if self.dyn_args is not None: |
||||||
|
try: |
||||||
|
args.extend(self.dyn_args.as_const(eval_ctx)) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
if self.dyn_kwargs is not None: |
||||||
|
try: |
||||||
|
kwargs.update(self.dyn_kwargs.as_const(eval_ctx)) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
try: |
||||||
|
return filter_(*args, **kwargs) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
|
||||||
|
class Test(Expr): |
||||||
|
"""Applies a test on an expression. `name` is the name of the test, the |
||||||
|
rest of the fields are the same as for :class:`Call`. |
||||||
|
""" |
||||||
|
fields = ('node', 'name', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') |
||||||
|
|
||||||
|
|
||||||
|
class Call(Expr): |
||||||
|
"""Calls an expression. `args` is a list of arguments, `kwargs` a list |
||||||
|
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args` |
||||||
|
and `dyn_kwargs` has to be either `None` or a node that is used as |
||||||
|
node for dynamic positional (``*args``) or keyword (``**kwargs``) |
||||||
|
arguments. |
||||||
|
""" |
||||||
|
fields = ('node', 'args', 'kwargs', 'dyn_args', 'dyn_kwargs') |
||||||
|
|
||||||
|
|
||||||
|
class Getitem(Expr): |
||||||
|
"""Get an attribute or item from an expression and prefer the item.""" |
||||||
|
fields = ('node', 'arg', 'ctx') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
if self.ctx != 'load': |
||||||
|
raise Impossible() |
||||||
|
try: |
||||||
|
return self.environment.getitem(self.node.as_const(eval_ctx), |
||||||
|
self.arg.as_const(eval_ctx)) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
def can_assign(self): |
||||||
|
return False |
||||||
|
|
||||||
|
|
||||||
|
class Getattr(Expr): |
||||||
|
"""Get an attribute or item from an expression that is a ascii-only |
||||||
|
bytestring and prefer the attribute. |
||||||
|
""" |
||||||
|
fields = ('node', 'attr', 'ctx') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
if self.ctx != 'load': |
||||||
|
raise Impossible() |
||||||
|
try: |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return self.environment.getattr(self.node.as_const(eval_ctx), |
||||||
|
self.attr) |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
|
||||||
|
def can_assign(self): |
||||||
|
return False |
||||||
|
|
||||||
|
|
||||||
|
class Slice(Expr): |
||||||
|
"""Represents a slice object. This must only be used as argument for |
||||||
|
:class:`Subscript`. |
||||||
|
""" |
||||||
|
fields = ('start', 'stop', 'step') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
def const(obj): |
||||||
|
if obj is None: |
||||||
|
return None |
||||||
|
return obj.as_const(eval_ctx) |
||||||
|
return slice(const(self.start), const(self.stop), const(self.step)) |
||||||
|
|
||||||
|
|
||||||
|
class Concat(Expr): |
||||||
|
"""Concatenates the list of expressions provided after converting them to |
||||||
|
unicode. |
||||||
|
""" |
||||||
|
fields = ('nodes',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return ''.join(text_type(x.as_const(eval_ctx)) for x in self.nodes) |
||||||
|
|
||||||
|
|
||||||
|
class Compare(Expr): |
||||||
|
"""Compares an expression with some other expressions. `ops` must be a |
||||||
|
list of :class:`Operand`\\s. |
||||||
|
""" |
||||||
|
fields = ('expr', 'ops') |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
result = value = self.expr.as_const(eval_ctx) |
||||||
|
try: |
||||||
|
for op in self.ops: |
||||||
|
new_value = op.expr.as_const(eval_ctx) |
||||||
|
result = _cmpop_to_func[op.op](value, new_value) |
||||||
|
value = new_value |
||||||
|
except Exception: |
||||||
|
raise Impossible() |
||||||
|
return result |
||||||
|
|
||||||
|
|
||||||
|
class Operand(Helper): |
||||||
|
"""Holds an operator and an expression.""" |
||||||
|
fields = ('op', 'expr') |
||||||
|
|
||||||
|
if __debug__: |
||||||
|
Operand.__doc__ += '\nThe following operators are available: ' + \ |
||||||
|
', '.join(sorted('``%s``' % x for x in set(_binop_to_func) | |
||||||
|
set(_uaop_to_func) | set(_cmpop_to_func))) |
||||||
|
|
||||||
|
|
||||||
|
class Mul(BinExpr): |
||||||
|
"""Multiplies the left with the right node.""" |
||||||
|
operator = '*' |
||||||
|
|
||||||
|
|
||||||
|
class Div(BinExpr): |
||||||
|
"""Divides the left by the right node.""" |
||||||
|
operator = '/' |
||||||
|
|
||||||
|
|
||||||
|
class FloorDiv(BinExpr): |
||||||
|
"""Divides the left by the right node and truncates conver the |
||||||
|
result into an integer by truncating. |
||||||
|
""" |
||||||
|
operator = '//' |
||||||
|
|
||||||
|
|
||||||
|
class Add(BinExpr): |
||||||
|
"""Add the left to the right node.""" |
||||||
|
operator = '+' |
||||||
|
|
||||||
|
|
||||||
|
class Sub(BinExpr): |
||||||
|
"""Subtract the right from the left node.""" |
||||||
|
operator = '-' |
||||||
|
|
||||||
|
|
||||||
|
class Mod(BinExpr): |
||||||
|
"""Left modulo right.""" |
||||||
|
operator = '%' |
||||||
|
|
||||||
|
|
||||||
|
class Pow(BinExpr): |
||||||
|
"""Left to the power of right.""" |
||||||
|
operator = '**' |
||||||
|
|
||||||
|
|
||||||
|
class And(BinExpr): |
||||||
|
"""Short circuited AND.""" |
||||||
|
operator = 'and' |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx) |
||||||
|
|
||||||
|
|
||||||
|
class Or(BinExpr): |
||||||
|
"""Short circuited OR.""" |
||||||
|
operator = 'or' |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx) |
||||||
|
|
||||||
|
|
||||||
|
class Not(UnaryExpr): |
||||||
|
"""Negate the expression.""" |
||||||
|
operator = 'not' |
||||||
|
|
||||||
|
|
||||||
|
class Neg(UnaryExpr): |
||||||
|
"""Make the expression negative.""" |
||||||
|
operator = '-' |
||||||
|
|
||||||
|
|
||||||
|
class Pos(UnaryExpr): |
||||||
|
"""Make the expression positive (noop for most expressions)""" |
||||||
|
operator = '+' |
||||||
|
|
||||||
|
|
||||||
|
# Helpers for extensions |
||||||
|
|
||||||
|
|
||||||
|
class EnvironmentAttribute(Expr): |
||||||
|
"""Loads an attribute from the environment object. This is useful for |
||||||
|
extensions that want to call a callback stored on the environment. |
||||||
|
""" |
||||||
|
fields = ('name',) |
||||||
|
|
||||||
|
|
||||||
|
class ExtensionAttribute(Expr): |
||||||
|
"""Returns the attribute of an extension bound to the environment. |
||||||
|
The identifier is the identifier of the :class:`Extension`. |
||||||
|
|
||||||
|
This node is usually constructed by calling the |
||||||
|
:meth:`~jinja2.ext.Extension.attr` method on an extension. |
||||||
|
""" |
||||||
|
fields = ('identifier', 'name') |
||||||
|
|
||||||
|
|
||||||
|
class ImportedName(Expr): |
||||||
|
"""If created with an import name the import name is returned on node |
||||||
|
access. For example ``ImportedName('cgi.escape')`` returns the `escape` |
||||||
|
function from the cgi module on evaluation. Imports are optimized by the |
||||||
|
compiler so there is no need to assign them to local variables. |
||||||
|
""" |
||||||
|
fields = ('importname',) |
||||||
|
|
||||||
|
|
||||||
|
class InternalName(Expr): |
||||||
|
"""An internal name in the compiler. You cannot create these nodes |
||||||
|
yourself but the parser provides a |
||||||
|
:meth:`~jinja2.parser.Parser.free_identifier` method that creates |
||||||
|
a new identifier for you. This identifier is not available from the |
||||||
|
template and is not treated specially by the compiler.
||||||
|
""" |
||||||
|
fields = ('name',) |
||||||
|
|
||||||
|
def __init__(self): |
||||||
|
raise TypeError('Can\'t create internal names. Use the ' |
||||||
|
'`free_identifier` method on a parser.') |
||||||
|
|
||||||
|
|
||||||
|
class MarkSafe(Expr): |
||||||
|
"""Mark the wrapped expression as safe (wrap it as `Markup`).""" |
||||||
|
fields = ('expr',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
return Markup(self.expr.as_const(eval_ctx)) |
||||||
|
|
||||||
|
|
||||||
|
class MarkSafeIfAutoescape(Expr): |
||||||
|
"""Mark the wrapped expression as safe (wrap it as `Markup`) but |
||||||
|
only if autoescaping is active. |
||||||
|
|
||||||
|
.. versionadded:: 2.5 |
||||||
|
""" |
||||||
|
fields = ('expr',) |
||||||
|
|
||||||
|
def as_const(self, eval_ctx=None): |
||||||
|
eval_ctx = get_eval_context(self, eval_ctx) |
||||||
|
if eval_ctx.volatile: |
||||||
|
raise Impossible() |
||||||
|
expr = self.expr.as_const(eval_ctx) |
||||||
|
if eval_ctx.autoescape: |
||||||
|
return Markup(expr) |
||||||
|
return expr |
||||||
|
|
||||||
|
|
||||||
|
class ContextReference(Expr): |
||||||
|
"""Returns the current template context. It can be used like a |
||||||
|
:class:`Name` node, with a ``'load'`` ctx and will return the |
||||||
|
current :class:`~jinja2.runtime.Context` object. |
||||||
|
|
||||||
|
Here is an example that assigns the current template name to a
||||||
|
variable named `foo`:: |
||||||
|
|
||||||
|
Assign(Name('foo', ctx='store'), |
||||||
|
Getattr(ContextReference(), 'name')) |
||||||
|
""" |
||||||
|
|
||||||
|
|
||||||
|
class Continue(Stmt): |
||||||
|
"""Continue a loop.""" |
||||||
|
|
||||||
|
|
||||||
|
class Break(Stmt): |
||||||
|
"""Break a loop.""" |
||||||
|
|
||||||
|
|
||||||
|
class Scope(Stmt): |
||||||
|
"""An artificial scope.""" |
||||||
|
fields = ('body',) |
||||||
|
|
||||||
|
|
||||||
|
class EvalContextModifier(Stmt): |
||||||
|
"""Modifies the eval context. For each option that should be modified, |
||||||
|
a :class:`Keyword` has to be added to the :attr:`options` list. |
||||||
|
|
||||||
|
Example to change the `autoescape` setting:: |
||||||
|
|
||||||
|
EvalContextModifier(options=[Keyword('autoescape', Const(True))]) |
||||||
|
""" |
||||||
|
fields = ('options',) |
||||||
|
|
||||||
|
|
||||||
|
class ScopedEvalContextModifier(EvalContextModifier): |
||||||
|
"""Modifies the eval context and reverts it later. Works exactly like |
||||||
|
:class:`EvalContextModifier` but will only modify the |
||||||
|
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`. |
||||||
|
""" |
||||||
|
fields = ('body',) |
||||||
|
|
||||||
|
|
||||||
|
# make sure nobody creates custom nodes |
||||||
|
def _failing_new(*args, **kwargs): |
||||||
|
raise TypeError('can\'t create custom node types') |
||||||
|
NodeType.__new__ = staticmethod(_failing_new); del _failing_new |
@ -0,0 +1,49 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.optimizer |
||||||
|
~~~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
The jinja optimizer is currently trying to constant fold a few expressions |
||||||
|
and modify the AST in place so that it should be easier to evaluate it. |
||||||
|
|
||||||
|
Because the AST does not contain all the scoping information and the |
||||||
|
compiler has to find that out, we cannot do all the optimizations we |
||||||
|
want. For example loop unrolling doesn't work because unrolled loops would |
||||||
|
have a different scoping. |
||||||
|
|
||||||
|
The solution would be a second syntax tree that has the scoping rules stored. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD. |
||||||
|
""" |
||||||
|
from jinja2 import nodes |
||||||
|
from jinja2.visitor import NodeTransformer |
||||||
|
|
||||||
|
|
||||||
|
def optimize(node, environment): |
||||||
|
"""The context hint can be used to perform an static optimization |
||||||
|
based on the context given.""" |
||||||
|
optimizer = Optimizer(environment) |
||||||
|
return optimizer.visit(node) |
||||||
|
|
||||||
|
|
||||||
|
class Optimizer(NodeTransformer): |
||||||
|
|
||||||
|
def __init__(self, environment): |
||||||
|
self.environment = environment |
||||||
|
|
||||||
|
def fold(self, node, eval_ctx=None): |
||||||
|
"""Do constant folding.""" |
||||||
|
node = self.generic_visit(node) |
||||||
|
try: |
||||||
|
return nodes.Const.from_untrusted(node.as_const(eval_ctx), |
||||||
|
lineno=node.lineno, |
||||||
|
environment=self.environment) |
||||||
|
except nodes.Impossible: |
||||||
|
return node |
||||||
|
|
||||||
|
visit_Add = visit_Sub = visit_Mul = visit_Div = visit_FloorDiv = \ |
||||||
|
visit_Pow = visit_Mod = visit_And = visit_Or = visit_Pos = visit_Neg = \ |
||||||
|
visit_Not = visit_Compare = visit_Getitem = visit_Getattr = visit_Call = \ |
||||||
|
visit_Filter = visit_Test = visit_CondExpr = fold |
||||||
|
del fold |
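
# --- Illustrative sketch, not part of the upstream Jinja2 module: constant
# subexpressions are folded into Const nodes, while anything that raises
# Impossible (here a name lookup) is returned unchanged. The helper name
# `_example_constant_folding` is purely illustrative.
def _example_constant_folding():
    from jinja2 import Environment

    env = Environment()

    expr = nodes.Add(nodes.Const(20), nodes.Const(22), lineno=1)
    expr.set_environment(env)
    folded = optimize(expr, env)
    assert isinstance(folded, nodes.Const) and folded.value == 42

    # a name cannot be evaluated at compile time, so the node survives as-is
    dynamic = nodes.Add(nodes.Name('x', 'load'), nodes.Const(1), lineno=1)
    dynamic.set_environment(env)
    assert isinstance(optimize(dynamic, env), nodes.Add)
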
@ -0,0 +1,898 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.parser |
||||||
|
~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Implements the template parser. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
from jinja2 import nodes |
||||||
|
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError |
||||||
|
from jinja2.lexer import describe_token, describe_token_expr |
||||||
|
from jinja2._compat import imap |
||||||
|
|
||||||
|
|
||||||
|
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print', |
||||||
|
'macro', 'include', 'from', 'import', |
||||||
|
'set', 'with', 'autoescape']) |
||||||
|
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq']) |
||||||
|
|
||||||
|
_math_nodes = { |
||||||
|
'add': nodes.Add, |
||||||
|
'sub': nodes.Sub, |
||||||
|
'mul': nodes.Mul, |
||||||
|
'div': nodes.Div, |
||||||
|
'floordiv': nodes.FloorDiv, |
||||||
|
'mod': nodes.Mod, |
||||||
|
} |
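
# --- Illustrative sketch, not part of the upstream Jinja2 module: extensions
# receive this Parser instance and drive it directly. The tag below reuses
# parse_statements() and wraps the collected body in a FilterBlock node; the
# names `UpperExtension` and `_example_upper_extension` are purely
# illustrative.
def _example_upper_extension():
    from jinja2 import Environment
    from jinja2.ext import Extension

    class UpperExtension(Extension):
        """``{% upper %}...{% endupper %}`` upper-cases its body."""
        tags = set(['upper'])

        def parse(self, parser):
            lineno = next(parser.stream).lineno
            body = parser.parse_statements(('name:endupper',),
                                           drop_needle=True)
            # a Filter node whose `node` is None filters the buffered body
            flt = nodes.Filter(None, 'upper', [], [], None, None,
                               lineno=lineno)
            return nodes.FilterBlock(body, flt, lineno=lineno)

    env = Environment(extensions=[UpperExtension])
    return env.from_string('{% upper %}hello{% endupper %}').render()  # HELLO
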
||||||
|
|
||||||
|
|
||||||
|
class Parser(object): |
||||||
|
"""This is the central parsing class Jinja2 uses. It's passed to |
||||||
|
extensions and can be used to parse expressions or statements. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, environment, source, name=None, filename=None, |
||||||
|
state=None): |
||||||
|
self.environment = environment |
||||||
|
self.stream = environment._tokenize(source, name, filename, state) |
||||||
|
self.name = name |
||||||
|
self.filename = filename |
||||||
|
self.closed = False |
||||||
|
self.extensions = {} |
||||||
|
for extension in environment.iter_extensions(): |
||||||
|
for tag in extension.tags: |
||||||
|
self.extensions[tag] = extension.parse |
||||||
|
self._last_identifier = 0 |
||||||
|
self._tag_stack = [] |
||||||
|
self._end_token_stack = [] |
||||||
|
|
||||||
|
def fail(self, msg, lineno=None, exc=TemplateSyntaxError): |
||||||
|
"""Convenience method that raises `exc` with the message, passed |
||||||
|
line number or last line number as well as the current name and |
||||||
|
filename. |
||||||
|
""" |
||||||
|
if lineno is None: |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
raise exc(msg, lineno, self.name, self.filename) |
||||||
|
|
||||||
|
def _fail_ut_eof(self, name, end_token_stack, lineno): |
||||||
|
expected = [] |
||||||
|
for exprs in end_token_stack: |
||||||
|
expected.extend(imap(describe_token_expr, exprs)) |
||||||
|
if end_token_stack: |
||||||
|
currently_looking = ' or '.join( |
||||||
|
"'%s'" % describe_token_expr(expr) |
||||||
|
for expr in end_token_stack[-1]) |
||||||
|
else: |
||||||
|
currently_looking = None |
||||||
|
|
||||||
|
if name is None: |
||||||
|
message = ['Unexpected end of template.'] |
||||||
|
else: |
||||||
|
message = ['Encountered unknown tag \'%s\'.' % name] |
||||||
|
|
||||||
|
if currently_looking: |
||||||
|
if name is not None and name in expected: |
||||||
|
message.append('You probably made a nesting mistake. Jinja ' |
||||||
|
'is expecting this tag, but currently looking ' |
||||||
|
'for %s.' % currently_looking) |
||||||
|
else: |
||||||
|
message.append('Jinja was looking for the following tags: ' |
||||||
|
'%s.' % currently_looking) |
||||||
|
|
||||||
|
if self._tag_stack: |
||||||
|
message.append('The innermost block that needs to be ' |
||||||
|
'closed is \'%s\'.' % self._tag_stack[-1]) |
||||||
|
|
||||||
|
self.fail(' '.join(message), lineno) |
||||||
|
|
||||||
|
def fail_unknown_tag(self, name, lineno=None): |
||||||
|
"""Called if the parser encounters an unknown tag. Tries to fail |
||||||
|
with a human readable error message that could help to identify |
||||||
|
the problem. |
||||||
|
""" |
||||||
|
return self._fail_ut_eof(name, self._end_token_stack, lineno) |
||||||
|
|
||||||
|
def fail_eof(self, end_tokens=None, lineno=None): |
||||||
|
"""Like fail_unknown_tag but for end of template situations.""" |
||||||
|
stack = list(self._end_token_stack) |
||||||
|
if end_tokens is not None: |
||||||
|
stack.append(end_tokens) |
||||||
|
return self._fail_ut_eof(None, stack, lineno) |
||||||
|
|
||||||
|
def is_tuple_end(self, extra_end_rules=None): |
||||||
|
"""Are we at the end of a tuple?""" |
||||||
|
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'): |
||||||
|
return True |
||||||
|
elif extra_end_rules is not None: |
||||||
|
return self.stream.current.test_any(extra_end_rules) |
||||||
|
return False |
||||||
|
|
||||||
|
def free_identifier(self, lineno=None): |
||||||
|
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`.""" |
||||||
|
self._last_identifier += 1 |
||||||
|
rv = object.__new__(nodes.InternalName) |
||||||
|
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno) |
||||||
|
return rv |
||||||
|
|
||||||
|
def parse_statement(self): |
||||||
|
"""Parse a single statement.""" |
||||||
|
token = self.stream.current |
||||||
|
if token.type != 'name': |
||||||
|
self.fail('tag name expected', token.lineno) |
||||||
|
self._tag_stack.append(token.value) |
||||||
|
pop_tag = True |
||||||
|
try: |
||||||
|
if token.value in _statement_keywords: |
||||||
|
return getattr(self, 'parse_' + self.stream.current.value)() |
||||||
|
if token.value == 'call': |
||||||
|
return self.parse_call_block() |
||||||
|
if token.value == 'filter': |
||||||
|
return self.parse_filter_block() |
||||||
|
ext = self.extensions.get(token.value) |
||||||
|
if ext is not None: |
||||||
|
return ext(self) |
||||||
|
|
||||||
|
# did not work out, remove the token we pushed by accident |
||||||
|
# from the stack so that the unknown tag fail function can |
||||||
|
# produce a proper error message. |
||||||
|
self._tag_stack.pop() |
||||||
|
pop_tag = False |
||||||
|
self.fail_unknown_tag(token.value, token.lineno) |
||||||
|
finally: |
||||||
|
if pop_tag: |
||||||
|
self._tag_stack.pop() |
||||||
|
|
||||||
|
def parse_statements(self, end_tokens, drop_needle=False): |
||||||
|
"""Parse multiple statements into a list until one of the end tokens |
||||||
|
is reached. This is used to parse the body of statements as it also |
||||||
|
parses template data if appropriate. The parser checks first if the |
||||||
|
current token is a colon and skips it if there is one. Then it checks |
||||||
|
for the block end and parses until one of the `end_tokens` is
||||||
|
reached. Per default the active token in the stream at the end of |
||||||
|
the call is the matched end token. If this is not wanted `drop_needle` |
||||||
|
can be set to `True` and the end token is removed. |
||||||
|
""" |
||||||
|
# the first token may be a colon for python compatibility |
||||||
|
self.stream.skip_if('colon') |
||||||
|
|
||||||
|
# in the future it would be possible to add whole code sections |
||||||
|
# by adding some sort of end of statement token and parsing those here. |
||||||
|
self.stream.expect('block_end') |
||||||
|
result = self.subparse(end_tokens) |
||||||
|
|
||||||
|
# we reached the end of the template too early, the subparser |
||||||
|
# does not check for this, so we do that now |
||||||
|
if self.stream.current.type == 'eof': |
||||||
|
self.fail_eof(end_tokens) |
||||||
|
|
||||||
|
if drop_needle: |
||||||
|
next(self.stream) |
||||||
|
return result |
||||||
|
|
||||||
|
def parse_set(self): |
||||||
|
"""Parse an assign statement.""" |
||||||
|
lineno = next(self.stream).lineno |
||||||
|
target = self.parse_assign_target() |
||||||
|
if self.stream.skip_if('assign'): |
||||||
|
expr = self.parse_tuple() |
||||||
|
return nodes.Assign(target, expr, lineno=lineno) |
||||||
|
body = self.parse_statements(('name:endset',), |
||||||
|
drop_needle=True) |
||||||
|
return nodes.AssignBlock(target, body, lineno=lineno) |
||||||
|
|
||||||
|
def parse_for(self): |
||||||
|
"""Parse a for loop.""" |
||||||
|
lineno = self.stream.expect('name:for').lineno |
||||||
|
target = self.parse_assign_target(extra_end_rules=('name:in',)) |
||||||
|
self.stream.expect('name:in') |
||||||
|
iter = self.parse_tuple(with_condexpr=False, |
||||||
|
extra_end_rules=('name:recursive',)) |
||||||
|
test = None |
||||||
|
if self.stream.skip_if('name:if'): |
||||||
|
test = self.parse_expression() |
||||||
|
recursive = self.stream.skip_if('name:recursive') |
||||||
|
body = self.parse_statements(('name:endfor', 'name:else')) |
||||||
|
if next(self.stream).value == 'endfor': |
||||||
|
else_ = [] |
||||||
|
else: |
||||||
|
else_ = self.parse_statements(('name:endfor',), drop_needle=True) |
||||||
|
return nodes.For(target, iter, body, else_, test, |
||||||
|
recursive, lineno=lineno) |
||||||
|
|
||||||
|
def parse_if(self): |
||||||
|
"""Parse an if construct.""" |
||||||
|
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno) |
||||||
|
while 1: |
||||||
|
node.test = self.parse_tuple(with_condexpr=False) |
||||||
|
node.body = self.parse_statements(('name:elif', 'name:else', |
||||||
|
'name:endif')) |
||||||
|
token = next(self.stream) |
||||||
|
if token.test('name:elif'): |
||||||
|
new_node = nodes.If(lineno=self.stream.current.lineno) |
||||||
|
node.else_ = [new_node] |
||||||
|
node = new_node |
||||||
|
continue |
||||||
|
elif token.test('name:else'): |
||||||
|
node.else_ = self.parse_statements(('name:endif',), |
||||||
|
drop_needle=True) |
||||||
|
else: |
||||||
|
node.else_ = [] |
||||||
|
break |
||||||
|
return result |
||||||
|
|
||||||
|
def parse_with(self): |
||||||
|
node = nodes.With(lineno=next(self.stream).lineno) |
||||||
|
targets = [] |
||||||
|
values = [] |
||||||
|
while self.stream.current.type != 'block_end': |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
if targets: |
||||||
|
self.stream.expect('comma') |
||||||
|
target = self.parse_assign_target() |
||||||
|
target.set_ctx('param') |
||||||
|
targets.append(target) |
||||||
|
self.stream.expect('assign') |
||||||
|
values.append(self.parse_expression()) |
||||||
|
node.targets = targets |
||||||
|
node.values = values |
||||||
|
node.body = self.parse_statements(('name:endwith',), |
||||||
|
drop_needle=True) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_autoescape(self): |
||||||
|
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno) |
||||||
|
node.options = [ |
||||||
|
nodes.Keyword('autoescape', self.parse_expression()) |
||||||
|
] |
||||||
|
node.body = self.parse_statements(('name:endautoescape',), |
||||||
|
drop_needle=True) |
||||||
|
return nodes.Scope([node]) |
||||||
|
|
||||||
|
def parse_block(self): |
||||||
|
node = nodes.Block(lineno=next(self.stream).lineno) |
||||||
|
node.name = self.stream.expect('name').value |
||||||
|
node.scoped = self.stream.skip_if('name:scoped') |
||||||
|
|
||||||
|
# common problem people encounter when switching from django |
||||||
|
# to jinja. we do not support hyphens in block names, so let's |
||||||
|
# raise a nicer error message in that case. |
||||||
|
if self.stream.current.type == 'sub': |
||||||
|
self.fail('Block names in Jinja have to be valid Python ' |
||||||
|
'identifiers and may not contain hyphens, use an ' |
||||||
|
'underscore instead.') |
||||||
|
|
||||||
|
node.body = self.parse_statements(('name:endblock',), drop_needle=True) |
||||||
|
self.stream.skip_if('name:' + node.name) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_extends(self): |
||||||
|
node = nodes.Extends(lineno=next(self.stream).lineno) |
||||||
|
node.template = self.parse_expression() |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_import_context(self, node, default): |
||||||
|
if self.stream.current.test_any('name:with', 'name:without') and \ |
||||||
|
self.stream.look().test('name:context'): |
||||||
|
node.with_context = next(self.stream).value == 'with' |
||||||
|
self.stream.skip() |
||||||
|
else: |
||||||
|
node.with_context = default |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_include(self): |
||||||
|
node = nodes.Include(lineno=next(self.stream).lineno) |
||||||
|
node.template = self.parse_expression() |
||||||
|
if self.stream.current.test('name:ignore') and \ |
||||||
|
self.stream.look().test('name:missing'): |
||||||
|
node.ignore_missing = True |
||||||
|
self.stream.skip(2) |
||||||
|
else: |
||||||
|
node.ignore_missing = False |
||||||
|
return self.parse_import_context(node, True) |
||||||
|
|
||||||
|
def parse_import(self): |
||||||
|
node = nodes.Import(lineno=next(self.stream).lineno) |
||||||
|
node.template = self.parse_expression() |
||||||
|
self.stream.expect('name:as') |
||||||
|
node.target = self.parse_assign_target(name_only=True).name |
||||||
|
return self.parse_import_context(node, False) |
||||||
|
|
||||||
|
def parse_from(self): |
||||||
|
node = nodes.FromImport(lineno=next(self.stream).lineno) |
||||||
|
node.template = self.parse_expression() |
||||||
|
self.stream.expect('name:import') |
||||||
|
node.names = [] |
||||||
|
|
||||||
|
def parse_context(): |
||||||
|
if self.stream.current.value in ('with', 'without') and \ |
||||||
|
self.stream.look().test('name:context'): |
||||||
|
node.with_context = next(self.stream).value == 'with' |
||||||
|
self.stream.skip() |
||||||
|
return True |
||||||
|
return False |
||||||
|
|
||||||
|
while 1: |
||||||
|
if node.names: |
||||||
|
self.stream.expect('comma') |
||||||
|
if self.stream.current.type == 'name': |
||||||
|
if parse_context(): |
||||||
|
break |
||||||
|
target = self.parse_assign_target(name_only=True) |
||||||
|
if target.name.startswith('_'): |
||||||
|
self.fail('names starting with an underscore can not '
||||||
|
'be imported', target.lineno, |
||||||
|
exc=TemplateAssertionError) |
||||||
|
if self.stream.skip_if('name:as'): |
||||||
|
alias = self.parse_assign_target(name_only=True) |
||||||
|
node.names.append((target.name, alias.name)) |
||||||
|
else: |
||||||
|
node.names.append(target.name) |
||||||
|
if parse_context() or self.stream.current.type != 'comma': |
||||||
|
break |
||||||
|
else: |
||||||
|
break |
||||||
|
if not hasattr(node, 'with_context'): |
||||||
|
node.with_context = False |
||||||
|
self.stream.skip_if('comma') |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_signature(self, node): |
||||||
|
node.args = args = [] |
||||||
|
node.defaults = defaults = [] |
||||||
|
self.stream.expect('lparen') |
||||||
|
while self.stream.current.type != 'rparen': |
||||||
|
if args: |
||||||
|
self.stream.expect('comma') |
||||||
|
arg = self.parse_assign_target(name_only=True) |
||||||
|
arg.set_ctx('param') |
||||||
|
if self.stream.skip_if('assign'): |
||||||
|
defaults.append(self.parse_expression()) |
||||||
|
elif defaults: |
||||||
|
self.fail('non-default argument follows default argument') |
||||||
|
args.append(arg) |
||||||
|
self.stream.expect('rparen') |
||||||
|
|
||||||
|
def parse_call_block(self): |
||||||
|
node = nodes.CallBlock(lineno=next(self.stream).lineno) |
||||||
|
if self.stream.current.type == 'lparen': |
||||||
|
self.parse_signature(node) |
||||||
|
else: |
||||||
|
node.args = [] |
||||||
|
node.defaults = [] |
||||||
|
|
||||||
|
node.call = self.parse_expression() |
||||||
|
if not isinstance(node.call, nodes.Call): |
||||||
|
self.fail('expected call', node.lineno) |
||||||
|
node.body = self.parse_statements(('name:endcall',), drop_needle=True) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_filter_block(self): |
||||||
|
node = nodes.FilterBlock(lineno=next(self.stream).lineno) |
||||||
|
node.filter = self.parse_filter(None, start_inline=True) |
||||||
|
node.body = self.parse_statements(('name:endfilter',), |
||||||
|
drop_needle=True) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_macro(self): |
||||||
|
node = nodes.Macro(lineno=next(self.stream).lineno) |
||||||
|
node.name = self.parse_assign_target(name_only=True).name |
||||||
|
self.parse_signature(node) |
||||||
|
node.body = self.parse_statements(('name:endmacro',), |
||||||
|
drop_needle=True) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_print(self): |
||||||
|
node = nodes.Output(lineno=next(self.stream).lineno) |
||||||
|
node.nodes = [] |
||||||
|
while self.stream.current.type != 'block_end': |
||||||
|
if node.nodes: |
||||||
|
self.stream.expect('comma') |
||||||
|
node.nodes.append(self.parse_expression()) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_assign_target(self, with_tuple=True, name_only=False, |
||||||
|
extra_end_rules=None): |
||||||
|
"""Parse an assignment target. As Jinja2 allows assignments to |
||||||
|
tuples, this function can parse all allowed assignment targets. Per |
||||||
|
default assignments to tuples are parsed; that can be disabled, however,
||||||
|
by setting `with_tuple` to `False`. If only assignments to names are |
||||||
|
wanted `name_only` can be set to `True`. The `extra_end_rules` |
||||||
|
parameter is forwarded to the tuple parsing function. |
||||||
|
""" |
||||||
|
if name_only: |
||||||
|
token = self.stream.expect('name') |
||||||
|
target = nodes.Name(token.value, 'store', lineno=token.lineno) |
||||||
|
else: |
||||||
|
if with_tuple: |
||||||
|
target = self.parse_tuple(simplified=True, |
||||||
|
extra_end_rules=extra_end_rules) |
||||||
|
else: |
||||||
|
target = self.parse_primary() |
||||||
|
target.set_ctx('store') |
||||||
|
if not target.can_assign(): |
||||||
|
self.fail('can\'t assign to %r' % target.__class__. |
||||||
|
__name__.lower(), target.lineno) |
||||||
|
return target |
||||||
|
|
||||||
|
def parse_expression(self, with_condexpr=True): |
||||||
|
"""Parse an expression. Per default all expressions are parsed, if |
||||||
|
the optional `with_condexpr` parameter is set to `False` conditional |
||||||
|
expressions are not parsed. |
||||||
|
""" |
||||||
|
if with_condexpr: |
||||||
|
return self.parse_condexpr() |
||||||
|
return self.parse_or() |
||||||
|
|
||||||
|
def parse_condexpr(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
expr1 = self.parse_or() |
||||||
|
while self.stream.skip_if('name:if'): |
||||||
|
expr2 = self.parse_or() |
||||||
|
if self.stream.skip_if('name:else'): |
||||||
|
expr3 = self.parse_condexpr() |
||||||
|
else: |
||||||
|
expr3 = None |
||||||
|
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return expr1 |
||||||
|
|
||||||
|
def parse_or(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
left = self.parse_and() |
||||||
|
while self.stream.skip_if('name:or'): |
||||||
|
right = self.parse_and() |
||||||
|
left = nodes.Or(left, right, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return left |
||||||
|
|
||||||
|
def parse_and(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
left = self.parse_not() |
||||||
|
while self.stream.skip_if('name:and'): |
||||||
|
right = self.parse_not() |
||||||
|
left = nodes.And(left, right, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return left |
||||||
|
|
||||||
|
def parse_not(self): |
||||||
|
if self.stream.current.test('name:not'): |
||||||
|
lineno = next(self.stream).lineno |
||||||
|
return nodes.Not(self.parse_not(), lineno=lineno) |
||||||
|
return self.parse_compare() |
||||||
|
|
||||||
|
def parse_compare(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
expr = self.parse_math1() |
||||||
|
ops = [] |
||||||
|
while 1: |
||||||
|
token_type = self.stream.current.type |
||||||
|
if token_type in _compare_operators: |
||||||
|
next(self.stream) |
||||||
|
ops.append(nodes.Operand(token_type, self.parse_math1())) |
||||||
|
elif self.stream.skip_if('name:in'): |
||||||
|
ops.append(nodes.Operand('in', self.parse_math1())) |
||||||
|
elif (self.stream.current.test('name:not') and |
||||||
|
self.stream.look().test('name:in')): |
||||||
|
self.stream.skip(2) |
||||||
|
ops.append(nodes.Operand('notin', self.parse_math1())) |
||||||
|
else: |
||||||
|
break |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
if not ops: |
||||||
|
return expr |
||||||
|
return nodes.Compare(expr, ops, lineno=lineno) |
||||||
|
|
||||||
|
def parse_math1(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
left = self.parse_concat() |
||||||
|
while self.stream.current.type in ('add', 'sub'): |
||||||
|
cls = _math_nodes[self.stream.current.type] |
||||||
|
next(self.stream) |
||||||
|
right = self.parse_concat() |
||||||
|
left = cls(left, right, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return left |
||||||
|
|
||||||
|
def parse_concat(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
args = [self.parse_math2()] |
||||||
|
while self.stream.current.type == 'tilde': |
||||||
|
next(self.stream) |
||||||
|
args.append(self.parse_math2()) |
||||||
|
if len(args) == 1: |
||||||
|
return args[0] |
||||||
|
return nodes.Concat(args, lineno=lineno) |
||||||
|
|
||||||
|
def parse_math2(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
left = self.parse_pow() |
||||||
|
while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'): |
||||||
|
cls = _math_nodes[self.stream.current.type] |
||||||
|
next(self.stream) |
||||||
|
right = self.parse_pow() |
||||||
|
left = cls(left, right, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return left |
||||||
|
|
||||||
|
def parse_pow(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
left = self.parse_unary() |
||||||
|
while self.stream.current.type == 'pow': |
||||||
|
next(self.stream) |
||||||
|
right = self.parse_unary() |
||||||
|
left = nodes.Pow(left, right, lineno=lineno) |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
return left |
||||||
|
|
||||||
|
def parse_unary(self, with_filter=True): |
||||||
|
token_type = self.stream.current.type |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
if token_type == 'sub': |
||||||
|
next(self.stream) |
||||||
|
node = nodes.Neg(self.parse_unary(False), lineno=lineno) |
||||||
|
elif token_type == 'add': |
||||||
|
next(self.stream) |
||||||
|
node = nodes.Pos(self.parse_unary(False), lineno=lineno) |
||||||
|
else: |
||||||
|
node = self.parse_primary() |
||||||
|
node = self.parse_postfix(node) |
||||||
|
if with_filter: |
||||||
|
node = self.parse_filter_expr(node) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_primary(self): |
||||||
|
token = self.stream.current |
||||||
|
if token.type == 'name': |
||||||
|
if token.value in ('true', 'false', 'True', 'False'): |
||||||
|
node = nodes.Const(token.value in ('true', 'True'), |
||||||
|
lineno=token.lineno) |
||||||
|
elif token.value in ('none', 'None'): |
||||||
|
node = nodes.Const(None, lineno=token.lineno) |
||||||
|
else: |
||||||
|
node = nodes.Name(token.value, 'load', lineno=token.lineno) |
||||||
|
next(self.stream) |
||||||
|
elif token.type == 'string': |
||||||
|
next(self.stream) |
||||||
|
buf = [token.value] |
||||||
|
lineno = token.lineno |
||||||
|
while self.stream.current.type == 'string': |
||||||
|
buf.append(self.stream.current.value) |
||||||
|
next(self.stream) |
||||||
|
node = nodes.Const(''.join(buf), lineno=lineno) |
||||||
|
elif token.type in ('integer', 'float'): |
||||||
|
next(self.stream) |
||||||
|
node = nodes.Const(token.value, lineno=token.lineno) |
||||||
|
elif token.type == 'lparen': |
||||||
|
next(self.stream) |
||||||
|
node = self.parse_tuple(explicit_parentheses=True) |
||||||
|
self.stream.expect('rparen') |
||||||
|
elif token.type == 'lbracket': |
||||||
|
node = self.parse_list() |
||||||
|
elif token.type == 'lbrace': |
||||||
|
node = self.parse_dict() |
||||||
|
else: |
||||||
|
self.fail("unexpected '%s'" % describe_token(token), token.lineno) |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_tuple(self, simplified=False, with_condexpr=True, |
||||||
|
extra_end_rules=None, explicit_parentheses=False): |
||||||
|
"""Works like `parse_expression` but if multiple expressions are |
||||||
|
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created. |
||||||
|
This method can also return a plain expression instead of a tuple
if no commas were found.
||||||
|
|
||||||
|
The default parsing mode is a full tuple. If `simplified` is `True` |
||||||
|
only names and literals are parsed. The `with_condexpr` parameter is
||||||
|
forwarded to :meth:`parse_expression`. |
||||||
|
|
||||||
|
Because tuples do not require delimiters and may end in a bogus comma |
||||||
|
an extra hint is needed that marks the end of a tuple. For example |
||||||
|
for loops support tuples between `for` and `in`. In that case the |
||||||
|
`extra_end_rules` is set to ``['name:in']``. |
||||||
|
|
||||||
|
`explicit_parentheses` is true if the parsing was triggered by an |
||||||
|
expression in parentheses. This is used to figure out if an empty |
||||||
|
tuple is a valid expression or not. |
||||||
|
""" |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
if simplified: |
||||||
|
parse = self.parse_primary |
||||||
|
elif with_condexpr: |
||||||
|
parse = self.parse_expression |
||||||
|
else: |
||||||
|
parse = lambda: self.parse_expression(with_condexpr=False) |
||||||
|
args = [] |
||||||
|
is_tuple = False |
||||||
|
while 1: |
||||||
|
if args: |
||||||
|
self.stream.expect('comma') |
||||||
|
if self.is_tuple_end(extra_end_rules): |
||||||
|
break |
||||||
|
args.append(parse()) |
||||||
|
if self.stream.current.type == 'comma': |
||||||
|
is_tuple = True |
||||||
|
else: |
||||||
|
break |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
|
||||||
|
if not is_tuple: |
||||||
|
if args: |
||||||
|
return args[0] |
||||||
|
|
||||||
|
# if we don't have explicit parentheses, an empty tuple is |
||||||
|
# not a valid expression. This would mean nothing (literally |
||||||
|
# nothing) in the spot of an expression would be an empty |
||||||
|
# tuple. |
||||||
|
if not explicit_parentheses: |
||||||
|
self.fail('Expected an expression, got \'%s\'' % |
||||||
|
describe_token(self.stream.current)) |
||||||
|
|
||||||
|
return nodes.Tuple(args, 'load', lineno=lineno) |
||||||
|
|
||||||
|
def parse_list(self): |
||||||
|
token = self.stream.expect('lbracket') |
||||||
|
items = [] |
||||||
|
while self.stream.current.type != 'rbracket': |
||||||
|
if items: |
||||||
|
self.stream.expect('comma') |
||||||
|
if self.stream.current.type == 'rbracket': |
||||||
|
break |
||||||
|
items.append(self.parse_expression()) |
||||||
|
self.stream.expect('rbracket') |
||||||
|
return nodes.List(items, lineno=token.lineno) |
||||||
|
|
||||||
|
def parse_dict(self): |
||||||
|
token = self.stream.expect('lbrace') |
||||||
|
items = [] |
||||||
|
while self.stream.current.type != 'rbrace': |
||||||
|
if items: |
||||||
|
self.stream.expect('comma') |
||||||
|
if self.stream.current.type == 'rbrace': |
||||||
|
break |
||||||
|
key = self.parse_expression() |
||||||
|
self.stream.expect('colon') |
||||||
|
value = self.parse_expression() |
||||||
|
items.append(nodes.Pair(key, value, lineno=key.lineno)) |
||||||
|
self.stream.expect('rbrace') |
||||||
|
return nodes.Dict(items, lineno=token.lineno) |
||||||
|
|
||||||
|
def parse_postfix(self, node): |
||||||
|
while 1: |
||||||
|
token_type = self.stream.current.type |
||||||
|
if token_type == 'dot' or token_type == 'lbracket': |
||||||
|
node = self.parse_subscript(node) |
||||||
|
# calls are valid both after postfix expressions (getattr |
||||||
|
# and getitem) as well as filters and tests |
||||||
|
elif token_type == 'lparen': |
||||||
|
node = self.parse_call(node) |
||||||
|
else: |
||||||
|
break |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_filter_expr(self, node): |
||||||
|
while 1: |
||||||
|
token_type = self.stream.current.type |
||||||
|
if token_type == 'pipe': |
||||||
|
node = self.parse_filter(node) |
||||||
|
elif token_type == 'name' and self.stream.current.value == 'is': |
||||||
|
node = self.parse_test(node) |
||||||
|
# calls are valid both after postfix expressions (getattr |
||||||
|
# and getitem) as well as filters and tests |
||||||
|
elif token_type == 'lparen': |
||||||
|
node = self.parse_call(node) |
||||||
|
else: |
||||||
|
break |
||||||
|
return node |
||||||
|
|
||||||
|
def parse_subscript(self, node): |
||||||
|
token = next(self.stream) |
||||||
|
if token.type == 'dot': |
||||||
|
attr_token = self.stream.current |
||||||
|
next(self.stream) |
||||||
|
if attr_token.type == 'name': |
||||||
|
return nodes.Getattr(node, attr_token.value, 'load', |
||||||
|
lineno=token.lineno) |
||||||
|
elif attr_token.type != 'integer': |
||||||
|
self.fail('expected name or number', attr_token.lineno) |
||||||
|
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno) |
||||||
|
return nodes.Getitem(node, arg, 'load', lineno=token.lineno) |
||||||
|
if token.type == 'lbracket': |
||||||
|
args = [] |
||||||
|
while self.stream.current.type != 'rbracket': |
||||||
|
if args: |
||||||
|
self.stream.expect('comma') |
||||||
|
args.append(self.parse_subscribed()) |
||||||
|
self.stream.expect('rbracket') |
||||||
|
if len(args) == 1: |
||||||
|
arg = args[0] |
||||||
|
else: |
||||||
|
arg = nodes.Tuple(args, 'load', lineno=token.lineno) |
||||||
|
return nodes.Getitem(node, arg, 'load', lineno=token.lineno) |
||||||
|
self.fail('expected subscript expression', token.lineno) |
||||||
|
|
||||||
|
def parse_subscribed(self): |
||||||
|
lineno = self.stream.current.lineno |
||||||
|
|
||||||
|
if self.stream.current.type == 'colon': |
||||||
|
next(self.stream) |
||||||
|
args = [None] |
||||||
|
else: |
||||||
|
node = self.parse_expression() |
||||||
|
if self.stream.current.type != 'colon': |
||||||
|
return node |
||||||
|
next(self.stream) |
||||||
|
args = [node] |
||||||
|
|
||||||
|
if self.stream.current.type == 'colon': |
||||||
|
args.append(None) |
||||||
|
elif self.stream.current.type not in ('rbracket', 'comma'): |
||||||
|
args.append(self.parse_expression()) |
||||||
|
else: |
||||||
|
args.append(None) |
||||||
|
|
||||||
|
if self.stream.current.type == 'colon': |
||||||
|
next(self.stream) |
||||||
|
if self.stream.current.type not in ('rbracket', 'comma'): |
||||||
|
args.append(self.parse_expression()) |
||||||
|
else: |
||||||
|
args.append(None) |
||||||
|
else: |
||||||
|
args.append(None) |
||||||
|
|
||||||
|
return nodes.Slice(lineno=lineno, *args) |
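# Rough illustration, added as a comment (not in the original file): for a
# template expression such as  {{ seq[1:10:2] }}  this returns
# nodes.Slice(Const(1), Const(10), Const(2)), and  {{ seq[:5] }}  returns
# nodes.Slice(None, Const(5), None); a plain index like  {{ seq[3] }}  takes
# the early 'return node' path above and never builds a Slice.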
||||||
|
|
||||||
|
def parse_call(self, node): |
||||||
|
token = self.stream.expect('lparen') |
||||||
|
args = [] |
||||||
|
kwargs = [] |
||||||
|
dyn_args = dyn_kwargs = None |
||||||
|
require_comma = False |
||||||
|
|
||||||
|
def ensure(expr): |
||||||
|
if not expr: |
||||||
|
self.fail('invalid syntax for function call expression', |
||||||
|
token.lineno) |
||||||
|
|
||||||
|
while self.stream.current.type != 'rparen': |
||||||
|
if require_comma: |
||||||
|
self.stream.expect('comma') |
||||||
|
# support for trailing comma |
||||||
|
if self.stream.current.type == 'rparen': |
||||||
|
break |
||||||
|
if self.stream.current.type == 'mul': |
||||||
|
ensure(dyn_args is None and dyn_kwargs is None) |
||||||
|
next(self.stream) |
||||||
|
dyn_args = self.parse_expression() |
||||||
|
elif self.stream.current.type == 'pow': |
||||||
|
ensure(dyn_kwargs is None) |
||||||
|
next(self.stream) |
||||||
|
dyn_kwargs = self.parse_expression() |
||||||
|
else: |
||||||
|
ensure(dyn_args is None and dyn_kwargs is None) |
||||||
|
if self.stream.current.type == 'name' and \ |
||||||
|
self.stream.look().type == 'assign': |
||||||
|
key = self.stream.current.value |
||||||
|
self.stream.skip(2) |
||||||
|
value = self.parse_expression() |
||||||
|
kwargs.append(nodes.Keyword(key, value, |
||||||
|
lineno=value.lineno)) |
||||||
|
else: |
||||||
|
ensure(not kwargs) |
||||||
|
args.append(self.parse_expression()) |
||||||
|
|
||||||
|
require_comma = True |
||||||
|
self.stream.expect('rparen') |
||||||
|
|
||||||
|
if node is None: |
||||||
|
return args, kwargs, dyn_args, dyn_kwargs |
||||||
|
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, |
||||||
|
lineno=token.lineno) |
||||||
|
|
||||||
|
def parse_filter(self, node, start_inline=False): |
||||||
|
while self.stream.current.type == 'pipe' or start_inline: |
||||||
|
if not start_inline: |
||||||
|
next(self.stream) |
||||||
|
token = self.stream.expect('name') |
||||||
|
name = token.value |
||||||
|
while self.stream.current.type == 'dot': |
||||||
|
next(self.stream) |
||||||
|
name += '.' + self.stream.expect('name').value |
||||||
|
if self.stream.current.type == 'lparen': |
||||||
|
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) |
||||||
|
else: |
||||||
|
args = [] |
||||||
|
kwargs = [] |
||||||
|
dyn_args = dyn_kwargs = None |
||||||
|
node = nodes.Filter(node, name, args, kwargs, dyn_args, |
||||||
|
dyn_kwargs, lineno=token.lineno) |
||||||
|
start_inline = False |
||||||
|
return node |
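# Illustrative sketch (comment only, not in the original source): the loop
# above turns a chain such as  {{ name|trim|title }}  into nested Filter
# nodes, Filter(Filter(Name('name'), 'trim', ...), 'title', ...), and a
# dotted filter name like  {{ value|my.filter }}  is joined into the single
# lookup name 'my.filter'.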
||||||
|
|
||||||
|
def parse_test(self, node): |
||||||
|
token = next(self.stream) |
||||||
|
if self.stream.current.test('name:not'): |
||||||
|
next(self.stream) |
||||||
|
negated = True |
||||||
|
else: |
||||||
|
negated = False |
||||||
|
name = self.stream.expect('name').value |
||||||
|
while self.stream.current.type == 'dot': |
||||||
|
next(self.stream) |
||||||
|
name += '.' + self.stream.expect('name').value |
||||||
|
dyn_args = dyn_kwargs = None |
||||||
|
kwargs = [] |
||||||
|
if self.stream.current.type == 'lparen': |
||||||
|
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None) |
||||||
|
elif (self.stream.current.type in ('name', 'string', 'integer', |
||||||
|
'float', 'lparen', 'lbracket', |
||||||
|
'lbrace') and not |
||||||
|
self.stream.current.test_any('name:else', 'name:or', |
||||||
|
'name:and')): |
||||||
|
if self.stream.current.test('name:is'): |
||||||
|
self.fail('You cannot chain multiple tests with is') |
||||||
|
args = [self.parse_primary()] |
||||||
|
else: |
||||||
|
args = [] |
||||||
|
node = nodes.Test(node, name, args, kwargs, dyn_args, |
||||||
|
dyn_kwargs, lineno=token.lineno) |
||||||
|
if negated: |
||||||
|
node = nodes.Not(node, lineno=token.lineno) |
||||||
|
return node |
||||||
|
|
||||||
|
def subparse(self, end_tokens=None): |
||||||
|
body = [] |
||||||
|
data_buffer = [] |
||||||
|
add_data = data_buffer.append |
||||||
|
|
||||||
|
if end_tokens is not None: |
||||||
|
self._end_token_stack.append(end_tokens) |
||||||
|
|
||||||
|
def flush_data(): |
||||||
|
if data_buffer: |
||||||
|
lineno = data_buffer[0].lineno |
||||||
|
body.append(nodes.Output(data_buffer[:], lineno=lineno)) |
||||||
|
del data_buffer[:] |
||||||
|
|
||||||
|
try: |
||||||
|
while self.stream: |
||||||
|
token = self.stream.current |
||||||
|
if token.type == 'data': |
||||||
|
if token.value: |
||||||
|
add_data(nodes.TemplateData(token.value, |
||||||
|
lineno=token.lineno)) |
||||||
|
next(self.stream) |
||||||
|
elif token.type == 'variable_begin': |
||||||
|
next(self.stream) |
||||||
|
add_data(self.parse_tuple(with_condexpr=True)) |
||||||
|
self.stream.expect('variable_end') |
||||||
|
elif token.type == 'block_begin': |
||||||
|
flush_data() |
||||||
|
next(self.stream) |
||||||
|
if end_tokens is not None and \ |
||||||
|
self.stream.current.test_any(*end_tokens): |
||||||
|
return body |
||||||
|
rv = self.parse_statement() |
||||||
|
if isinstance(rv, list): |
||||||
|
body.extend(rv) |
||||||
|
else: |
||||||
|
body.append(rv) |
||||||
|
self.stream.expect('block_end') |
||||||
|
else: |
||||||
|
raise AssertionError('internal parsing error') |
||||||
|
|
||||||
|
flush_data() |
||||||
|
finally: |
||||||
|
if end_tokens is not None: |
||||||
|
self._end_token_stack.pop() |
||||||
|
|
||||||
|
return body |
||||||
|
|
||||||
|
def parse(self): |
||||||
|
"""Parse the whole template into a `Template` node.""" |
||||||
|
result = nodes.Template(self.subparse(), lineno=1) |
||||||
|
result.set_environment(self.environment) |
||||||
|
return result |
@ -0,0 +1,787 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.runtime |
||||||
|
~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Runtime helpers. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD. |
||||||
|
""" |
||||||
|
import sys |
||||||
|
|
||||||
|
from itertools import chain |
||||||
|
from types import MethodType |
||||||
|
|
||||||
|
from jinja2.nodes import EvalContext, _context_function_types |
||||||
|
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \ |
||||||
|
internalcode, object_type_repr, evalcontextfunction |
||||||
|
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \ |
||||||
|
TemplateNotFound |
||||||
|
from jinja2._compat import imap, text_type, iteritems, \ |
||||||
|
implements_iterator, implements_to_string, string_types, PY2, \ |
||||||
|
with_metaclass |
||||||
|
|
||||||
|
|
||||||
|
# these variables are exported to the template runtime |
||||||
|
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup', |
||||||
|
'TemplateRuntimeError', 'missing', 'concat', 'escape', |
||||||
|
'markup_join', 'unicode_join', 'to_string', 'identity', |
||||||
|
'TemplateNotFound'] |
||||||
|
|
||||||
|
#: the name of the function that is used to convert something into |
||||||
|
#: a string. We can just use the text type here. |
||||||
|
to_string = text_type |
||||||
|
|
||||||
|
#: the identity function. Useful for certain things in the environment |
||||||
|
identity = lambda x: x |
||||||
|
|
||||||
|
_last_iteration = object() |
||||||
|
|
||||||
|
|
||||||
|
def markup_join(seq): |
||||||
|
"""Concatenation that escapes if necessary and converts to unicode.""" |
||||||
|
buf = [] |
||||||
|
iterator = imap(soft_unicode, seq) |
||||||
|
for arg in iterator: |
||||||
|
buf.append(arg) |
||||||
|
if hasattr(arg, '__html__'): |
||||||
|
return Markup(u'').join(chain(buf, iterator)) |
||||||
|
return concat(buf) |
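# Behaviour sketch (added comment; assumes the MarkupSafe join semantics):
# as soon as one element provides __html__, the whole sequence is joined as
# Markup and the plain elements are escaped, e.g. joining
# ['<b>', Markup('<i>x</i>')] gives Markup(u'&lt;b&gt;<i>x</i>'); with no
# markup elements this behaves like unicode_join below.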
||||||
|
|
||||||
|
|
||||||
|
def unicode_join(seq): |
||||||
|
"""Simple args to unicode conversion and concatenation.""" |
||||||
|
return concat(imap(text_type, seq)) |
||||||
|
|
||||||
|
|
||||||
|
def new_context(environment, template_name, blocks, vars=None, |
||||||
|
shared=None, globals=None, locals=None): |
||||||
|
"""Internal helper to for context creation.""" |
||||||
|
if vars is None: |
||||||
|
vars = {} |
||||||
|
if shared: |
||||||
|
parent = vars |
||||||
|
else: |
||||||
|
parent = dict(globals or (), **vars) |
||||||
|
if locals: |
||||||
|
# if the parent is shared a copy should be created because |
||||||
|
# we don't want to modify the dict passed |
||||||
|
if shared: |
||||||
|
parent = dict(parent) |
||||||
|
for key, value in iteritems(locals): |
||||||
|
if value is not missing: |
||||||
|
parent[key] = value |
||||||
|
return environment.context_class(environment, parent, template_name, |
||||||
|
blocks) |
||||||
|
|
||||||
|
|
||||||
|
class TemplateReference(object): |
||||||
|
"""The `self` in templates.""" |
||||||
|
|
||||||
|
def __init__(self, context): |
||||||
|
self.__context = context |
||||||
|
|
||||||
|
def __getitem__(self, name): |
||||||
|
blocks = self.__context.blocks[name] |
||||||
|
return BlockReference(name, self.__context, blocks, 0) |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '<%s %r>' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
self.__context.name |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
def _get_func(x): |
||||||
|
return getattr(x, '__func__', x) |
||||||
|
|
||||||
|
|
||||||
|
class ContextMeta(type): |
||||||
|
|
||||||
|
def __new__(cls, name, bases, d): |
||||||
|
rv = type.__new__(cls, name, bases, d) |
||||||
|
if bases == (): |
||||||
|
return rv |
||||||
|
|
||||||
|
resolve = _get_func(rv.resolve) |
||||||
|
default_resolve = _get_func(Context.resolve) |
||||||
|
resolve_or_missing = _get_func(rv.resolve_or_missing) |
||||||
|
default_resolve_or_missing = _get_func(Context.resolve_or_missing) |
||||||
|
|
||||||
|
# If we have a changed resolve but no changed default or missing |
||||||
|
# resolve we invert the call logic. |
||||||
|
if resolve is not default_resolve and \ |
||||||
|
resolve_or_missing is default_resolve_or_missing: |
||||||
|
rv._legacy_resolve_mode = True |
||||||
|
elif resolve is default_resolve and \ |
||||||
|
resolve_or_missing is default_resolve_or_missing: |
||||||
|
rv._fast_resolve_mode = True |
||||||
|
|
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
def resolve_or_missing(context, key, missing=missing): |
||||||
|
if key in context.vars: |
||||||
|
return context.vars[key] |
||||||
|
if key in context.parent: |
||||||
|
return context.parent[key] |
||||||
|
return missing |
||||||
|
|
||||||
|
|
||||||
|
class Context(with_metaclass(ContextMeta)): |
||||||
|
"""The template context holds the variables of a template. It stores the |
||||||
|
values passed to the template and also the names the template exports. |
||||||
|
Creating instances is neither supported nor useful as it's created |
||||||
|
automatically at various stages of the template evaluation and should not |
||||||
|
be created by hand. |
||||||
|
|
||||||
|
The context is immutable. Modifications on :attr:`parent` **must not** |
||||||
|
happen and modifications on :attr:`vars` are allowed from generated |
||||||
|
template code only. Template filters and global functions marked as |
||||||
|
:func:`contextfunction`\\s get the active context passed as first argument |
||||||
|
and are allowed to access the context read-only. |
||||||
|
|
||||||
|
The template context supports read only dict operations (`get`, |
||||||
|
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`, |
||||||
|
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve` |
||||||
|
method that doesn't fail with a `KeyError` but returns an |
||||||
|
:class:`Undefined` object for missing variables. |
||||||
|
""" |
||||||
|
# XXX: we want to eventually make this be a deprecation warning and |
||||||
|
# remove it. |
||||||
|
_legacy_resolve_mode = False |
||||||
|
_fast_resolve_mode = False |
||||||
|
|
||||||
|
def __init__(self, environment, parent, name, blocks): |
||||||
|
self.parent = parent |
||||||
|
self.vars = {} |
||||||
|
self.environment = environment |
||||||
|
self.eval_ctx = EvalContext(self.environment, name) |
||||||
|
self.exported_vars = set() |
||||||
|
self.name = name |
||||||
|
|
||||||
|
# create the initial mapping of blocks. Whenever template inheritance |
||||||
|
# takes place the runtime will update this mapping with the new blocks |
||||||
|
# from the template. |
||||||
|
self.blocks = dict((k, [v]) for k, v in iteritems(blocks)) |
||||||
|
|
||||||
|
# In case we detect the fast resolve mode we can set up an alias |
||||||
|
# here that bypasses the legacy code logic. |
||||||
|
if self._fast_resolve_mode: |
||||||
|
self.resolve_or_missing = MethodType(resolve_or_missing, self) |
||||||
|
|
||||||
|
def super(self, name, current): |
||||||
|
"""Render a parent block.""" |
||||||
|
try: |
||||||
|
blocks = self.blocks[name] |
||||||
|
index = blocks.index(current) + 1 |
||||||
|
blocks[index] |
||||||
|
except LookupError: |
||||||
|
return self.environment.undefined('there is no parent block ' |
||||||
|
'called %r.' % name, |
||||||
|
name='super') |
||||||
|
return BlockReference(name, self, blocks, index) |
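# Template-level view of what this enables (illustrative file names only):
#
#   base.html:   {% block body %}base body{% endblock %}
#   child.html:  {% extends 'base.html' %}
#                {% block body %}{{ super() }} / child body{% endblock %}
#
# Rendering child.html gives 'base body / child body'.  When a block has no
# parent version, super resolves to an undefined object, so calling it fails
# under the default Undefined type.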
||||||
|
|
||||||
|
def get(self, key, default=None): |
||||||
|
"""Returns an item from the template context, if it doesn't exist |
||||||
|
`default` is returned. |
||||||
|
""" |
||||||
|
try: |
||||||
|
return self[key] |
||||||
|
except KeyError: |
||||||
|
return default |
||||||
|
|
||||||
|
def resolve(self, key): |
||||||
|
"""Looks up a variable like `__getitem__` or `get` but returns an |
||||||
|
:class:`Undefined` object with the name of the variable looked up. |
||||||
|
""" |
||||||
|
if self._legacy_resolve_mode: |
||||||
|
rv = resolve_or_missing(self, key) |
||||||
|
else: |
||||||
|
rv = self.resolve_or_missing(key) |
||||||
|
if rv is missing: |
||||||
|
return self.environment.undefined(name=key) |
||||||
|
return rv |
||||||
|
|
||||||
|
def resolve_or_missing(self, key): |
||||||
|
"""Resolves a variable like :meth:`resolve` but returns the |
||||||
|
special `missing` value if it cannot be found. |
||||||
|
""" |
||||||
|
if self._legacy_resolve_mode: |
||||||
|
rv = self.resolve(key) |
||||||
|
if isinstance(rv, Undefined): |
||||||
|
rv = missing |
||||||
|
return rv |
||||||
|
return resolve_or_missing(self, key) |
||||||
|
|
||||||
|
def get_exported(self): |
||||||
|
"""Get a new dict with the exported variables.""" |
||||||
|
return dict((k, self.vars[k]) for k in self.exported_vars) |
||||||
|
|
||||||
|
def get_all(self): |
||||||
|
"""Return the complete context as dict including the exported |
||||||
|
variables. For optimization reasons this might not return an |
||||||
|
actual copy so be careful with using it. |
||||||
|
""" |
||||||
|
if not self.vars: |
||||||
|
return self.parent |
||||||
|
if not self.parent: |
||||||
|
return self.vars |
||||||
|
return dict(self.parent, **self.vars) |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def call(__self, __obj, *args, **kwargs): |
||||||
|
"""Call the callable with the arguments and keyword arguments |
||||||
|
provided but inject the active context or environment as first |
||||||
|
argument if the callable is a :func:`contextfunction` or |
||||||
|
:func:`environmentfunction`. |
||||||
|
""" |
||||||
|
if __debug__: |
||||||
|
__traceback_hide__ = True # noqa |
||||||
|
|
||||||
|
# Allow callable classes to take a context |
||||||
|
fn = __obj.__call__ |
||||||
|
for fn_type in ('contextfunction', |
||||||
|
'evalcontextfunction', |
||||||
|
'environmentfunction'): |
||||||
|
if hasattr(fn, fn_type): |
||||||
|
__obj = fn |
||||||
|
break |
||||||
|
|
||||||
|
if isinstance(__obj, _context_function_types): |
||||||
|
if getattr(__obj, 'contextfunction', 0): |
||||||
|
args = (__self,) + args |
||||||
|
elif getattr(__obj, 'evalcontextfunction', 0): |
||||||
|
args = (__self.eval_ctx,) + args |
||||||
|
elif getattr(__obj, 'environmentfunction', 0): |
||||||
|
args = (__self.environment,) + args |
||||||
|
try: |
||||||
|
return __obj(*args, **kwargs) |
||||||
|
except StopIteration: |
||||||
|
return __self.environment.undefined('value was undefined because ' |
||||||
|
'a callable raised a ' |
||||||
|
'StopIteration exception') |
||||||
|
|
||||||
|
def derived(self, locals=None): |
||||||
|
"""Internal helper function to create a derived context. This is |
||||||
|
used in situations where the system needs a new context in the same |
||||||
|
template that is independent. |
||||||
|
""" |
||||||
|
context = new_context(self.environment, self.name, {}, |
||||||
|
self.get_all(), True, None, locals) |
||||||
|
context.eval_ctx = self.eval_ctx |
||||||
|
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks)) |
||||||
|
return context |
||||||
|
|
||||||
|
def _all(meth): |
||||||
|
proxy = lambda self: getattr(self.get_all(), meth)() |
||||||
|
proxy.__doc__ = getattr(dict, meth).__doc__ |
||||||
|
proxy.__name__ = meth |
||||||
|
return proxy |
||||||
|
|
||||||
|
keys = _all('keys') |
||||||
|
values = _all('values') |
||||||
|
items = _all('items') |
||||||
|
|
||||||
|
# not available on python 3 |
||||||
|
if PY2: |
||||||
|
iterkeys = _all('iterkeys') |
||||||
|
itervalues = _all('itervalues') |
||||||
|
iteritems = _all('iteritems') |
||||||
|
del _all |
||||||
|
|
||||||
|
def __contains__(self, name): |
||||||
|
return name in self.vars or name in self.parent |
||||||
|
|
||||||
|
def __getitem__(self, key): |
||||||
|
"""Lookup a variable or raise `KeyError` if the variable is |
||||||
|
undefined. |
||||||
|
""" |
||||||
|
item = self.resolve_or_missing(key) |
||||||
|
if item is missing: |
||||||
|
raise KeyError(key) |
||||||
|
return item |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '<%s %s of %r>' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
repr(self.get_all()), |
||||||
|
self.name |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
# register the context as mapping if possible |
||||||
|
try: |
||||||
|
from collections import Mapping |
||||||
|
Mapping.register(Context) |
||||||
|
except ImportError: |
||||||
|
pass |
||||||
|
|
||||||
|
|
||||||
|
class BlockReference(object): |
||||||
|
"""One block on a template reference.""" |
||||||
|
|
||||||
|
def __init__(self, name, context, stack, depth): |
||||||
|
self.name = name |
||||||
|
self._context = context |
||||||
|
self._stack = stack |
||||||
|
self._depth = depth |
||||||
|
|
||||||
|
@property |
||||||
|
def super(self): |
||||||
|
"""Super the block.""" |
||||||
|
if self._depth + 1 >= len(self._stack): |
||||||
|
return self._context.environment. \ |
||||||
|
undefined('there is no parent block called %r.' % |
||||||
|
self.name, name='super') |
||||||
|
return BlockReference(self.name, self._context, self._stack, |
||||||
|
self._depth + 1) |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def __call__(self): |
||||||
|
rv = concat(self._stack[self._depth](self._context)) |
||||||
|
if self._context.eval_ctx.autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv |
||||||
|
|
||||||
|
|
||||||
|
class LoopContextBase(object): |
||||||
|
"""A loop context for dynamic iteration.""" |
||||||
|
|
||||||
|
_after = _last_iteration |
||||||
|
_length = None |
||||||
|
|
||||||
|
def __init__(self, recurse=None, depth0=0): |
||||||
|
self._recurse = recurse |
||||||
|
self.index0 = -1 |
||||||
|
self.depth0 = depth0 |
||||||
|
|
||||||
|
def cycle(self, *args): |
||||||
|
"""Cycles among the arguments with the current loop index.""" |
||||||
|
if not args: |
||||||
|
raise TypeError('no items for cycling given') |
||||||
|
return args[self.index0 % len(args)] |
||||||
|
|
||||||
|
first = property(lambda x: x.index0 == 0) |
||||||
|
last = property(lambda x: x._after is _last_iteration) |
||||||
|
index = property(lambda x: x.index0 + 1) |
||||||
|
revindex = property(lambda x: x.length - x.index0) |
||||||
|
revindex0 = property(lambda x: x.length - x.index) |
||||||
|
depth = property(lambda x: x.depth0 + 1) |
||||||
|
|
||||||
|
def __len__(self): |
||||||
|
return self.length |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def loop(self, iterable): |
||||||
|
if self._recurse is None: |
||||||
|
raise TypeError('Tried to call non recursive loop. Maybe you ' |
||||||
|
"forgot the 'recursive' modifier.") |
||||||
|
return self._recurse(iterable, self._recurse, self.depth0 + 1) |
||||||
|
|
||||||
|
# a nifty trick to enhance the error message if someone tried to call |
||||||
|
# the loop without arguments or with too many arguments. |
||||||
|
__call__ = loop |
||||||
|
del loop |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '<%s %r/%r>' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
self.index, |
||||||
|
self.length |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
class LoopContext(LoopContextBase): |
||||||
|
|
||||||
|
def __init__(self, iterable, recurse=None, depth0=0): |
||||||
|
LoopContextBase.__init__(self, recurse, depth0) |
||||||
|
self._iterator = iter(iterable) |
||||||
|
|
||||||
|
# try to get the length of the iterable early. This must be done |
||||||
|
# here because there are some broken iterators around whose |
||||||
|
# __len__ is the number of iterations left (i'm looking at your |
||||||
|
# listreverseiterator!). |
||||||
|
try: |
||||||
|
self._length = len(iterable) |
||||||
|
except (TypeError, AttributeError): |
||||||
|
self._length = None |
||||||
|
self._after = self._safe_next() |
||||||
|
|
||||||
|
@property |
||||||
|
def length(self): |
||||||
|
if self._length is None: |
||||||
|
# if it was not possible to get the length of the iterator when |
||||||
|
# the loop context was created (ie: iterating over a generator) |
||||||
|
# we have to convert the iterable into a sequence and use the |
||||||
|
# length of that + the number of iterations so far. |
||||||
|
iterable = tuple(self._iterator) |
||||||
|
self._iterator = iter(iterable) |
||||||
|
iterations_done = self.index0 + 2 |
||||||
|
self._length = len(iterable) + iterations_done |
||||||
|
return self._length |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
return LoopContextIterator(self) |
||||||
|
|
||||||
|
def _safe_next(self): |
||||||
|
try: |
||||||
|
return next(self._iterator) |
||||||
|
except StopIteration: |
||||||
|
return _last_iteration |
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator |
||||||
|
class LoopContextIterator(object): |
||||||
|
"""The iterator for a loop context.""" |
||||||
|
__slots__ = ('context',) |
||||||
|
|
||||||
|
def __init__(self, context): |
||||||
|
self.context = context |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
return self |
||||||
|
|
||||||
|
def __next__(self): |
||||||
|
ctx = self.context |
||||||
|
ctx.index0 += 1 |
||||||
|
if ctx._after is _last_iteration: |
||||||
|
raise StopIteration() |
||||||
|
next_elem = ctx._after |
||||||
|
ctx._after = ctx._safe_next() |
||||||
|
return next_elem, ctx |
||||||
|
|
||||||
|
|
||||||
|
class Macro(object): |
||||||
|
"""Wraps a macro function.""" |
||||||
|
|
||||||
|
def __init__(self, environment, func, name, arguments, |
||||||
|
catch_kwargs, catch_varargs, caller, |
||||||
|
default_autoescape=None): |
||||||
|
self._environment = environment |
||||||
|
self._func = func |
||||||
|
self._argument_count = len(arguments) |
||||||
|
self.name = name |
||||||
|
self.arguments = arguments |
||||||
|
self.catch_kwargs = catch_kwargs |
||||||
|
self.catch_varargs = catch_varargs |
||||||
|
self.caller = caller |
||||||
|
self.explicit_caller = 'caller' in arguments |
||||||
|
if default_autoescape is None: |
||||||
|
default_autoescape = environment.autoescape |
||||||
|
self._default_autoescape = default_autoescape |
||||||
|
|
||||||
|
@internalcode |
||||||
|
@evalcontextfunction |
||||||
|
def __call__(self, *args, **kwargs): |
||||||
|
# This requires a bit of explanation. In the past we used to |
||||||
|
# decide largely based on compile-time information if a macro is |
||||||
|
# safe or unsafe. While there was a volatile mode it was largely |
||||||
|
# unused for deciding on escaping. This turns out to be |
||||||
|
# problematic for macros because whether a macro is safe or not depends |
||||||
|
# not so much on the escape mode when it was defined as on the mode when it |
||||||
|
# was used. |
||||||
|
# |
||||||
|
# Because however we export macros from the module system and |
||||||
|
# there are historic callers that do not pass an eval context (and |
||||||
|
# will continue to not pass one), we need to perform an instance |
||||||
|
# check here. |
||||||
|
# |
||||||
|
# This is considered safe because an eval context is not a valid |
||||||
|
# argument to callables otherwise anyway. Worst case here is |
||||||
|
# that if no eval context is passed we fall back to the compile |
||||||
|
# time autoescape flag. |
||||||
|
if args and isinstance(args[0], EvalContext): |
||||||
|
autoescape = args[0].autoescape |
||||||
|
args = args[1:] |
||||||
|
else: |
||||||
|
autoescape = self._default_autoescape |
||||||
|
|
||||||
|
# try to consume the positional arguments |
||||||
|
arguments = list(args[:self._argument_count]) |
||||||
|
off = len(arguments) |
||||||
|
|
||||||
|
# For information why this is necessary refer to the handling |
||||||
|
# of caller in the `macro_body` handler in the compiler. |
||||||
|
found_caller = False |
||||||
|
|
||||||
|
# if the number of arguments consumed is not the number of |
||||||
|
# arguments expected we start filling in keyword arguments |
||||||
|
# and defaults. |
||||||
|
if off != self._argument_count: |
||||||
|
for idx, name in enumerate(self.arguments[len(arguments):]): |
||||||
|
try: |
||||||
|
value = kwargs.pop(name) |
||||||
|
except KeyError: |
||||||
|
value = missing |
||||||
|
if name == 'caller': |
||||||
|
found_caller = True |
||||||
|
arguments.append(value) |
||||||
|
else: |
||||||
|
found_caller = self.explicit_caller |
||||||
|
|
||||||
|
# it's important that the order of these arguments does not change |
||||||
|
# if not also changed in the compiler's `function_scoping` method. |
||||||
|
# the order is caller, keyword arguments, positional arguments! |
||||||
|
if self.caller and not found_caller: |
||||||
|
caller = kwargs.pop('caller', None) |
||||||
|
if caller is None: |
||||||
|
caller = self._environment.undefined('No caller defined', |
||||||
|
name='caller') |
||||||
|
arguments.append(caller) |
||||||
|
|
||||||
|
if self.catch_kwargs: |
||||||
|
arguments.append(kwargs) |
||||||
|
elif kwargs: |
||||||
|
if 'caller' in kwargs: |
||||||
|
raise TypeError('macro %r was invoked with two values for ' |
||||||
|
'the special caller argument. This is ' |
||||||
|
'most likely a bug.' % self.name) |
||||||
|
raise TypeError('macro %r takes no keyword argument %r' % |
||||||
|
(self.name, next(iter(kwargs)))) |
||||||
|
if self.catch_varargs: |
||||||
|
arguments.append(args[self._argument_count:]) |
||||||
|
elif len(args) > self._argument_count: |
||||||
|
raise TypeError('macro %r takes not more than %d argument(s)' % |
||||||
|
(self.name, len(self.arguments))) |
||||||
|
|
||||||
|
return self._invoke(arguments, autoescape) |
||||||
|
|
||||||
|
def _invoke(self, arguments, autoescape): |
||||||
|
"""This method is being swapped out by the async implementation.""" |
||||||
|
rv = self._func(*arguments) |
||||||
|
if autoescape: |
||||||
|
rv = Markup(rv) |
||||||
|
return rv |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '<%s %s>' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
self.name is None and 'anonymous' or repr(self.name) |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
@implements_to_string |
||||||
|
class Undefined(object): |
||||||
|
"""The default undefined type. This undefined type can be printed and |
||||||
|
iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`: |
||||||
|
|
||||||
|
>>> foo = Undefined(name='foo') |
||||||
|
>>> str(foo) |
||||||
|
'' |
||||||
|
>>> not foo |
||||||
|
True |
||||||
|
>>> foo + 42 |
||||||
|
Traceback (most recent call last): |
||||||
|
... |
||||||
|
jinja2.exceptions.UndefinedError: 'foo' is undefined |
||||||
|
""" |
||||||
|
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name', |
||||||
|
'_undefined_exception') |
||||||
|
|
||||||
|
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError): |
||||||
|
self._undefined_hint = hint |
||||||
|
self._undefined_obj = obj |
||||||
|
self._undefined_name = name |
||||||
|
self._undefined_exception = exc |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def _fail_with_undefined_error(self, *args, **kwargs): |
||||||
|
"""Regular callback function for undefined objects that raises an |
||||||
|
`jinja2.exceptions.UndefinedError` on call. |
||||||
|
""" |
||||||
|
if self._undefined_hint is None: |
||||||
|
if self._undefined_obj is missing: |
||||||
|
hint = '%r is undefined' % self._undefined_name |
||||||
|
elif not isinstance(self._undefined_name, string_types): |
||||||
|
hint = '%s has no element %r' % ( |
||||||
|
object_type_repr(self._undefined_obj), |
||||||
|
self._undefined_name |
||||||
|
) |
||||||
|
else: |
||||||
|
hint = '%r has no attribute %r' % ( |
||||||
|
object_type_repr(self._undefined_obj), |
||||||
|
self._undefined_name |
||||||
|
) |
||||||
|
else: |
||||||
|
hint = self._undefined_hint |
||||||
|
raise self._undefined_exception(hint) |
||||||
|
|
||||||
|
@internalcode |
||||||
|
def __getattr__(self, name): |
||||||
|
if name[:2] == '__': |
||||||
|
raise AttributeError(name) |
||||||
|
return self._fail_with_undefined_error() |
||||||
|
|
||||||
|
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \ |
||||||
|
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \ |
||||||
|
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \ |
||||||
|
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \ |
||||||
|
__float__ = __complex__ = __pow__ = __rpow__ = __sub__ = \ |
||||||
|
__rsub__ = _fail_with_undefined_error |
||||||
|
|
||||||
|
def __eq__(self, other): |
||||||
|
return type(self) is type(other) |
||||||
|
|
||||||
|
def __ne__(self, other): |
||||||
|
return not self.__eq__(other) |
||||||
|
|
||||||
|
def __hash__(self): |
||||||
|
return id(type(self)) |
||||||
|
|
||||||
|
def __str__(self): |
||||||
|
return u'' |
||||||
|
|
||||||
|
def __len__(self): |
||||||
|
return 0 |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
if 0: |
||||||
|
yield None |
||||||
|
|
||||||
|
def __nonzero__(self): |
||||||
|
return False |
||||||
|
__bool__ = __nonzero__ |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return 'Undefined' |
||||||
|
|
||||||
|
|
||||||
|
def make_logging_undefined(logger=None, base=None): |
||||||
|
"""Given a logger object this returns a new undefined class that will |
||||||
|
log certain failures. It will log iterations and printing. If no |
||||||
|
logger is given a default logger is created. |
||||||
|
|
||||||
|
Example:: |
||||||
|
|
||||||
|
logger = logging.getLogger(__name__) |
||||||
|
LoggingUndefined = make_logging_undefined( |
||||||
|
logger=logger, |
||||||
|
base=Undefined |
||||||
|
) |
||||||
|
|
||||||
|
.. versionadded:: 2.8 |
||||||
|
|
||||||
|
:param logger: the logger to use. If not provided, a default logger |
||||||
|
is created. |
||||||
|
:param base: the base class to add logging functionality to. This |
||||||
|
defaults to :class:`Undefined`. |
||||||
|
""" |
||||||
|
if logger is None: |
||||||
|
import logging |
||||||
|
logger = logging.getLogger(__name__) |
||||||
|
logger.addHandler(logging.StreamHandler(sys.stderr)) |
||||||
|
if base is None: |
||||||
|
base = Undefined |
||||||
|
|
||||||
|
def _log_message(undef): |
||||||
|
if undef._undefined_hint is None: |
||||||
|
if undef._undefined_obj is missing: |
||||||
|
hint = '%s is undefined' % undef._undefined_name |
||||||
|
elif not isinstance(undef._undefined_name, string_types): |
||||||
|
hint = '%s has no element %s' % ( |
||||||
|
object_type_repr(undef._undefined_obj), |
||||||
|
undef._undefined_name) |
||||||
|
else: |
||||||
|
hint = '%s has no attribute %s' % ( |
||||||
|
object_type_repr(undef._undefined_obj), |
||||||
|
undef._undefined_name) |
||||||
|
else: |
||||||
|
hint = undef._undefined_hint |
||||||
|
logger.warning('Template variable warning: %s', hint) |
||||||
|
|
||||||
|
class LoggingUndefined(base): |
||||||
|
|
||||||
|
def _fail_with_undefined_error(self, *args, **kwargs): |
||||||
|
try: |
||||||
|
return base._fail_with_undefined_error(self, *args, **kwargs) |
||||||
|
except self._undefined_exception as e: |
||||||
|
logger.error('Template variable error: %s', str(e)) |
||||||
|
raise e |
||||||
|
|
||||||
|
def __str__(self): |
||||||
|
rv = base.__str__(self) |
||||||
|
_log_message(self) |
||||||
|
return rv |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
rv = base.__iter__(self) |
||||||
|
_log_message(self) |
||||||
|
return rv |
||||||
|
|
||||||
|
if PY2: |
||||||
|
def __nonzero__(self): |
||||||
|
rv = base.__nonzero__(self) |
||||||
|
_log_message(self) |
||||||
|
return rv |
||||||
|
|
||||||
|
def __unicode__(self): |
||||||
|
rv = base.__unicode__(self) |
||||||
|
_log_message(self) |
||||||
|
return rv |
||||||
|
else: |
||||||
|
def __bool__(self): |
||||||
|
rv = base.__bool__(self) |
||||||
|
_log_message(self) |
||||||
|
return rv |
||||||
|
|
||||||
|
return LoggingUndefined |
||||||
|
|
||||||
|
|
||||||
|
@implements_to_string |
||||||
|
class DebugUndefined(Undefined): |
||||||
|
"""An undefined that returns the debug info when printed. |
||||||
|
|
||||||
|
>>> foo = DebugUndefined(name='foo') |
||||||
|
>>> str(foo) |
||||||
|
'{{ foo }}' |
||||||
|
>>> not foo |
||||||
|
True |
||||||
|
>>> foo + 42 |
||||||
|
Traceback (most recent call last): |
||||||
|
... |
||||||
|
jinja2.exceptions.UndefinedError: 'foo' is undefined |
||||||
|
""" |
||||||
|
__slots__ = () |
||||||
|
|
||||||
|
def __str__(self): |
||||||
|
if self._undefined_hint is None: |
||||||
|
if self._undefined_obj is missing: |
||||||
|
return u'{{ %s }}' % self._undefined_name |
||||||
|
return '{{ no such element: %s[%r] }}' % ( |
||||||
|
object_type_repr(self._undefined_obj), |
||||||
|
self._undefined_name |
||||||
|
) |
||||||
|
return u'{{ undefined value printed: %s }}' % self._undefined_hint |
||||||
|
|
||||||
|
|
||||||
|
@implements_to_string |
||||||
|
class StrictUndefined(Undefined): |
||||||
|
"""An undefined that barks on print and iteration as well as boolean |
||||||
|
tests and all kinds of comparisons. In other words: you can do nothing |
||||||
|
with it except checking if it's defined using the `defined` test. |
||||||
|
|
||||||
|
>>> foo = StrictUndefined(name='foo') |
||||||
|
>>> str(foo) |
||||||
|
Traceback (most recent call last): |
||||||
|
... |
||||||
|
jinja2.exceptions.UndefinedError: 'foo' is undefined |
||||||
|
>>> not foo |
||||||
|
Traceback (most recent call last): |
||||||
|
... |
||||||
|
jinja2.exceptions.UndefinedError: 'foo' is undefined |
||||||
|
>>> foo + 42 |
||||||
|
Traceback (most recent call last): |
||||||
|
... |
||||||
|
jinja2.exceptions.UndefinedError: 'foo' is undefined |
||||||
|
""" |
||||||
|
__slots__ = () |
||||||
|
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \ |
||||||
|
__ne__ = __bool__ = __hash__ = \ |
||||||
|
Undefined._fail_with_undefined_error |
||||||
|
|
||||||
|
|
||||||
|
# remove remaining slots attributes, after the metaclass did the magic they |
||||||
|
# are unneeded and irritating as they contain wrong data for the subclasses. |
||||||
|
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__ |
@ -0,0 +1,475 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.sandbox |
||||||
|
~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Adds a sandbox layer to Jinja as it was the default behavior in the old |
||||||
|
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the |
||||||
|
default behavior is easier to use. |
||||||
|
|
||||||
|
The behavior can be changed by subclassing the environment. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD. |
||||||
|
""" |
||||||
|
import types |
||||||
|
import operator |
||||||
|
from collections import Mapping |
||||||
|
from jinja2.environment import Environment |
||||||
|
from jinja2.exceptions import SecurityError |
||||||
|
from jinja2._compat import string_types, PY2 |
||||||
|
from jinja2.utils import Markup |
||||||
|
|
||||||
|
from markupsafe import EscapeFormatter |
||||||
|
from string import Formatter |
||||||
|
|
||||||
|
|
||||||
|
#: maximum number of items a range may produce |
||||||
|
MAX_RANGE = 100000 |
||||||
|
|
||||||
|
#: attributes of function objects that are considered unsafe. |
||||||
|
if PY2: |
||||||
|
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict', |
||||||
|
'func_defaults', 'func_globals']) |
||||||
|
else: |
||||||
|
# On Python 3 the special attributes on functions are gone, |
||||||
|
# but they remain on methods and generators for whatever reason. |
||||||
|
UNSAFE_FUNCTION_ATTRIBUTES = set() |
||||||
|
|
||||||
|
|
||||||
|
#: unsafe method attributes. function attributes are unsafe for methods too |
||||||
|
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self']) |
||||||
|
|
||||||
|
#: unsafe generator attributes. |
||||||
|
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code']) |
||||||
|
|
||||||
|
#: unsafe attributes on coroutines |
||||||
|
UNSAFE_COROUTINE_ATTRIBUTES = set(['cr_frame', 'cr_code']) |
||||||
|
|
||||||
|
#: unsafe attributes on async generators |
||||||
|
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = set(['ag_code', 'ag_frame']) |
||||||
|
|
||||||
|
import warnings |
||||||
|
|
||||||
|
# make sure we don't warn in python 2.6 about stuff we don't care about |
||||||
|
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning, |
||||||
|
module='jinja2.sandbox') |
||||||
|
|
||||||
|
from collections import deque |
||||||
|
|
||||||
|
_mutable_set_types = (set,) |
||||||
|
_mutable_mapping_types = (dict,) |
||||||
|
_mutable_sequence_types = (list,) |
||||||
|
|
||||||
|
|
||||||
|
# on python 2.x we can register the user collection types |
||||||
|
try: |
||||||
|
from UserDict import UserDict, DictMixin |
||||||
|
from UserList import UserList |
||||||
|
_mutable_mapping_types += (UserDict, DictMixin) |
||||||
|
_mutable_set_types += (UserList,) |
||||||
|
except ImportError: |
||||||
|
pass |
||||||
|
|
||||||
|
# if sets is still available, register the mutable set from there as well |
||||||
|
try: |
||||||
|
from sets import Set |
||||||
|
_mutable_set_types += (Set,) |
||||||
|
except ImportError: |
||||||
|
pass |
||||||
|
|
||||||
|
#: register Python 2.6 abstract base classes |
||||||
|
from collections import MutableSet, MutableMapping, MutableSequence |
||||||
|
_mutable_set_types += (MutableSet,) |
||||||
|
_mutable_mapping_types += (MutableMapping,) |
||||||
|
_mutable_sequence_types += (MutableSequence,) |
||||||
|
|
||||||
|
|
||||||
|
_mutable_spec = ( |
||||||
|
(_mutable_set_types, frozenset([ |
||||||
|
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove', |
||||||
|
'symmetric_difference_update', 'update' |
||||||
|
])), |
||||||
|
(_mutable_mapping_types, frozenset([ |
||||||
|
'clear', 'pop', 'popitem', 'setdefault', 'update' |
||||||
|
])), |
||||||
|
(_mutable_sequence_types, frozenset([ |
||||||
|
'append', 'reverse', 'insert', 'sort', 'extend', 'remove' |
||||||
|
])), |
||||||
|
(deque, frozenset([ |
||||||
|
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop', |
||||||
|
'popleft', 'remove', 'rotate' |
||||||
|
])) |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
class _MagicFormatMapping(Mapping): |
||||||
|
"""This class implements a dummy wrapper to fix a bug in the Python |
||||||
|
standard library for string formatting. |
||||||
|
|
||||||
|
See http://bugs.python.org/issue13598 for information about why |
||||||
|
this is necessary. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, args, kwargs): |
||||||
|
self._args = args |
||||||
|
self._kwargs = kwargs |
||||||
|
self._last_index = 0 |
||||||
|
|
||||||
|
def __getitem__(self, key): |
||||||
|
if key == '': |
||||||
|
idx = self._last_index |
||||||
|
self._last_index += 1 |
||||||
|
try: |
||||||
|
return self._args[idx] |
||||||
|
except LookupError: |
||||||
|
pass |
||||||
|
key = str(idx) |
||||||
|
return self._kwargs[key] |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
return iter(self._kwargs) |
||||||
|
|
||||||
|
def __len__(self): |
||||||
|
return len(self._kwargs) |
||||||
|
|
||||||
|
|
||||||
|
def inspect_format_method(callable): |
||||||
|
if not isinstance(callable, (types.MethodType, |
||||||
|
types.BuiltinMethodType)) or \ |
||||||
|
callable.__name__ != 'format': |
||||||
|
return None |
||||||
|
obj = callable.__self__ |
||||||
|
if isinstance(obj, string_types): |
||||||
|
return obj |
||||||
|
|
||||||
|
|
||||||
|
def safe_range(*args): |
||||||
|
"""A range that can't generate ranges with a length of more than |
||||||
|
MAX_RANGE items. |
||||||
|
""" |
||||||
|
rng = range(*args) |
||||||
|
if len(rng) > MAX_RANGE: |
||||||
|
raise OverflowError('range too big, maximum size for range is %d' % |
||||||
|
MAX_RANGE) |
||||||
|
return rng |
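# Effect sketch (added comment; the templates are illustrative): inside a
# SandboxedEnvironment `range` is replaced by this helper, so
# {{ range(10 ** 9) }} raises OverflowError instead of producing a huge
# range, while {{ range(5)|list }} keeps working as usual.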
||||||
|
|
||||||
|
|
||||||
|
def unsafe(f): |
||||||
|
"""Marks a function or method as unsafe. |
||||||
|
|
||||||
|
:: |
||||||
|
|
||||||
|
@unsafe |
||||||
|
def delete(self): |
||||||
|
pass |
||||||
|
""" |
||||||
|
f.unsafe_callable = True |
||||||
|
return f |
||||||
|
|
||||||
|
|
||||||
|
def is_internal_attribute(obj, attr): |
||||||
|
"""Test if the attribute given is an internal python attribute. For |
||||||
|
example this function returns `True` for the `func_code` attribute of |
||||||
|
python objects. This is useful if the environment method |
||||||
|
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden. |
||||||
|
|
||||||
|
>>> from jinja2.sandbox import is_internal_attribute |
||||||
|
>>> is_internal_attribute(str, "mro") |
||||||
|
True |
||||||
|
>>> is_internal_attribute(str, "upper") |
||||||
|
False |
||||||
|
""" |
||||||
|
if isinstance(obj, types.FunctionType): |
||||||
|
if attr in UNSAFE_FUNCTION_ATTRIBUTES: |
||||||
|
return True |
||||||
|
elif isinstance(obj, types.MethodType): |
||||||
|
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \ |
||||||
|
attr in UNSAFE_METHOD_ATTRIBUTES: |
||||||
|
return True |
||||||
|
elif isinstance(obj, type): |
||||||
|
if attr == 'mro': |
||||||
|
return True |
||||||
|
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)): |
||||||
|
return True |
||||||
|
elif isinstance(obj, types.GeneratorType): |
||||||
|
if attr in UNSAFE_GENERATOR_ATTRIBUTES: |
||||||
|
return True |
||||||
|
elif hasattr(types, 'CoroutineType') and isinstance(obj, types.CoroutineType): |
||||||
|
if attr in UNSAFE_COROUTINE_ATTRIBUTES: |
||||||
|
return True |
||||||
|
elif hasattr(types, 'AsyncGeneratorType') and isinstance(obj, types.AsyncGeneratorType): |
||||||
|
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES: |
||||||
|
return True |
||||||
|
return attr.startswith('__') |
||||||
|
|
||||||
|
|
||||||
|
def modifies_known_mutable(obj, attr): |
||||||
|
"""This function checks if an attribute on a builtin mutable object |
||||||
|
(list, dict, set or deque) would modify it if called. It also supports |
||||||
|
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and |
||||||
|
with Python 2.6 onwards the abstract base classes `MutableSet`, |
||||||
|
`MutableMapping`, and `MutableSequence`. |
||||||
|
|
||||||
|
>>> modifies_known_mutable({}, "clear") |
||||||
|
True |
||||||
|
>>> modifies_known_mutable({}, "keys") |
||||||
|
False |
||||||
|
>>> modifies_known_mutable([], "append") |
||||||
|
True |
||||||
|
>>> modifies_known_mutable([], "index") |
||||||
|
False |
||||||
|
|
||||||
|
If called with an unsupported object (such as unicode) `False` is |
||||||
|
returned. |
||||||
|
|
||||||
|
>>> modifies_known_mutable("foo", "upper") |
||||||
|
False |
||||||
|
""" |
||||||
|
for typespec, unsafe in _mutable_spec: |
||||||
|
if isinstance(obj, typespec): |
||||||
|
return attr in unsafe |
||||||
|
return False |
||||||
|
|
||||||
|
|
||||||
|
class SandboxedEnvironment(Environment): |
||||||
|
"""The sandboxed environment. It works like the regular environment but |
||||||
|
tells the compiler to generate sandboxed code. Additionally subclasses of |
||||||
|
this environment may override the methods that tell the runtime what |
||||||
|
attributes or functions are safe to access. |
||||||
|
|
||||||
|
If the template tries to access insecure code a :exc:`SecurityError` is |
||||||
|
raised. However, other exceptions may also occur during rendering, so |
||||||
|
the caller has to ensure that all exceptions are caught. |
||||||
|
""" |
||||||
|
sandboxed = True |
||||||
|
|
||||||
|
#: default callback table for the binary operators. A copy of this is |
||||||
|
#: available on each instance of a sandboxed environment as |
||||||
|
#: :attr:`binop_table` |
||||||
|
default_binop_table = { |
||||||
|
'+': operator.add, |
||||||
|
'-': operator.sub, |
||||||
|
'*': operator.mul, |
||||||
|
'/': operator.truediv, |
||||||
|
'//': operator.floordiv, |
||||||
|
'**': operator.pow, |
||||||
|
'%': operator.mod |
||||||
|
} |
||||||
|
|
||||||
|
#: default callback table for the unary operators. A copy of this is |
||||||
|
#: available on each instance of a sandboxed environment as |
||||||
|
#: :attr:`unop_table` |
||||||
|
default_unop_table = { |
||||||
|
'+': operator.pos, |
||||||
|
'-': operator.neg |
||||||
|
} |
||||||
|
|
||||||
|
#: a set of binary operators that should be intercepted. Each operator |
||||||
|
#: that is added to this set (empty by default) is delegated to the |
||||||
|
#: :meth:`call_binop` method that will perform the operator. The default |
||||||
|
#: operator callback is specified by :attr:`binop_table`. |
||||||
|
#: |
||||||
|
#: The following binary operators are interceptable: |
||||||
|
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**`` |
||||||
|
#: |
||||||
|
#: The default operation from the operator table corresponds to the |
||||||
|
#: builtin function. Intercepted calls are always slower than the native |
||||||
|
#: operator call, so make sure only to intercept the ones you are |
||||||
|
#: interested in. |
||||||
|
#: |
||||||
|
#: .. versionadded:: 2.6 |
||||||
|
intercepted_binops = frozenset() |
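# Minimal subclass sketch of the interception hook described above (the
# class name and the limit are made up for illustration):
#
#   class LimitedSandbox(SandboxedEnvironment):
#       intercepted_binops = frozenset(['**'])
#
#       def call_binop(self, context, operator, left, right):
#           if operator == '**' and right > 100:
#               raise SecurityError('exponent too large')
#           return SandboxedEnvironment.call_binop(
#               self, context, operator, left, right)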
||||||
|
|
||||||
|
#: a set of unary operators that should be intercepted. Each operator |
||||||
|
#: that is added to this set (empty by default) is delegated to the |
||||||
|
#: :meth:`call_unop` method that will perform the operator. The default |
||||||
|
#: operator callback is specified by :attr:`unop_table`. |
||||||
|
#: |
||||||
|
#: The following unary operators are interceptable: ``+``, ``-`` |
||||||
|
#: |
||||||
|
#: The default operation from the operator table corresponds to the |
||||||
|
#: builtin function. Intercepted calls are always slower than the native |
||||||
|
#: operator call, so make sure only to intercept the ones you are |
||||||
|
#: interested in. |
||||||
|
#: |
||||||
|
#: .. versionadded:: 2.6 |
||||||
|
intercepted_unops = frozenset() |
||||||
|
|
||||||
|
def intercept_unop(self, operator): |
||||||
|
"""Called during template compilation with the name of a unary |
||||||
|
operator to check if it should be intercepted at runtime. If this |
||||||
|
method returns `True`, :meth:`call_unop` is executed for this unary |
||||||
|
operator. The default implementation of :meth:`call_unop` will use |
||||||
|
the :attr:`unop_table` dictionary to perform the operator with the |
||||||
|
same logic as the builtin one. |
||||||
|
|
||||||
|
The following unary operators are interceptable: ``+`` and ``-`` |
||||||
|
|
||||||
|
Intercepted calls are always slower than the native operator call, |
||||||
|
so make sure only to intercept the ones you are interested in. |
||||||
|
|
||||||
|
.. versionadded:: 2.6 |
||||||
|
""" |
||||||
|
return False |
||||||
|
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs): |
||||||
|
Environment.__init__(self, *args, **kwargs) |
||||||
|
self.globals['range'] = safe_range |
||||||
|
self.binop_table = self.default_binop_table.copy() |
||||||
|
self.unop_table = self.default_unop_table.copy() |
||||||
|
|
||||||
|
def is_safe_attribute(self, obj, attr, value): |
||||||
|
"""The sandboxed environment will call this method to check if the |
||||||
|
attribute of an object is safe to access. Per default all attributes |
||||||
|
starting with an underscore are considered private as well as the |
||||||
|
special attributes of internal python objects as returned by the |
||||||
|
:func:`is_internal_attribute` function. |
||||||
|
""" |
||||||
|
return not (attr.startswith('_') or is_internal_attribute(obj, attr)) |
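# Customisation sketch (hypothetical subclass, added for illustration): the
# usual way to tighten the policy is to override this method.
#
#   class AttrSandbox(SandboxedEnvironment):
#       def is_safe_attribute(self, obj, attr, value):
#           if attr == 'secret_token':      # hide one extra attribute
#               return False
#           return SandboxedEnvironment.is_safe_attribute(
#               self, obj, attr, value)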
||||||
|
|
||||||
|
def is_safe_callable(self, obj): |
||||||
|
"""Check if an object is safely callable. Per default a function is |
||||||
|
considered safe unless the `unsafe_callable` attribute exists and is |
||||||
|
True. Override this method to alter the behavior, but this won't |
||||||
|
affect the `unsafe` decorator from this module. |
||||||
|
""" |
||||||
|
return not (getattr(obj, 'unsafe_callable', False) or |
||||||
|
getattr(obj, 'alters_data', False)) |
||||||
|
|
||||||
|
def call_binop(self, context, operator, left, right): |
||||||
|
"""For intercepted binary operator calls (:meth:`intercepted_binops`) |
||||||
|
this function is executed instead of the builtin operator. This can |
||||||
|
be used to fine tune the behavior of certain operators. |
||||||
|
|
||||||
|
.. versionadded:: 2.6 |
||||||
|
""" |
||||||
|
return self.binop_table[operator](left, right) |
||||||
|
|
||||||
|
def call_unop(self, context, operator, arg): |
||||||
|
"""For intercepted unary operator calls (:meth:`intercepted_unops`) |
||||||
|
this function is executed instead of the builtin operator. This can |
||||||
|
be used to fine tune the behavior of certain operators. |
||||||
|
|
||||||
|
.. versionadded:: 2.6 |
||||||
|
""" |
||||||
|
return self.unop_table[operator](arg) |
||||||
|
|
||||||
|
def getitem(self, obj, argument): |
||||||
|
"""Subscribe an object from sandboxed code.""" |
||||||
|
try: |
||||||
|
return obj[argument] |
||||||
|
except (TypeError, LookupError): |
||||||
|
if isinstance(argument, string_types): |
||||||
|
try: |
||||||
|
attr = str(argument) |
||||||
|
except Exception: |
||||||
|
pass |
||||||
|
else: |
||||||
|
try: |
||||||
|
value = getattr(obj, attr) |
||||||
|
except AttributeError: |
||||||
|
pass |
||||||
|
else: |
||||||
|
if self.is_safe_attribute(obj, argument, value): |
||||||
|
return value |
||||||
|
return self.unsafe_undefined(obj, argument) |
||||||
|
return self.undefined(obj=obj, name=argument) |
||||||
|
|
||||||
|
def getattr(self, obj, attribute): |
||||||
|
"""Subscribe an object from sandboxed code and prefer the |
||||||
|
attribute. The attribute passed *must* be a bytestring. |
||||||
|
""" |
||||||
|
try: |
||||||
|
value = getattr(obj, attribute) |
||||||
|
except AttributeError: |
||||||
|
try: |
||||||
|
return obj[attribute] |
||||||
|
except (TypeError, LookupError): |
||||||
|
pass |
||||||
|
else: |
||||||
|
if self.is_safe_attribute(obj, attribute, value): |
||||||
|
return value |
||||||
|
return self.unsafe_undefined(obj, attribute) |
||||||
|
return self.undefined(obj=obj, name=attribute) |
||||||
|
|
||||||
|
def unsafe_undefined(self, obj, attribute): |
||||||
|
"""Return an undefined object for unsafe attributes.""" |
||||||
|
return self.undefined('access to attribute %r of %r ' |
||||||
|
'object is unsafe.' % ( |
||||||
|
attribute, |
||||||
|
obj.__class__.__name__ |
||||||
|
), name=attribute, obj=obj, exc=SecurityError) |
||||||
|
|
||||||
|
def format_string(self, s, args, kwargs): |
||||||
|
"""If a format call is detected, then this is routed through this |
||||||
|
method so that our safety sandbox can be used for it. |
||||||
|
""" |
||||||
|
if isinstance(s, Markup): |
||||||
|
formatter = SandboxedEscapeFormatter(self, s.escape) |
||||||
|
else: |
||||||
|
formatter = SandboxedFormatter(self) |
||||||
|
kwargs = _MagicFormatMapping(args, kwargs) |
||||||
|
rv = formatter.vformat(s, args, kwargs) |
||||||
|
return type(s)(rv) |
||||||
|
|
||||||
|
def call(__self, __context, __obj, *args, **kwargs): |
||||||
|
"""Call an object from sandboxed code.""" |
||||||
|
fmt = inspect_format_method(__obj) |
||||||
|
if fmt is not None: |
||||||
|
return __self.format_string(fmt, args, kwargs) |
||||||
|
|
||||||
|
# the double prefixes are to avoid double keyword argument |
||||||
|
# errors when proxying the call. |
||||||
|
if not __self.is_safe_callable(__obj): |
||||||
|
raise SecurityError('%r is not safely callable' % (__obj,)) |
||||||
|
return __context.call(__obj, *args, **kwargs) |
||||||
|
|
||||||
|
|
||||||
|
class ImmutableSandboxedEnvironment(SandboxedEnvironment): |
||||||
|
"""Works exactly like the regular `SandboxedEnvironment` but does not |
||||||
|
permit modifications on the builtin mutable objects `list`, `set`, and |
||||||
|
`dict` by using the :func:`modifies_known_mutable` function. |
||||||
|
""" |
||||||
|
|
||||||
|
def is_safe_attribute(self, obj, attr, value): |
||||||
|
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value): |
||||||
|
return False |
||||||
|
return not modifies_known_mutable(obj, attr) |
||||||
|
|
||||||
|
|
||||||
|
# This really is not a public API apparently. |
||||||
|
try: |
||||||
|
from _string import formatter_field_name_split |
||||||
|
except ImportError: |
||||||
|
def formatter_field_name_split(field_name): |
||||||
|
return field_name._formatter_field_name_split() |
||||||
|
|
||||||
|
|
||||||
|
class SandboxedFormatterMixin(object): |
||||||
|
|
||||||
|
def __init__(self, env): |
||||||
|
self._env = env |
||||||
|
|
||||||
|
def get_field(self, field_name, args, kwargs): |
||||||
|
first, rest = formatter_field_name_split(field_name) |
||||||
|
obj = self.get_value(first, args, kwargs) |
||||||
|
for is_attr, i in rest: |
||||||
|
if is_attr: |
||||||
|
obj = self._env.getattr(obj, i) |
||||||
|
else: |
||||||
|
obj = self._env.getitem(obj, i) |
||||||
|
return obj, first |
||||||
|
|
||||||
|
class SandboxedFormatter(SandboxedFormatterMixin, Formatter): |
||||||
|
|
||||||
|
def __init__(self, env): |
||||||
|
SandboxedFormatterMixin.__init__(self, env) |
||||||
|
Formatter.__init__(self) |
||||||
|
|
||||||
|
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter): |
||||||
|
|
||||||
|
def __init__(self, env, escape): |
||||||
|
SandboxedFormatterMixin.__init__(self, env) |
||||||
|
EscapeFormatter.__init__(self, escape) |
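For context, a minimal usage sketch of the sandbox defined in this file; the ``Account`` class and its ``alters_data`` marker are invented here for illustration::

    from jinja2.sandbox import SandboxedEnvironment
    from jinja2.exceptions import SecurityError

    class Account(object):
        def delete(self):
            return 'deleted'
        # Django-style marker picked up by is_safe_callable()
        delete.alters_data = True

    env = SandboxedEnvironment()
    print(env.from_string('{{ 1 + 1 }}').render())   # ordinary expressions still work
    try:
        env.from_string('{{ acct.delete() }}').render(acct=Account())
    except SecurityError as exc:
        print('blocked:', exc)                       # the unsafe call is rejected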
@ -0,0 +1,185 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.tests |
||||||
|
~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Jinja test functions. Used with the "is" operator. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import re |
||||||
|
from collections import Mapping |
||||||
|
from jinja2.runtime import Undefined |
||||||
|
from jinja2._compat import text_type, string_types, integer_types |
||||||
|
import decimal |
||||||
|
|
||||||
|
number_re = re.compile(r'^-?\d+(\.\d+)?$') |
||||||
|
regex_type = type(number_re) |
||||||
|
|
||||||
|
|
||||||
|
test_callable = callable |
||||||
|
|
||||||
|
|
||||||
|
def test_odd(value): |
||||||
|
"""Return true if the variable is odd.""" |
||||||
|
return value % 2 == 1 |
||||||
|
|
||||||
|
|
||||||
|
def test_even(value): |
||||||
|
"""Return true if the variable is even.""" |
||||||
|
return value % 2 == 0 |
||||||
|
|
||||||
|
|
||||||
|
def test_divisibleby(value, num): |
||||||
|
"""Check if a variable is divisible by a number.""" |
||||||
|
return value % num == 0 |
||||||
|
|
||||||
|
|
||||||
|
def test_defined(value): |
||||||
|
"""Return true if the variable is defined: |
||||||
|
|
||||||
|
.. sourcecode:: jinja |
||||||
|
|
||||||
|
{% if variable is defined %} |
||||||
|
value of variable: {{ variable }} |
||||||
|
{% else %} |
||||||
|
variable is not defined |
||||||
|
{% endif %} |
||||||
|
|
||||||
|
See the :func:`default` filter for a simple way to set undefined |
||||||
|
variables. |
||||||
|
""" |
||||||
|
return not isinstance(value, Undefined) |
||||||
|
|
||||||
|
|
||||||
|
def test_undefined(value): |
||||||
|
"""Like :func:`defined` but the other way round.""" |
||||||
|
return isinstance(value, Undefined) |
||||||
|
|
||||||
|
|
||||||
|
def test_none(value): |
||||||
|
"""Return true if the variable is none.""" |
||||||
|
return value is None |
||||||
|
|
||||||
|
|
||||||
|
def test_lower(value): |
||||||
|
"""Return true if the variable is lowercased.""" |
||||||
|
return text_type(value).islower() |
||||||
|
|
||||||
|
|
||||||
|
def test_upper(value): |
||||||
|
"""Return true if the variable is uppercased.""" |
||||||
|
return text_type(value).isupper() |
||||||
|
|
||||||
|
|
||||||
|
def test_string(value): |
||||||
|
"""Return true if the object is a string.""" |
||||||
|
return isinstance(value, string_types) |
||||||
|
|
||||||
|
|
||||||
|
def test_mapping(value): |
||||||
|
"""Return true if the object is a mapping (dict etc.). |
||||||
|
|
||||||
|
.. versionadded:: 2.6 |
||||||
|
""" |
||||||
|
return isinstance(value, Mapping) |
||||||
|
|
||||||
|
|
||||||
|
def test_number(value): |
||||||
|
"""Return true if the variable is a number.""" |
||||||
|
return isinstance(value, integer_types + (float, complex, decimal.Decimal)) |
||||||
|
|
||||||
|
|
||||||
|
def test_sequence(value): |
||||||
|
"""Return true if the variable is a sequence. Sequences are variables |
||||||
|
that are iterable. |
||||||
|
""" |
||||||
|
try: |
||||||
|
len(value) |
||||||
|
value.__getitem__ |
||||||
|
except: |
||||||
|
return False |
||||||
|
return True |
||||||
|
|
||||||
|
|
||||||
|
def test_equalto(value, other): |
||||||
|
"""Check if an object has the same value as another object: |
||||||
|
|
||||||
|
.. sourcecode:: jinja |
||||||
|
|
||||||
|
{% if foo.expression is equalto 42 %} |
||||||
|
the foo attribute evaluates to the constant 42 |
||||||
|
{% endif %} |
||||||
|
|
||||||
|
This appears to be a useless test as it does exactly the same as the |
||||||
|
``==`` operator, but it can be useful when used together with the |
||||||
|
`selectattr` function: |
||||||
|
|
||||||
|
.. sourcecode:: jinja |
||||||
|
|
||||||
|
{{ users|selectattr("email", "equalto", "foo@bar.invalid") }} |
||||||
|
|
||||||
|
.. versionadded:: 2.8 |
||||||
|
""" |
||||||
|
return value == other |
||||||
|
|
||||||
|
|
||||||
|
def test_sameas(value, other): |
||||||
|
"""Check if an object points to the same memory address than another |
||||||
|
object: |
||||||
|
|
||||||
|
.. sourcecode:: jinja |
||||||
|
|
||||||
|
{% if foo.attribute is sameas false %} |
||||||
|
the foo attribute really is the `False` singleton |
||||||
|
{% endif %} |
||||||
|
""" |
||||||
|
return value is other |
||||||
|
|
||||||
|
|
||||||
|
def test_iterable(value): |
||||||
|
"""Check if it's possible to iterate over an object.""" |
||||||
|
try: |
||||||
|
iter(value) |
||||||
|
except TypeError: |
||||||
|
return False |
||||||
|
return True |
||||||
|
|
||||||
|
|
||||||
|
def test_escaped(value): |
||||||
|
"""Check if the value is escaped.""" |
||||||
|
return hasattr(value, '__html__') |
||||||
|
|
||||||
|
|
||||||
|
def test_greaterthan(value, other): |
||||||
|
"""Check if value is greater than other.""" |
||||||
|
return value > other |
||||||
|
|
||||||
|
|
||||||
|
def test_lessthan(value, other): |
||||||
|
"""Check if value is less than other.""" |
||||||
|
return value < other |
||||||
|
|
||||||
|
|
||||||
|
TESTS = { |
||||||
|
'odd': test_odd, |
||||||
|
'even': test_even, |
||||||
|
'divisibleby': test_divisibleby, |
||||||
|
'defined': test_defined, |
||||||
|
'undefined': test_undefined, |
||||||
|
'none': test_none, |
||||||
|
'lower': test_lower, |
||||||
|
'upper': test_upper, |
||||||
|
'string': test_string, |
||||||
|
'mapping': test_mapping, |
||||||
|
'number': test_number, |
||||||
|
'sequence': test_sequence, |
||||||
|
'iterable': test_iterable, |
||||||
|
'callable': test_callable, |
||||||
|
'sameas': test_sameas, |
||||||
|
'equalto': test_equalto, |
||||||
|
'escaped': test_escaped, |
||||||
|
'greaterthan': test_greaterthan, |
||||||
|
'lessthan': test_lessthan |
||||||
|
} |
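These functions are exposed to templates through the ``is`` operator; a short sketch of how they are used, and how an extra test (the invented ``prime`` below) can be registered on an environment::

    from jinja2 import Environment

    env = Environment()
    # built-in tests from the table above
    print(env.from_string('{{ 4 is even }} {{ value is defined }}').render())  # True False

    # custom tests are plain functions added to env.tests
    def test_prime(n):
        return n > 1 and all(n % i for i in range(2, int(n ** 0.5) + 1))
    env.tests['prime'] = test_prime
    print(env.from_string('{{ 7 is prime }}').render())  # True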
@ -0,0 +1,624 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.utils |
||||||
|
~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Utility functions. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import re |
||||||
|
import json |
||||||
|
import errno |
||||||
|
from collections import deque |
||||||
|
from threading import Lock |
||||||
|
from jinja2._compat import text_type, string_types, implements_iterator, \ |
||||||
|
url_quote |
||||||
|
|
||||||
|
|
||||||
|
_word_split_re = re.compile(r'(\s+)') |
||||||
|
_punctuation_re = re.compile( |
||||||
|
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % ( |
||||||
|
'|'.join(map(re.escape, ('(', '<', '&lt;'))), |
||||||
|
'|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;'))) |
||||||
|
) |
||||||
|
) |
||||||
|
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$') |
||||||
|
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') |
||||||
|
_entity_re = re.compile(r'&([^;]+);') |
||||||
|
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' |
||||||
|
_digits = '0123456789' |
||||||
|
|
||||||
|
# special singleton representing missing values for the runtime |
||||||
|
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})() |
||||||
|
|
||||||
|
# internal code |
||||||
|
internal_code = set() |
||||||
|
|
||||||
|
concat = u''.join |
||||||
|
|
||||||
|
_slash_escape = '\\/' not in json.dumps('/') |
||||||
|
|
||||||
|
|
||||||
|
def contextfunction(f): |
||||||
|
"""This decorator can be used to mark a function or method context callable. |
||||||
|
A context callable is passed the active :class:`Context` as first argument when |
||||||
|
called from the template. This is useful if a function wants to get access |
||||||
|
to the context or functions provided on the context object. For example |
||||||
|
a function that returns a sorted list of template variables the current |
||||||
|
template exports could look like this:: |
||||||
|
|
||||||
|
@contextfunction |
||||||
|
def get_exported_names(context): |
||||||
|
return sorted(context.exported_vars) |
||||||
|
""" |
||||||
|
f.contextfunction = True |
||||||
|
return f |
||||||
|
|
||||||
|
|
||||||
|
def evalcontextfunction(f): |
||||||
|
"""This decorator can be used to mark a function or method as an eval |
||||||
|
context callable. This is similar to the :func:`contextfunction` |
||||||
|
but instead of passing the context, an evaluation context object is |
||||||
|
passed. For more information about the eval context, see |
||||||
|
:ref:`eval-context`. |
||||||
|
|
||||||
|
.. versionadded:: 2.4 |
||||||
|
""" |
||||||
|
f.evalcontextfunction = True |
||||||
|
return f |
||||||
|
|
||||||
|
|
||||||
|
def environmentfunction(f): |
||||||
|
"""This decorator can be used to mark a function or method as environment |
||||||
|
callable. This decorator works exactly like the :func:`contextfunction` |
||||||
|
decorator, except that the first argument is the active :class:`Environment` |
||||||
|
and not context. |
||||||
|
""" |
||||||
|
f.environmentfunction = True |
||||||
|
return f |
||||||
|
|
||||||
|
|
||||||
|
def internalcode(f): |
||||||
|
"""Marks the function as internally used""" |
||||||
|
internal_code.add(f.__code__) |
||||||
|
return f |
||||||
|
|
||||||
|
|
||||||
|
def is_undefined(obj): |
||||||
|
"""Check if the object passed is undefined. This does nothing more than |
||||||
|
performing an instance check against :class:`Undefined` but looks nicer. |
||||||
|
This can be used for custom filters or tests that want to react to |
||||||
|
undefined variables. For example a custom default filter can look like |
||||||
|
this:: |
||||||
|
|
||||||
|
def default(var, default=''): |
||||||
|
if is_undefined(var): |
||||||
|
return default |
||||||
|
return var |
||||||
|
""" |
||||||
|
from jinja2.runtime import Undefined |
||||||
|
return isinstance(obj, Undefined) |
||||||
|
|
||||||
|
|
||||||
|
def consume(iterable): |
||||||
|
"""Consumes an iterable without doing anything with it.""" |
||||||
|
for event in iterable: |
||||||
|
pass |
||||||
|
|
||||||
|
|
||||||
|
def clear_caches(): |
||||||
|
"""Jinja2 keeps internal caches for environments and lexers. These are |
||||||
|
used so that Jinja2 doesn't have to recreate environments and lexers all |
||||||
|
the time. Normally you don't have to care about that but if you are |
||||||
|
measuring memory consumption you may want to clean the caches. |
||||||
|
""" |
||||||
|
from jinja2.environment import _spontaneous_environments |
||||||
|
from jinja2.lexer import _lexer_cache |
||||||
|
_spontaneous_environments.clear() |
||||||
|
_lexer_cache.clear() |
||||||
|
|
||||||
|
|
||||||
|
def import_string(import_name, silent=False): |
||||||
|
"""Imports an object based on a string. This is useful if you want to |
||||||
|
use import paths as endpoints or something similar. An import path can |
||||||
|
be specified either in dotted notation (``xml.sax.saxutils.escape``) |
||||||
|
or with a colon as object delimiter (``xml.sax.saxutils:escape``). |
||||||
|
|
||||||
|
If `silent` is True the return value will be `None` if the import |
||||||
|
fails. |
||||||
|
|
||||||
|
:return: imported object |
||||||
|
""" |
||||||
|
try: |
||||||
|
if ':' in import_name: |
||||||
|
module, obj = import_name.split(':', 1) |
||||||
|
elif '.' in import_name: |
||||||
|
items = import_name.split('.') |
||||||
|
module = '.'.join(items[:-1]) |
||||||
|
obj = items[-1] |
||||||
|
else: |
||||||
|
return __import__(import_name) |
||||||
|
return getattr(__import__(module, None, None, [obj]), obj) |
||||||
|
except (ImportError, AttributeError): |
||||||
|
if not silent: |
||||||
|
raise |
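A brief sketch of how ``import_string`` resolves both notations; the standard-library target is just an example::

    from jinja2.utils import import_string

    escape_fn = import_string('xml.sax.saxutils.escape')   # dotted notation
    same_fn = import_string('xml.sax.saxutils:escape')     # colon notation
    assert escape_fn is same_fn

    # with silent=True a failed import returns None instead of raising
    assert import_string('no.such.module', silent=True) is None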
||||||
|
|
||||||
|
|
||||||
|
def open_if_exists(filename, mode='rb'): |
||||||
|
"""Returns a file descriptor for the filename if that file exists, |
||||||
|
otherwise `None`. |
||||||
|
""" |
||||||
|
try: |
||||||
|
return open(filename, mode) |
||||||
|
except IOError as e: |
||||||
|
if e.errno not in (errno.ENOENT, errno.EISDIR, errno.EINVAL): |
||||||
|
raise |
||||||
|
|
||||||
|
|
||||||
|
def object_type_repr(obj): |
||||||
|
"""Returns the name of the object's type. For some recognized |
||||||
|
singletons the name of the object is returned instead. (For |
||||||
|
example for `None` and `Ellipsis`). |
||||||
|
""" |
||||||
|
if obj is None: |
||||||
|
return 'None' |
||||||
|
elif obj is Ellipsis: |
||||||
|
return 'Ellipsis' |
||||||
|
# __builtin__ in 2.x, builtins in 3.x |
||||||
|
if obj.__class__.__module__ in ('__builtin__', 'builtins'): |
||||||
|
name = obj.__class__.__name__ |
||||||
|
else: |
||||||
|
name = obj.__class__.__module__ + '.' + obj.__class__.__name__ |
||||||
|
return '%s object' % name |
||||||
|
|
||||||
|
|
||||||
|
def pformat(obj, verbose=False): |
||||||
|
"""Prettyprint an object. Either use the `pretty` library or the |
||||||
|
builtin `pprint`. |
||||||
|
""" |
||||||
|
try: |
||||||
|
from pretty import pretty |
||||||
|
return pretty(obj, verbose=verbose) |
||||||
|
except ImportError: |
||||||
|
from pprint import pformat |
||||||
|
return pformat(obj) |
||||||
|
|
||||||
|
|
||||||
|
def urlize(text, trim_url_limit=None, rel=None, target=None): |
||||||
|
"""Converts any URLs in text into clickable links. Works on http://, |
||||||
|
https:// and www. links. Links can have trailing punctuation (periods, |
||||||
|
commas, close-parens) and leading punctuation (opening parens) and |
||||||
|
it'll still do the right thing. |
||||||
|
|
||||||
|
If trim_url_limit is not None, the URLs in link text will be limited |
||||||
|
to trim_url_limit characters. |
||||||
|
|
||||||
|
If rel is not None, the URLs in link text will get a ``rel`` |
||||||
|
attribute with that value. |
||||||
|
|
||||||
|
If target is not None, a target attribute will be added to the link. |
||||||
|
""" |
||||||
|
trim_url = lambda x, limit=trim_url_limit: limit is not None \ |
||||||
|
and (x[:limit] + (len(x) >= limit and '...' |
||||||
|
or '')) or x |
||||||
|
words = _word_split_re.split(text_type(escape(text))) |
||||||
|
rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or '' |
||||||
|
target_attr = target and ' target="%s"' % escape(target) or '' |
||||||
|
|
||||||
|
for i, word in enumerate(words): |
||||||
|
match = _punctuation_re.match(word) |
||||||
|
if match: |
||||||
|
lead, middle, trail = match.groups() |
||||||
|
if middle.startswith('www.') or ( |
||||||
|
'@' not in middle and |
||||||
|
not middle.startswith('http://') and |
||||||
|
not middle.startswith('https://') and |
||||||
|
len(middle) > 0 and |
||||||
|
middle[0] in _letters + _digits and ( |
||||||
|
middle.endswith('.org') or |
||||||
|
middle.endswith('.net') or |
||||||
|
middle.endswith('.com') |
||||||
|
)): |
||||||
|
middle = '<a href="http://%s"%s%s>%s</a>' % (middle, |
||||||
|
rel_attr, target_attr, trim_url(middle)) |
||||||
|
if middle.startswith('http://') or \ |
||||||
|
middle.startswith('https://'): |
||||||
|
middle = '<a href="%s"%s%s>%s</a>' % (middle, |
||||||
|
rel_attr, target_attr, trim_url(middle)) |
||||||
|
if '@' in middle and not middle.startswith('www.') and \ |
||||||
|
not ':' in middle and _simple_email_re.match(middle): |
||||||
|
middle = '<a href="mailto:%s">%s</a>' % (middle, middle) |
||||||
|
if lead + middle + trail != word: |
||||||
|
words[i] = lead + middle + trail |
||||||
|
return u''.join(words) |
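For illustration, roughly what ``urlize`` produces for plain text containing a URL and an e-mail address::

    from jinja2.utils import urlize

    print(urlize('see http://example.com or mail foo@example.com'))
    # see <a href="http://example.com">http://example.com</a> or
    # mail <a href="mailto:foo@example.com">foo@example.com</a>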
||||||
|
|
||||||
|
|
||||||
|
def generate_lorem_ipsum(n=5, html=True, min=20, max=100): |
||||||
|
"""Generate some lorem ipsum for the template.""" |
||||||
|
from jinja2.constants import LOREM_IPSUM_WORDS |
||||||
|
from random import choice, randrange |
||||||
|
words = LOREM_IPSUM_WORDS.split() |
||||||
|
result = [] |
||||||
|
|
||||||
|
for _ in range(n): |
||||||
|
next_capitalized = True |
||||||
|
last_comma = last_fullstop = 0 |
||||||
|
word = None |
||||||
|
last = None |
||||||
|
p = [] |
||||||
|
|
||||||
|
# each paragraph contains 20 to 100 words. |
||||||
|
for idx, _ in enumerate(range(randrange(min, max))): |
||||||
|
while True: |
||||||
|
word = choice(words) |
||||||
|
if word != last: |
||||||
|
last = word |
||||||
|
break |
||||||
|
if next_capitalized: |
||||||
|
word = word.capitalize() |
||||||
|
next_capitalized = False |
||||||
|
# add commas |
||||||
|
if idx - randrange(3, 8) > last_comma: |
||||||
|
last_comma = idx |
||||||
|
last_fullstop += 2 |
||||||
|
word += ',' |
||||||
|
# add end of sentences |
||||||
|
if idx - randrange(10, 20) > last_fullstop: |
||||||
|
last_comma = last_fullstop = idx |
||||||
|
word += '.' |
||||||
|
next_capitalized = True |
||||||
|
p.append(word) |
||||||
|
|
||||||
|
# ensure that the paragraph ends with a dot. |
||||||
|
p = u' '.join(p) |
||||||
|
if p.endswith(','): |
||||||
|
p = p[:-1] + '.' |
||||||
|
elif not p.endswith('.'): |
||||||
|
p += '.' |
||||||
|
result.append(p) |
||||||
|
|
||||||
|
if not html: |
||||||
|
return u'\n\n'.join(result) |
||||||
|
return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result)) |
||||||
|
|
||||||
|
|
||||||
|
def unicode_urlencode(obj, charset='utf-8', for_qs=False): |
||||||
|
"""URL escapes a single bytestring or unicode string with the |
||||||
|
given charset if applicable to URL safe quoting under all rules |
||||||
|
that need to be considered under all supported Python versions. |
||||||
|
|
||||||
|
If non strings are provided they are converted to their unicode |
||||||
|
representation first. |
||||||
|
""" |
||||||
|
if not isinstance(obj, string_types): |
||||||
|
obj = text_type(obj) |
||||||
|
if isinstance(obj, text_type): |
||||||
|
obj = obj.encode(charset) |
||||||
|
safe = not for_qs and b'/' or b'' |
||||||
|
rv = text_type(url_quote(obj, safe)) |
||||||
|
if for_qs: |
||||||
|
rv = rv.replace('%20', '+') |
||||||
|
return rv |
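A small sketch of the two quoting modes::

    from jinja2.utils import unicode_urlencode

    print(unicode_urlencode(u'a/b c'))               # a/b%20c   ('/' stays unquoted)
    print(unicode_urlencode(u'a/b c', for_qs=True))  # a%2Fb+c   (query-string rules)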
||||||
|
|
||||||
|
|
||||||
|
class LRUCache(object): |
||||||
|
"""A simple LRU Cache implementation.""" |
||||||
|
|
||||||
|
# this is fast for small capacities (something below 1000) but doesn't |
||||||
|
# scale. But as long as it's only used as storage for templates this |
||||||
|
# won't do any harm. |
||||||
|
|
||||||
|
def __init__(self, capacity): |
||||||
|
self.capacity = capacity |
||||||
|
self._mapping = {} |
||||||
|
self._queue = deque() |
||||||
|
self._postinit() |
||||||
|
|
||||||
|
def _postinit(self): |
||||||
|
# alias all queue methods for faster lookup |
||||||
|
self._popleft = self._queue.popleft |
||||||
|
self._pop = self._queue.pop |
||||||
|
self._remove = self._queue.remove |
||||||
|
self._wlock = Lock() |
||||||
|
self._append = self._queue.append |
||||||
|
|
||||||
|
def __getstate__(self): |
||||||
|
return { |
||||||
|
'capacity': self.capacity, |
||||||
|
'_mapping': self._mapping, |
||||||
|
'_queue': self._queue |
||||||
|
} |
||||||
|
|
||||||
|
def __setstate__(self, d): |
||||||
|
self.__dict__.update(d) |
||||||
|
self._postinit() |
||||||
|
|
||||||
|
def __getnewargs__(self): |
||||||
|
return (self.capacity,) |
||||||
|
|
||||||
|
def copy(self): |
||||||
|
"""Return a shallow copy of the instance.""" |
||||||
|
rv = self.__class__(self.capacity) |
||||||
|
rv._mapping.update(self._mapping) |
||||||
|
rv._queue = deque(self._queue) |
||||||
|
return rv |
||||||
|
|
||||||
|
def get(self, key, default=None): |
||||||
|
"""Return an item from the cache dict or `default`""" |
||||||
|
try: |
||||||
|
return self[key] |
||||||
|
except KeyError: |
||||||
|
return default |
||||||
|
|
||||||
|
def setdefault(self, key, default=None): |
||||||
|
"""Set `default` if the key is not in the cache otherwise |
||||||
|
leave unchanged. Return the value of this key. |
||||||
|
""" |
||||||
|
self._wlock.acquire() |
||||||
|
try: |
||||||
|
try: |
||||||
|
return self[key] |
||||||
|
except KeyError: |
||||||
|
self[key] = default |
||||||
|
return default |
||||||
|
finally: |
||||||
|
self._wlock.release() |
||||||
|
|
||||||
|
def clear(self): |
||||||
|
"""Clear the cache.""" |
||||||
|
self._wlock.acquire() |
||||||
|
try: |
||||||
|
self._mapping.clear() |
||||||
|
self._queue.clear() |
||||||
|
finally: |
||||||
|
self._wlock.release() |
||||||
|
|
||||||
|
def __contains__(self, key): |
||||||
|
"""Check if a key exists in this cache.""" |
||||||
|
return key in self._mapping |
||||||
|
|
||||||
|
def __len__(self): |
||||||
|
"""Return the current size of the cache.""" |
||||||
|
return len(self._mapping) |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '<%s %r>' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
self._mapping |
||||||
|
) |
||||||
|
|
||||||
|
def __getitem__(self, key): |
||||||
|
"""Get an item from the cache. Moves the item up so that it has the |
||||||
|
highest priority then. |
||||||
|
|
||||||
|
Raise a `KeyError` if it does not exist. |
||||||
|
""" |
||||||
|
self._wlock.acquire() |
||||||
|
try: |
||||||
|
rv = self._mapping[key] |
||||||
|
if self._queue[-1] != key: |
||||||
|
try: |
||||||
|
self._remove(key) |
||||||
|
except ValueError: |
||||||
|
# if something removed the key from the container |
||||||
|
# when we read, ignore the ValueError that we would |
||||||
|
# get otherwise. |
||||||
|
pass |
||||||
|
self._append(key) |
||||||
|
return rv |
||||||
|
finally: |
||||||
|
self._wlock.release() |
||||||
|
|
||||||
|
def __setitem__(self, key, value): |
||||||
|
"""Sets the value for an item. Moves the item up so that it |
||||||
|
has the highest priority then. |
||||||
|
""" |
||||||
|
self._wlock.acquire() |
||||||
|
try: |
||||||
|
if key in self._mapping: |
||||||
|
self._remove(key) |
||||||
|
elif len(self._mapping) == self.capacity: |
||||||
|
del self._mapping[self._popleft()] |
||||||
|
self._append(key) |
||||||
|
self._mapping[key] = value |
||||||
|
finally: |
||||||
|
self._wlock.release() |
||||||
|
|
||||||
|
def __delitem__(self, key): |
||||||
|
"""Remove an item from the cache dict. |
||||||
|
Raise a `KeyError` if it does not exist. |
||||||
|
""" |
||||||
|
self._wlock.acquire() |
||||||
|
try: |
||||||
|
del self._mapping[key] |
||||||
|
try: |
||||||
|
self._remove(key) |
||||||
|
except ValueError: |
||||||
|
# __getitem__ is not locked, it might happen |
||||||
|
pass |
||||||
|
finally: |
||||||
|
self._wlock.release() |
||||||
|
|
||||||
|
def items(self): |
||||||
|
"""Return a list of items.""" |
||||||
|
result = [(key, self._mapping[key]) for key in list(self._queue)] |
||||||
|
result.reverse() |
||||||
|
return result |
||||||
|
|
||||||
|
def iteritems(self): |
||||||
|
"""Iterate over all items.""" |
||||||
|
return iter(self.items()) |
||||||
|
|
||||||
|
def values(self): |
||||||
|
"""Return a list of all values.""" |
||||||
|
return [x[1] for x in self.items()] |
||||||
|
|
||||||
|
def itervalue(self): |
||||||
|
"""Iterate over all values.""" |
||||||
|
return iter(self.values()) |
||||||
|
|
||||||
|
def keys(self): |
||||||
|
"""Return a list of all keys ordered by most recent usage.""" |
||||||
|
return list(self) |
||||||
|
|
||||||
|
def iterkeys(self): |
||||||
|
"""Iterate over all keys in the cache dict, ordered by |
||||||
|
the most recent usage. |
||||||
|
""" |
||||||
|
return reversed(tuple(self._queue)) |
||||||
|
|
||||||
|
__iter__ = iterkeys |
||||||
|
|
||||||
|
def __reversed__(self): |
||||||
|
"""Iterate over the values in the cache dict, oldest items |
||||||
|
coming first. |
||||||
|
""" |
||||||
|
return iter(tuple(self._queue)) |
||||||
|
|
||||||
|
__copy__ = copy |
||||||
|
|
||||||
|
|
||||||
|
# register the LRU cache as mutable mapping if possible |
||||||
|
try: |
||||||
|
from collections import MutableMapping |
||||||
|
MutableMapping.register(LRUCache) |
||||||
|
except ImportError: |
||||||
|
pass |
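A quick sketch of the eviction behaviour of this cache::

    from jinja2.utils import LRUCache

    cache = LRUCache(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']            # touch 'a' so it becomes the most recently used key
    cache['c'] = 3        # over capacity: the least recently used key 'b' is evicted
    print('b' in cache)   # False
    print(cache.keys())   # ['c', 'a'], most recently used first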
||||||
|
|
||||||
|
|
||||||
|
def select_autoescape(enabled_extensions=('html', 'htm', 'xml'), |
||||||
|
disabled_extensions=(), |
||||||
|
default_for_string=True, |
||||||
|
default=False): |
||||||
|
"""Intelligently sets the initial value of autoescaping based on the |
||||||
|
filename of the template. This is the recommended way to configure |
||||||
|
autoescaping if you do not want to write a custom function yourself. |
||||||
|
|
||||||
|
If you want to enable it for all templates created from strings or |
||||||
|
for all templates with `.html` and `.xml` extensions:: |
||||||
|
|
||||||
|
from jinja2 import Environment, select_autoescape |
||||||
|
env = Environment(autoescape=select_autoescape( |
||||||
|
enabled_extensions=('html', 'xml'), |
||||||
|
default_for_string=True, |
||||||
|
)) |
||||||
|
|
||||||
|
Example configuration to turn it on at all times except if the template |
||||||
|
ends with `.txt`:: |
||||||
|
|
||||||
|
from jinja2 import Environment, select_autoescape |
||||||
|
env = Environment(autoescape=select_autoescape( |
||||||
|
disabled_extensions=('txt',), |
||||||
|
default_for_string=True, |
||||||
|
default=True, |
||||||
|
)) |
||||||
|
|
||||||
|
The `enabled_extensions` is an iterable of all the extensions that |
||||||
|
autoescaping should be enabled for. Likewise `disabled_extensions` is |
||||||
|
a list of all templates it should be disabled for. If a template is |
||||||
|
loaded from a string then the default from `default_for_string` is used. |
||||||
|
If nothing matches then the initial value of autoescaping is set to the |
||||||
|
value of `default`. |
||||||
|
|
||||||
|
For security reasons this function operates case insensitively. |
||||||
|
|
||||||
|
.. versionadded:: 2.9 |
||||||
|
""" |
||||||
|
enabled_patterns = tuple('.' + x.lstrip('.').lower() |
||||||
|
for x in enabled_extensions) |
||||||
|
disabled_patterns = tuple('.' + x.lstrip('.').lower() |
||||||
|
for x in disabled_extensions) |
||||||
|
def autoescape(template_name): |
||||||
|
if template_name is None: |
||||||
|
return default_for_string |
||||||
|
template_name = template_name.lower() |
||||||
|
if template_name.endswith(enabled_patterns): |
||||||
|
return True |
||||||
|
if template_name.endswith(disabled_patterns): |
||||||
|
return False |
||||||
|
return default |
||||||
|
return autoescape |
||||||
|
|
||||||
|
|
||||||
|
def htmlsafe_json_dumps(obj, dumper=None, **kwargs): |
||||||
|
"""Works exactly like :func:`dumps` but is safe for use in ``<script>`` |
||||||
|
tags. It accepts the same arguments and returns a JSON string. Note that |
||||||
|
this is available in templates through the ``|tojson`` filter which will |
||||||
|
also mark the result as safe. Due to how this function escapes certain |
||||||
|
characters this is safe even if used outside of ``<script>`` tags. |
||||||
|
|
||||||
|
The following characters are escaped in strings: |
||||||
|
|
||||||
|
- ``<`` |
||||||
|
- ``>`` |
||||||
|
- ``&`` |
||||||
|
- ``'`` |
||||||
|
|
||||||
|
This makes it safe to embed such strings in any place in HTML with the |
||||||
|
notable exception of double quoted attributes. In that case single |
||||||
|
quote your attributes or HTML escape it in addition. |
||||||
|
""" |
||||||
|
if dumper is None: |
||||||
|
dumper = json.dumps |
||||||
|
rv = dumper(obj, **kwargs) \ |
||||||
|
.replace(u'<', u'\\u003c') \ |
||||||
|
.replace(u'>', u'\\u003e') \ |
||||||
|
.replace(u'&', u'\\u0026') \ |
||||||
|
.replace(u"'", u'\\u0027') |
||||||
|
return rv |
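For illustration, roughly what the escaping looks like for a value that could otherwise break out of a ``<script>`` block::

    from jinja2.utils import htmlsafe_json_dumps

    print(htmlsafe_json_dumps({'msg': "</script><script>alert('x')"}))
    # {"msg": "\u003c/script\u003e\u003cscript\u003ealert(\u0027x\u0027)"}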
||||||
|
|
||||||
|
|
||||||
|
@implements_iterator |
||||||
|
class Cycler(object): |
||||||
|
"""A cycle helper for templates.""" |
||||||
|
|
||||||
|
def __init__(self, *items): |
||||||
|
if not items: |
||||||
|
raise RuntimeError('at least one item has to be provided') |
||||||
|
self.items = items |
||||||
|
self.reset() |
||||||
|
|
||||||
|
def reset(self): |
||||||
|
"""Resets the cycle.""" |
||||||
|
self.pos = 0 |
||||||
|
|
||||||
|
@property |
||||||
|
def current(self): |
||||||
|
"""Returns the current item.""" |
||||||
|
return self.items[self.pos] |
||||||
|
|
||||||
|
def next(self): |
||||||
|
"""Goes one item ahead and returns it.""" |
||||||
|
rv = self.current |
||||||
|
self.pos = (self.pos + 1) % len(self.items) |
||||||
|
return rv |
||||||
|
|
||||||
|
__next__ = next |
||||||
|
|
||||||
|
|
||||||
|
class Joiner(object): |
||||||
|
"""A joining helper for templates.""" |
||||||
|
|
||||||
|
def __init__(self, sep=u', '): |
||||||
|
self.sep = sep |
||||||
|
self.used = False |
||||||
|
|
||||||
|
def __call__(self): |
||||||
|
if not self.used: |
||||||
|
self.used = True |
||||||
|
return u'' |
||||||
|
return self.sep |
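Both helpers above are exposed to templates as the ``cycler`` and ``joiner`` globals; a direct Python sketch of their behaviour::

    from jinja2.utils import Cycler, Joiner

    row = Cycler('odd', 'even')
    print([row.next() for _ in range(3)])                      # ['odd', 'even', 'odd']

    sep = Joiner(', ')
    print(''.join(sep() + word for word in ['a', 'b', 'c']))   # a, b, c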
||||||
|
|
||||||
|
|
||||||
|
# does this python version support async for in and async generators? |
||||||
|
try: |
||||||
|
exec('async def _():\n async for _ in ():\n yield _') |
||||||
|
have_async_gen = True |
||||||
|
except SyntaxError: |
||||||
|
have_async_gen = False |
||||||
|
|
||||||
|
|
||||||
|
# Imported here because that's where it was in the past |
||||||
|
from markupsafe import Markup, escape, soft_unicode |
@ -0,0 +1,87 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
jinja2.visitor |
||||||
|
~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
This module implements a visitor for the nodes. |
||||||
|
|
||||||
|
:copyright: (c) 2017 by the Jinja Team. |
||||||
|
:license: BSD. |
||||||
|
""" |
||||||
|
from jinja2.nodes import Node |
||||||
|
|
||||||
|
|
||||||
|
class NodeVisitor(object): |
||||||
|
"""Walks the abstract syntax tree and call visitor functions for every |
||||||
|
node found. The visitor functions may return values which will be |
||||||
|
forwarded by the `visit` method. |
||||||
|
|
||||||
|
Per default the visitor functions for the nodes are ``'visit_'`` + |
||||||
|
class name of the node. So a `TryFinally` node visit function would |
||||||
|
be `visit_TryFinally`. This behavior can be changed by overriding |
||||||
|
the `get_visitor` function. If no visitor function exists for a node |
||||||
|
(return value `None`) the `generic_visit` visitor is used instead. |
||||||
|
""" |
||||||
|
|
||||||
|
def get_visitor(self, node): |
||||||
|
"""Return the visitor function for this node or `None` if no visitor |
||||||
|
exists for this node. In that case the generic visit function is |
||||||
|
used instead. |
||||||
|
""" |
||||||
|
method = 'visit_' + node.__class__.__name__ |
||||||
|
return getattr(self, method, None) |
||||||
|
|
||||||
|
def visit(self, node, *args, **kwargs): |
||||||
|
"""Visit a node.""" |
||||||
|
f = self.get_visitor(node) |
||||||
|
if f is not None: |
||||||
|
return f(node, *args, **kwargs) |
||||||
|
return self.generic_visit(node, *args, **kwargs) |
||||||
|
|
||||||
|
def generic_visit(self, node, *args, **kwargs): |
||||||
|
"""Called if no explicit visitor function exists for a node.""" |
||||||
|
for node in node.iter_child_nodes(): |
||||||
|
self.visit(node, *args, **kwargs) |
||||||
|
|
||||||
|
|
||||||
|
class NodeTransformer(NodeVisitor): |
||||||
|
"""Walks the abstract syntax tree and allows modifications of nodes. |
||||||
|
|
||||||
|
The `NodeTransformer` will walk the AST and use the return value of the |
||||||
|
visitor functions to replace or remove the old node. If the return |
||||||
|
value of the visitor function is `None` the node will be removed |
||||||
|
from the previous location otherwise it's replaced with the return |
||||||
|
value. The return value may be the original node in which case no |
||||||
|
replacement takes place. |
||||||
|
""" |
||||||
|
|
||||||
|
def generic_visit(self, node, *args, **kwargs): |
||||||
|
for field, old_value in node.iter_fields(): |
||||||
|
if isinstance(old_value, list): |
||||||
|
new_values = [] |
||||||
|
for value in old_value: |
||||||
|
if isinstance(value, Node): |
||||||
|
value = self.visit(value, *args, **kwargs) |
||||||
|
if value is None: |
||||||
|
continue |
||||||
|
elif not isinstance(value, Node): |
||||||
|
new_values.extend(value) |
||||||
|
continue |
||||||
|
new_values.append(value) |
||||||
|
old_value[:] = new_values |
||||||
|
elif isinstance(old_value, Node): |
||||||
|
new_node = self.visit(old_value, *args, **kwargs) |
||||||
|
if new_node is None: |
||||||
|
delattr(node, field) |
||||||
|
else: |
||||||
|
setattr(node, field, new_node) |
||||||
|
return node |
||||||
|
|
||||||
|
def visit_list(self, node, *args, **kwargs): |
||||||
|
"""As transformers may return lists in some places this method |
||||||
|
can be used to enforce a list as return value. |
||||||
|
""" |
||||||
|
rv = self.visit(node, *args, **kwargs) |
||||||
|
if not isinstance(rv, list): |
||||||
|
rv = [rv] |
||||||
|
return rv |
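As a sketch of how these classes are meant to be subclassed, an invented transformer that upper-cases every string constant in a template's AST::

    from jinja2 import Environment
    from jinja2.visitor import NodeTransformer

    class UpperCaseConstants(NodeTransformer):
        # visitor methods are looked up by node class name, as described above
        def visit_Const(self, node):
            if isinstance(node.value, str):
                node.value = node.value.upper()
            return node

    env = Environment()
    tree = env.parse('{{ "hello" ~ name }}')
    UpperCaseConstants().visit(tree)
    print('HELLO' in env.compile(tree, raw=True))  # True, the constant was rewritten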
@ -0,0 +1,305 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
markupsafe |
||||||
|
~~~~~~~~~~ |
||||||
|
|
||||||
|
Implements a Markup string. |
||||||
|
|
||||||
|
:copyright: (c) 2010 by Armin Ronacher. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import re |
||||||
|
import string |
||||||
|
from collections import Mapping |
||||||
|
from markupsafe._compat import text_type, string_types, int_types, \ |
||||||
|
unichr, iteritems, PY2 |
||||||
|
|
||||||
|
__version__ = "1.0" |
||||||
|
|
||||||
|
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent'] |
||||||
|
|
||||||
|
|
||||||
|
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)') |
||||||
|
_entity_re = re.compile(r'&([^& ;]+);') |
||||||
|
|
||||||
|
|
||||||
|
class Markup(text_type): |
||||||
|
r"""Marks a string as being safe for inclusion in HTML/XML output without |
||||||
|
needing to be escaped. This implements the `__html__` interface a couple |
||||||
|
of frameworks and web applications use. :class:`Markup` is a direct |
||||||
|
subclass of `unicode` and provides all the methods of `unicode`, except that |
||||||
|
it escapes arguments passed to it and always returns `Markup`. |
||||||
|
|
||||||
|
The `escape` function returns markup objects so that double escaping can't |
||||||
|
happen. |
||||||
|
|
||||||
|
The constructor of the :class:`Markup` class can be used for three |
||||||
|
different things: When passed a unicode object it's assumed to be safe, |
||||||
|
when passed an object with an HTML representation (has an `__html__` |
||||||
|
method) that representation is used, otherwise the object passed is |
||||||
|
converted into a unicode string and then assumed to be safe: |
||||||
|
|
||||||
|
>>> Markup("Hello <em>World</em>!") |
||||||
|
Markup(u'Hello <em>World</em>!') |
||||||
|
>>> class Foo(object): |
||||||
|
... def __html__(self): |
||||||
|
... return '<a href="#">foo</a>' |
||||||
|
... |
||||||
|
>>> Markup(Foo()) |
||||||
|
Markup(u'<a href="#">foo</a>') |
||||||
|
|
||||||
|
If you want the object passed to always be treated as unsafe you can use the |
||||||
|
:meth:`escape` classmethod to create a :class:`Markup` object: |
||||||
|
|
||||||
|
>>> Markup.escape("Hello <em>World</em>!") |
||||||
|
Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!') |
||||||
|
|
||||||
|
Operations on a markup string are markup aware which means that all |
||||||
|
arguments are passed through the :func:`escape` function: |
||||||
|
|
||||||
|
>>> em = Markup("<em>%s</em>") |
||||||
|
>>> em % "foo & bar" |
||||||
|
Markup(u'<em>foo &amp; bar</em>') |
||||||
|
>>> strong = Markup("<strong>%(text)s</strong>") |
||||||
|
>>> strong % {'text': '<blink>hacker here</blink>'} |
||||||
|
Markup(u'<strong>&lt;blink&gt;hacker here&lt;/blink&gt;</strong>') |
||||||
|
>>> Markup("<em>Hello</em> ") + "<foo>" |
||||||
|
Markup(u'<em>Hello</em> &lt;foo&gt;') |
||||||
|
""" |
||||||
|
__slots__ = () |
||||||
|
|
||||||
|
def __new__(cls, base=u'', encoding=None, errors='strict'): |
||||||
|
if hasattr(base, '__html__'): |
||||||
|
base = base.__html__() |
||||||
|
if encoding is None: |
||||||
|
return text_type.__new__(cls, base) |
||||||
|
return text_type.__new__(cls, base, encoding, errors) |
||||||
|
|
||||||
|
def __html__(self): |
||||||
|
return self |
||||||
|
|
||||||
|
def __add__(self, other): |
||||||
|
if isinstance(other, string_types) or hasattr(other, '__html__'): |
||||||
|
return self.__class__(super(Markup, self).__add__(self.escape(other))) |
||||||
|
return NotImplemented |
||||||
|
|
||||||
|
def __radd__(self, other): |
||||||
|
if hasattr(other, '__html__') or isinstance(other, string_types): |
||||||
|
return self.escape(other).__add__(self) |
||||||
|
return NotImplemented |
||||||
|
|
||||||
|
def __mul__(self, num): |
||||||
|
if isinstance(num, int_types): |
||||||
|
return self.__class__(text_type.__mul__(self, num)) |
||||||
|
return NotImplemented |
||||||
|
__rmul__ = __mul__ |
||||||
|
|
||||||
|
def __mod__(self, arg): |
||||||
|
if isinstance(arg, tuple): |
||||||
|
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg) |
||||||
|
else: |
||||||
|
arg = _MarkupEscapeHelper(arg, self.escape) |
||||||
|
return self.__class__(text_type.__mod__(self, arg)) |
||||||
|
|
||||||
|
def __repr__(self): |
||||||
|
return '%s(%s)' % ( |
||||||
|
self.__class__.__name__, |
||||||
|
text_type.__repr__(self) |
||||||
|
) |
||||||
|
|
||||||
|
def join(self, seq): |
||||||
|
return self.__class__(text_type.join(self, map(self.escape, seq))) |
||||||
|
join.__doc__ = text_type.join.__doc__ |
||||||
|
|
||||||
|
def split(self, *args, **kwargs): |
||||||
|
return list(map(self.__class__, text_type.split(self, *args, **kwargs))) |
||||||
|
split.__doc__ = text_type.split.__doc__ |
||||||
|
|
||||||
|
def rsplit(self, *args, **kwargs): |
||||||
|
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs))) |
||||||
|
rsplit.__doc__ = text_type.rsplit.__doc__ |
||||||
|
|
||||||
|
def splitlines(self, *args, **kwargs): |
||||||
|
return list(map(self.__class__, text_type.splitlines( |
||||||
|
self, *args, **kwargs))) |
||||||
|
splitlines.__doc__ = text_type.splitlines.__doc__ |
||||||
|
|
||||||
|
def unescape(self): |
||||||
|
r"""Unescape markup again into an text_type string. This also resolves |
||||||
|
known HTML4 and XHTML entities: |
||||||
|
|
||||||
|
>>> Markup("Main » <em>About</em>").unescape() |
||||||
|
u'Main \xbb <em>About</em>' |
||||||
|
""" |
||||||
|
from markupsafe._constants import HTML_ENTITIES |
||||||
|
def handle_match(m): |
||||||
|
name = m.group(1) |
||||||
|
if name in HTML_ENTITIES: |
||||||
|
return unichr(HTML_ENTITIES[name]) |
||||||
|
try: |
||||||
|
if name[:2] in ('#x', '#X'): |
||||||
|
return unichr(int(name[2:], 16)) |
||||||
|
elif name.startswith('#'): |
||||||
|
return unichr(int(name[1:])) |
||||||
|
except ValueError: |
||||||
|
pass |
||||||
|
# Don't modify unexpected input. |
||||||
|
return m.group() |
||||||
|
return _entity_re.sub(handle_match, text_type(self)) |
||||||
|
|
||||||
|
def striptags(self): |
||||||
|
r"""Unescape markup into an text_type string and strip all tags. This |
||||||
|
also resolves known HTML4 and XHTML entities. Whitespace is |
||||||
|
normalized to one: |
||||||
|
|
||||||
|
>>> Markup("Main » <em>About</em>").striptags() |
||||||
|
u'Main \xbb About' |
||||||
|
""" |
||||||
|
stripped = u' '.join(_striptags_re.sub('', self).split()) |
||||||
|
return Markup(stripped).unescape() |
||||||
|
|
||||||
|
@classmethod |
||||||
|
def escape(cls, s): |
||||||
|
"""Escape the string. Works like :func:`escape` with the difference |
||||||
|
that for subclasses of :class:`Markup` this function would return the |
||||||
|
correct subclass. |
||||||
|
""" |
||||||
|
rv = escape(s) |
||||||
|
if rv.__class__ is not cls: |
||||||
|
return cls(rv) |
||||||
|
return rv |
||||||
|
|
||||||
|
def make_simple_escaping_wrapper(name): |
||||||
|
orig = getattr(text_type, name) |
||||||
|
def func(self, *args, **kwargs): |
||||||
|
args = _escape_argspec(list(args), enumerate(args), self.escape) |
||||||
|
_escape_argspec(kwargs, iteritems(kwargs), self.escape) |
||||||
|
return self.__class__(orig(self, *args, **kwargs)) |
||||||
|
func.__name__ = orig.__name__ |
||||||
|
func.__doc__ = orig.__doc__ |
||||||
|
return func |
||||||
|
|
||||||
|
for method in '__getitem__', 'capitalize', \ |
||||||
|
'title', 'lower', 'upper', 'replace', 'ljust', \ |
||||||
|
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \ |
||||||
|
'translate', 'expandtabs', 'swapcase', 'zfill': |
||||||
|
locals()[method] = make_simple_escaping_wrapper(method) |
||||||
|
|
||||||
|
# new in python 2.5 |
||||||
|
if hasattr(text_type, 'partition'): |
||||||
|
def partition(self, sep): |
||||||
|
return tuple(map(self.__class__, |
||||||
|
text_type.partition(self, self.escape(sep)))) |
||||||
|
def rpartition(self, sep): |
||||||
|
return tuple(map(self.__class__, |
||||||
|
text_type.rpartition(self, self.escape(sep)))) |
||||||
|
|
||||||
|
# new in python 2.6 |
||||||
|
if hasattr(text_type, 'format'): |
||||||
|
def format(*args, **kwargs): |
||||||
|
self, args = args[0], args[1:] |
||||||
|
formatter = EscapeFormatter(self.escape) |
||||||
|
kwargs = _MagicFormatMapping(args, kwargs) |
||||||
|
return self.__class__(formatter.vformat(self, args, kwargs)) |
||||||
|
|
||||||
|
def __html_format__(self, format_spec): |
||||||
|
if format_spec: |
||||||
|
raise ValueError('Unsupported format specification ' |
||||||
|
'for Markup.') |
||||||
|
return self |
||||||
|
|
||||||
|
# not in python 3 |
||||||
|
if hasattr(text_type, '__getslice__'): |
||||||
|
__getslice__ = make_simple_escaping_wrapper('__getslice__') |
||||||
|
|
||||||
|
del method, make_simple_escaping_wrapper |
||||||
|
|
||||||
|
|
||||||
|
class _MagicFormatMapping(Mapping): |
||||||
|
"""This class implements a dummy wrapper to fix a bug in the Python |
||||||
|
standard library for string formatting. |
||||||
|
|
||||||
|
See http://bugs.python.org/issue13598 for information about why |
||||||
|
this is necessary. |
||||||
|
""" |
||||||
|
|
||||||
|
def __init__(self, args, kwargs): |
||||||
|
self._args = args |
||||||
|
self._kwargs = kwargs |
||||||
|
self._last_index = 0 |
||||||
|
|
||||||
|
def __getitem__(self, key): |
||||||
|
if key == '': |
||||||
|
idx = self._last_index |
||||||
|
self._last_index += 1 |
||||||
|
try: |
||||||
|
return self._args[idx] |
||||||
|
except LookupError: |
||||||
|
pass |
||||||
|
key = str(idx) |
||||||
|
return self._kwargs[key] |
||||||
|
|
||||||
|
def __iter__(self): |
||||||
|
return iter(self._kwargs) |
||||||
|
|
||||||
|
def __len__(self): |
||||||
|
return len(self._kwargs) |
||||||
|
|
||||||
|
|
||||||
|
if hasattr(text_type, 'format'): |
||||||
|
class EscapeFormatter(string.Formatter): |
||||||
|
|
||||||
|
def __init__(self, escape): |
||||||
|
self.escape = escape |
||||||
|
|
||||||
|
def format_field(self, value, format_spec): |
||||||
|
if hasattr(value, '__html_format__'): |
||||||
|
rv = value.__html_format__(format_spec) |
||||||
|
elif hasattr(value, '__html__'): |
||||||
|
if format_spec: |
||||||
|
raise ValueError('No format specification allowed ' |
||||||
|
'when formatting an object with ' |
||||||
|
'its __html__ method.') |
||||||
|
rv = value.__html__() |
||||||
|
else: |
||||||
|
# We need to make sure the format spec is unicode here as |
||||||
|
# otherwise the wrong callback methods are invoked. For |
||||||
|
# instance, with a byte string it would invoke __str__ and |
||||||
|
# not __unicode__. |
||||||
|
rv = string.Formatter.format_field( |
||||||
|
self, value, text_type(format_spec)) |
||||||
|
return text_type(self.escape(rv)) |
||||||
|
|
||||||
|
|
||||||
|
def _escape_argspec(obj, iterable, escape): |
||||||
|
"""Helper for various string-wrapped functions.""" |
||||||
|
for key, value in iterable: |
||||||
|
if hasattr(value, '__html__') or isinstance(value, string_types): |
||||||
|
obj[key] = escape(value) |
||||||
|
return obj |
||||||
|
|
||||||
|
|
||||||
|
class _MarkupEscapeHelper(object): |
||||||
|
"""Helper for Markup.__mod__""" |
||||||
|
|
||||||
|
def __init__(self, obj, escape): |
||||||
|
self.obj = obj |
||||||
|
self.escape = escape |
||||||
|
|
||||||
|
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape) |
||||||
|
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj)) |
||||||
|
__repr__ = lambda s: str(s.escape(repr(s.obj))) |
||||||
|
__int__ = lambda s: int(s.obj) |
||||||
|
__float__ = lambda s: float(s.obj) |
||||||
|
|
||||||
|
|
||||||
|
# we have to import it down here as the speedups and native |
||||||
|
# modules imports the markup type which is define above. |
||||||
|
try: |
||||||
|
from markupsafe._speedups import escape, escape_silent, soft_unicode |
||||||
|
except ImportError: |
||||||
|
from markupsafe._native import escape, escape_silent, soft_unicode |
||||||
|
|
||||||
|
if not PY2: |
||||||
|
soft_str = soft_unicode |
||||||
|
__all__.append('soft_str') |
@ -0,0 +1,26 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
markupsafe._compat |
||||||
|
~~~~~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Compatibility module for different Python versions. |
||||||
|
|
||||||
|
:copyright: (c) 2013 by Armin Ronacher. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
import sys |
||||||
|
|
||||||
|
PY2 = sys.version_info[0] == 2 |
||||||
|
|
||||||
|
if not PY2: |
||||||
|
text_type = str |
||||||
|
string_types = (str,) |
||||||
|
unichr = chr |
||||||
|
int_types = (int,) |
||||||
|
iteritems = lambda x: iter(x.items()) |
||||||
|
else: |
||||||
|
text_type = unicode |
||||||
|
string_types = (str, unicode) |
||||||
|
unichr = unichr |
||||||
|
int_types = (int, long) |
||||||
|
iteritems = lambda x: x.iteritems() |
@ -0,0 +1,267 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
markupsafe._constants |
||||||
|
~~~~~~~~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Highlevel implementation of the Markup string. |
||||||
|
|
||||||
|
:copyright: (c) 2010 by Armin Ronacher. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
|
||||||
|
|
||||||
|
HTML_ENTITIES = { |
||||||
|
'AElig': 198, |
||||||
|
'Aacute': 193, |
||||||
|
'Acirc': 194, |
||||||
|
'Agrave': 192, |
||||||
|
'Alpha': 913, |
||||||
|
'Aring': 197, |
||||||
|
'Atilde': 195, |
||||||
|
'Auml': 196, |
||||||
|
'Beta': 914, |
||||||
|
'Ccedil': 199, |
||||||
|
'Chi': 935, |
||||||
|
'Dagger': 8225, |
||||||
|
'Delta': 916, |
||||||
|
'ETH': 208, |
||||||
|
'Eacute': 201, |
||||||
|
'Ecirc': 202, |
||||||
|
'Egrave': 200, |
||||||
|
'Epsilon': 917, |
||||||
|
'Eta': 919, |
||||||
|
'Euml': 203, |
||||||
|
'Gamma': 915, |
||||||
|
'Iacute': 205, |
||||||
|
'Icirc': 206, |
||||||
|
'Igrave': 204, |
||||||
|
'Iota': 921, |
||||||
|
'Iuml': 207, |
||||||
|
'Kappa': 922, |
||||||
|
'Lambda': 923, |
||||||
|
'Mu': 924, |
||||||
|
'Ntilde': 209, |
||||||
|
'Nu': 925, |
||||||
|
'OElig': 338, |
||||||
|
'Oacute': 211, |
||||||
|
'Ocirc': 212, |
||||||
|
'Ograve': 210, |
||||||
|
'Omega': 937, |
||||||
|
'Omicron': 927, |
||||||
|
'Oslash': 216, |
||||||
|
'Otilde': 213, |
||||||
|
'Ouml': 214, |
||||||
|
'Phi': 934, |
||||||
|
'Pi': 928, |
||||||
|
'Prime': 8243, |
||||||
|
'Psi': 936, |
||||||
|
'Rho': 929, |
||||||
|
'Scaron': 352, |
||||||
|
'Sigma': 931, |
||||||
|
'THORN': 222, |
||||||
|
'Tau': 932, |
||||||
|
'Theta': 920, |
||||||
|
'Uacute': 218, |
||||||
|
'Ucirc': 219, |
||||||
|
'Ugrave': 217, |
||||||
|
'Upsilon': 933, |
||||||
|
'Uuml': 220, |
||||||
|
'Xi': 926, |
||||||
|
'Yacute': 221, |
||||||
|
'Yuml': 376, |
||||||
|
'Zeta': 918, |
||||||
|
'aacute': 225, |
||||||
|
'acirc': 226, |
||||||
|
'acute': 180, |
||||||
|
'aelig': 230, |
||||||
|
'agrave': 224, |
||||||
|
'alefsym': 8501, |
||||||
|
'alpha': 945, |
||||||
|
'amp': 38, |
||||||
|
'and': 8743, |
||||||
|
'ang': 8736, |
||||||
|
'apos': 39, |
||||||
|
'aring': 229, |
||||||
|
'asymp': 8776, |
||||||
|
'atilde': 227, |
||||||
|
'auml': 228, |
||||||
|
'bdquo': 8222, |
||||||
|
'beta': 946, |
||||||
|
'brvbar': 166, |
||||||
|
'bull': 8226, |
||||||
|
'cap': 8745, |
||||||
|
'ccedil': 231, |
||||||
|
'cedil': 184, |
||||||
|
'cent': 162, |
||||||
|
'chi': 967, |
||||||
|
'circ': 710, |
||||||
|
'clubs': 9827, |
||||||
|
'cong': 8773, |
||||||
|
'copy': 169, |
||||||
|
'crarr': 8629, |
||||||
|
'cup': 8746, |
||||||
|
'curren': 164, |
||||||
|
'dArr': 8659, |
||||||
|
'dagger': 8224, |
||||||
|
'darr': 8595, |
||||||
|
'deg': 176, |
||||||
|
'delta': 948, |
||||||
|
'diams': 9830, |
||||||
|
'divide': 247, |
||||||
|
'eacute': 233, |
||||||
|
'ecirc': 234, |
||||||
|
'egrave': 232, |
||||||
|
'empty': 8709, |
||||||
|
'emsp': 8195, |
||||||
|
'ensp': 8194, |
||||||
|
'epsilon': 949, |
||||||
|
'equiv': 8801, |
||||||
|
'eta': 951, |
||||||
|
'eth': 240, |
||||||
|
'euml': 235, |
||||||
|
'euro': 8364, |
||||||
|
'exist': 8707, |
||||||
|
'fnof': 402, |
||||||
|
'forall': 8704, |
||||||
|
'frac12': 189, |
||||||
|
'frac14': 188, |
||||||
|
'frac34': 190, |
||||||
|
'frasl': 8260, |
||||||
|
'gamma': 947, |
||||||
|
'ge': 8805, |
||||||
|
'gt': 62, |
||||||
|
'hArr': 8660, |
||||||
|
'harr': 8596, |
||||||
|
'hearts': 9829, |
||||||
|
'hellip': 8230, |
||||||
|
'iacute': 237, |
||||||
|
'icirc': 238, |
||||||
|
'iexcl': 161, |
||||||
|
'igrave': 236, |
||||||
|
'image': 8465, |
||||||
|
'infin': 8734, |
||||||
|
'int': 8747, |
||||||
|
'iota': 953, |
||||||
|
'iquest': 191, |
||||||
|
'isin': 8712, |
||||||
|
'iuml': 239, |
||||||
|
'kappa': 954, |
||||||
|
'lArr': 8656, |
||||||
|
'lambda': 955, |
||||||
|
'lang': 9001, |
||||||
|
'laquo': 171, |
||||||
|
'larr': 8592, |
||||||
|
'lceil': 8968, |
||||||
|
'ldquo': 8220, |
||||||
|
'le': 8804, |
||||||
|
'lfloor': 8970, |
||||||
|
'lowast': 8727, |
||||||
|
'loz': 9674, |
||||||
|
'lrm': 8206, |
||||||
|
'lsaquo': 8249, |
||||||
|
'lsquo': 8216, |
||||||
|
'lt': 60, |
||||||
|
'macr': 175, |
||||||
|
'mdash': 8212, |
||||||
|
'micro': 181, |
||||||
|
'middot': 183, |
||||||
|
'minus': 8722, |
||||||
|
'mu': 956, |
||||||
|
'nabla': 8711, |
||||||
|
'nbsp': 160, |
||||||
|
'ndash': 8211, |
||||||
|
'ne': 8800, |
||||||
|
'ni': 8715, |
||||||
|
'not': 172, |
||||||
|
'notin': 8713, |
||||||
|
'nsub': 8836, |
||||||
|
'ntilde': 241, |
||||||
|
'nu': 957, |
||||||
|
'oacute': 243, |
||||||
|
'ocirc': 244, |
||||||
|
'oelig': 339, |
||||||
|
'ograve': 242, |
||||||
|
'oline': 8254, |
||||||
|
'omega': 969, |
||||||
|
'omicron': 959, |
||||||
|
'oplus': 8853, |
||||||
|
'or': 8744, |
||||||
|
'ordf': 170, |
||||||
|
'ordm': 186, |
||||||
|
'oslash': 248, |
||||||
|
'otilde': 245, |
||||||
|
'otimes': 8855, |
||||||
|
'ouml': 246, |
||||||
|
'para': 182, |
||||||
|
'part': 8706, |
||||||
|
'permil': 8240, |
||||||
|
'perp': 8869, |
||||||
|
'phi': 966, |
||||||
|
'pi': 960, |
||||||
|
'piv': 982, |
||||||
|
'plusmn': 177, |
||||||
|
'pound': 163, |
||||||
|
'prime': 8242, |
||||||
|
'prod': 8719, |
||||||
|
'prop': 8733, |
||||||
|
'psi': 968, |
||||||
|
'quot': 34, |
||||||
|
'rArr': 8658, |
||||||
|
'radic': 8730, |
||||||
|
'rang': 9002, |
||||||
|
'raquo': 187, |
||||||
|
'rarr': 8594, |
||||||
|
'rceil': 8969, |
||||||
|
'rdquo': 8221, |
||||||
|
'real': 8476, |
||||||
|
'reg': 174, |
||||||
|
'rfloor': 8971, |
||||||
|
'rho': 961, |
||||||
|
'rlm': 8207, |
||||||
|
'rsaquo': 8250, |
||||||
|
'rsquo': 8217, |
||||||
|
'sbquo': 8218, |
||||||
|
'scaron': 353, |
||||||
|
'sdot': 8901, |
||||||
|
'sect': 167, |
||||||
|
'shy': 173, |
||||||
|
'sigma': 963, |
||||||
|
'sigmaf': 962, |
||||||
|
'sim': 8764, |
||||||
|
'spades': 9824, |
||||||
|
'sub': 8834, |
||||||
|
'sube': 8838, |
||||||
|
'sum': 8721, |
||||||
|
'sup': 8835, |
||||||
|
'sup1': 185, |
||||||
|
'sup2': 178, |
||||||
|
'sup3': 179, |
||||||
|
'supe': 8839, |
||||||
|
'szlig': 223, |
||||||
|
'tau': 964, |
||||||
|
'there4': 8756, |
||||||
|
'theta': 952, |
||||||
|
'thetasym': 977, |
||||||
|
'thinsp': 8201, |
||||||
|
'thorn': 254, |
||||||
|
'tilde': 732, |
||||||
|
'times': 215, |
||||||
|
'trade': 8482, |
||||||
|
'uArr': 8657, |
||||||
|
'uacute': 250, |
||||||
|
'uarr': 8593, |
||||||
|
'ucirc': 251, |
||||||
|
'ugrave': 249, |
||||||
|
'uml': 168, |
||||||
|
'upsih': 978, |
||||||
|
'upsilon': 965, |
||||||
|
'uuml': 252, |
||||||
|
'weierp': 8472, |
||||||
|
'xi': 958, |
||||||
|
'yacute': 253, |
||||||
|
'yen': 165, |
||||||
|
'yuml': 255, |
||||||
|
'zeta': 950, |
||||||
|
'zwj': 8205, |
||||||
|
'zwnj': 8204 |
||||||
|
} |
@ -0,0 +1,46 @@ |
|||||||
|
# -*- coding: utf-8 -*- |
||||||
|
""" |
||||||
|
markupsafe._native |
||||||
|
~~~~~~~~~~~~~~~~~~ |
||||||
|
|
||||||
|
Native Python implementation used when the C module is not compiled. |
||||||
|
|
||||||
|
:copyright: (c) 2010 by Armin Ronacher. |
||||||
|
:license: BSD, see LICENSE for more details. |
||||||
|
""" |
||||||
|
from markupsafe import Markup |
||||||
|
from markupsafe._compat import text_type |
||||||
|
|
||||||
|
|
||||||
|
def escape(s): |
||||||
|
"""Convert the characters &, <, >, ' and " in string s to HTML-safe |
||||||
|
sequences. Use this if you need to display text that might contain |
||||||
|
such characters in HTML. Marks return value as markup string. |
||||||
|
""" |
||||||
|
if hasattr(s, '__html__'): |
||||||
|
return s.__html__() |
||||||
|
return Markup(text_type(s) |
||||||
|
.replace('&', '&amp;') |
||||||
|
.replace('>', '&gt;') |
||||||
|
.replace('<', '&lt;') |
||||||
|
.replace("'", '&#39;') |
||||||
|
.replace('"', '&#34;') |
||||||
|
) |
||||||
|
|
||||||
|
|
||||||
|
def escape_silent(s): |
||||||
|
"""Like :func:`escape` but converts `None` into an empty |
||||||
|
markup string. |
||||||
|
""" |
||||||
|
if s is None: |
||||||
|
return Markup() |
||||||
|
return escape(s) |
||||||
|
|
||||||
|
|
||||||
|
def soft_unicode(s): |
||||||
|
"""Make a string unicode if it isn't already. That way a markup |
||||||
|
string is not converted back to unicode. |
||||||
|
""" |
||||||
|
if not isinstance(s, text_type): |
||||||
|
s = text_type(s) |
||||||
|
return s |
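A short sketch of how the three helpers behave; the output is identical whether this native module or the C speedups are used::

    from markupsafe._native import escape, escape_silent, soft_unicode

    print(escape(u'<b>"quoted" & \'single\'</b>'))
    # &lt;b&gt;&#34;quoted&#34; &amp; &#39;single&#39;&lt;/b&gt;
    print(escape_silent(None))   # empty Markup string instead of u'None'
    print(soft_unicode(42))      # u'42'; Markup values pass through unchanged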
@ -0,0 +1,239 @@ |
|||||||
|
/**
|
||||||
|
* markupsafe._speedups |
||||||
|
* ~~~~~~~~~~~~~~~~~~~~ |
||||||
|
* |
||||||
|
* This module implements functions for automatic escaping in C for better |
||||||
|
* performance. |
||||||
|
* |
||||||
|
* :copyright: (c) 2010 by Armin Ronacher. |
||||||
|
* :license: BSD. |
||||||
|
*/ |
||||||
|
|
||||||
|
#include <Python.h> |
||||||
|
|
||||||
|
#define ESCAPED_CHARS_TABLE_SIZE 63 |
||||||
|
#define UNICHR(x) (PyUnicode_AS_UNICODE((PyUnicodeObject*)PyUnicode_DecodeASCII(x, strlen(x), NULL))); |
||||||
|
|
||||||
|
#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) |
||||||
|
typedef int Py_ssize_t; |
||||||
|
#define PY_SSIZE_T_MAX INT_MAX |
||||||
|
#define PY_SSIZE_T_MIN INT_MIN |
||||||
|
#endif |
||||||
|
|
||||||
|
|
||||||
|
static PyObject* markup; |
||||||
|
static Py_ssize_t escaped_chars_delta_len[ESCAPED_CHARS_TABLE_SIZE]; |
||||||
|
static Py_UNICODE *escaped_chars_repl[ESCAPED_CHARS_TABLE_SIZE]; |
||||||
|
|
||||||
|
static int |
||||||
|
init_constants(void) |
||||||
|
{ |
||||||
|
PyObject *module; |
||||||
|
/* mapping of characters to replace */ |
||||||
|
escaped_chars_repl['"'] = UNICHR("""); |
||||||
|
escaped_chars_repl['\''] = UNICHR("&#39;"); |
||||||
|
escaped_chars_repl['&'] = UNICHR("&amp;"); |
||||||
|
escaped_chars_repl['<'] = UNICHR("&lt;"); |
||||||
|
escaped_chars_repl['>'] = UNICHR("&gt;"); |
||||||
|
|
||||||
|
/* lengths of those characters when replaced - 1 */ |
||||||
|
memset(escaped_chars_delta_len, 0, sizeof (escaped_chars_delta_len)); |
||||||
|
escaped_chars_delta_len['"'] = escaped_chars_delta_len['\''] = \
|
||||||
|
escaped_chars_delta_len['&'] = 4; |
||||||
|
escaped_chars_delta_len['<'] = escaped_chars_delta_len['>'] = 3; |
||||||
|
|
||||||
|
/* import markup type so that we can mark the return value */ |
||||||
|
module = PyImport_ImportModule("markupsafe"); |
||||||
|
if (!module) |
||||||
|
return 0; |
||||||
|
markup = PyObject_GetAttrString(module, "Markup"); |
||||||
|
Py_DECREF(module); |
||||||
|
|
||||||
|
return 1; |
||||||
|
} |
||||||
|
|
||||||
|
static PyObject* |
||||||
|
escape_unicode(PyUnicodeObject *in) |
||||||
|
{ |
||||||
|
PyUnicodeObject *out; |
||||||
|
Py_UNICODE *inp = PyUnicode_AS_UNICODE(in); |
||||||
|
const Py_UNICODE *inp_end = PyUnicode_AS_UNICODE(in) + PyUnicode_GET_SIZE(in); |
||||||
|
Py_UNICODE *next_escp; |
||||||
|
Py_UNICODE *outp; |
||||||
|
Py_ssize_t delta=0, erepl=0, delta_len=0; |
||||||
|
|
||||||
|
/* First we need to figure out how long the escaped string will be */ |
||||||
|
while (*(inp) || inp < inp_end) { |
||||||
|
if (*inp < ESCAPED_CHARS_TABLE_SIZE) { |
||||||
|
delta += escaped_chars_delta_len[*inp]; |
||||||
|
erepl += !!escaped_chars_delta_len[*inp]; |
||||||
|
} |
||||||
|
++inp; |
||||||
|
} |
||||||
|
|
||||||
|
/* Do we need to escape anything at all? */ |
||||||
|
if (!erepl) { |
||||||
|
Py_INCREF(in); |
||||||
|
return (PyObject*)in; |
||||||
|
} |
||||||
|
|
||||||
|
out = (PyUnicodeObject*)PyUnicode_FromUnicode(NULL, PyUnicode_GET_SIZE(in) + delta); |
||||||
|
if (!out) |
||||||
|
return NULL; |
||||||
|
|
||||||
|
outp = PyUnicode_AS_UNICODE(out); |
||||||
|
inp = PyUnicode_AS_UNICODE(in); |
||||||
|
while (erepl-- > 0) { |
||||||
|
/* look for the next substitution */ |
||||||
|
next_escp = inp; |
||||||
|
while (next_escp < inp_end) { |
||||||
|
if (*next_escp < ESCAPED_CHARS_TABLE_SIZE && |
||||||
|
(delta_len = escaped_chars_delta_len[*next_escp])) { |
||||||
|
++delta_len; |
||||||
|
break; |
||||||
|
} |
||||||
|
++next_escp; |
||||||
|
} |
||||||
|
|
||||||
|
if (next_escp > inp) { |
||||||
|
/* copy unescaped chars between inp and next_escp */ |
||||||
|
Py_UNICODE_COPY(outp, inp, next_escp-inp); |
||||||
|
outp += next_escp - inp; |
||||||
|
} |
||||||
|
|
||||||
|
/* escape 'next_escp' */ |
||||||
|
Py_UNICODE_COPY(outp, escaped_chars_repl[*next_escp], delta_len); |
||||||
|
outp += delta_len; |
||||||
|
|
||||||
|
inp = next_escp + 1; |
||||||
|
} |
||||||
|
if (inp < inp_end) |
||||||
|
Py_UNICODE_COPY(outp, inp, PyUnicode_GET_SIZE(in) - (inp - PyUnicode_AS_UNICODE(in))); |
||||||
|
|
||||||
|
return (PyObject*)out; |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
static PyObject* |
||||||
|
escape(PyObject *self, PyObject *text) |
||||||
|
{ |
||||||
|
PyObject *s = NULL, *rv = NULL, *html; |
||||||
|
|
||||||
|
/* we don't have to escape integers, bools or floats */ |
||||||
|
if (PyLong_CheckExact(text) || |
||||||
|
#if PY_MAJOR_VERSION < 3 |
||||||
|
PyInt_CheckExact(text) || |
||||||
|
#endif |
||||||
|
PyFloat_CheckExact(text) || PyBool_Check(text) || |
||||||
|
text == Py_None) |
||||||
|
return PyObject_CallFunctionObjArgs(markup, text, NULL); |
||||||
|
|
||||||
|
/* if the object has an __html__ method that performs the escaping */ |
||||||
|
html = PyObject_GetAttrString(text, "__html__"); |
||||||
|
if (html) { |
||||||
|
rv = PyObject_CallObject(html, NULL); |
||||||
|
Py_DECREF(html); |
||||||
|
return rv; |
||||||
|
} |
||||||
|
|
||||||
|
/* otherwise make the object unicode if it isn't, then escape */ |
||||||
|
PyErr_Clear(); |
||||||
|
if (!PyUnicode_Check(text)) { |
||||||
|
#if PY_MAJOR_VERSION < 3 |
||||||
|
PyObject *unicode = PyObject_Unicode(text); |
||||||
|
#else |
||||||
|
PyObject *unicode = PyObject_Str(text); |
||||||
|
#endif |
||||||
|
if (!unicode) |
||||||
|
return NULL; |
||||||
|
s = escape_unicode((PyUnicodeObject*)unicode); |
||||||
|
Py_DECREF(unicode); |
||||||
|
} |
||||||
|
else |
||||||
|
s = escape_unicode((PyUnicodeObject*)text); |
||||||
|
|
||||||
|
/* convert the unicode string into a markup object. */ |
||||||
|
rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL); |
||||||
|
Py_DECREF(s); |
||||||
|
return rv; |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
static PyObject* |
||||||
|
escape_silent(PyObject *self, PyObject *text) |
||||||
|
{ |
||||||
|
if (text != Py_None) |
||||||
|
return escape(self, text); |
||||||
|
return PyObject_CallFunctionObjArgs(markup, NULL); |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
static PyObject* |
||||||
|
soft_unicode(PyObject *self, PyObject *s) |
||||||
|
{ |
||||||
|
if (!PyUnicode_Check(s)) |
||||||
|
#if PY_MAJOR_VERSION < 3 |
||||||
|
return PyObject_Unicode(s); |
||||||
|
#else |
||||||
|
return PyObject_Str(s); |
||||||
|
#endif |
||||||
|
Py_INCREF(s); |
||||||
|
return s; |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
static PyMethodDef module_methods[] = { |
||||||
|
{"escape", (PyCFunction)escape, METH_O, |
||||||
|
"escape(s) -> markup\n\n" |
||||||
|
"Convert the characters &, <, >, ', and \" in string s to HTML-safe\n" |
||||||
|
"sequences. Use this if you need to display text that might contain\n" |
||||||
|
"such characters in HTML. Marks return value as markup string."}, |
||||||
|
{"escape_silent", (PyCFunction)escape_silent, METH_O, |
||||||
|
"escape_silent(s) -> markup\n\n" |
||||||
|
"Like escape but converts None to an empty string."}, |
||||||
|
{"soft_unicode", (PyCFunction)soft_unicode, METH_O, |
||||||
|
"soft_unicode(object) -> string\n\n" |
||||||
|
"Make a string unicode if it isn't already. That way a markup\n" |
||||||
|
"string is not converted back to unicode."}, |
||||||
|
{NULL, NULL, 0, NULL} /* Sentinel */ |
||||||
|
}; |
||||||
|
|
||||||
|
|
||||||
|
#if PY_MAJOR_VERSION < 3 |
||||||
|
|
||||||
|
#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ |
||||||
|
#define PyMODINIT_FUNC void |
||||||
|
#endif |
||||||
|
PyMODINIT_FUNC |
||||||
|
init_speedups(void) |
||||||
|
{ |
||||||
|
if (!init_constants()) |
||||||
|
return; |
||||||
|
|
||||||
|
Py_InitModule3("markupsafe._speedups", module_methods, ""); |
||||||
|
} |
||||||
|
|
||||||
|
#else /* Python 3.x module initialization */ |
||||||
|
|
||||||
|
static struct PyModuleDef module_definition = { |
||||||
|
PyModuleDef_HEAD_INIT, |
||||||
|
"markupsafe._speedups", |
||||||
|
NULL, |
||||||
|
-1, |
||||||
|
module_methods, |
||||||
|
NULL, |
||||||
|
NULL, |
||||||
|
NULL, |
||||||
|
NULL |
||||||
|
}; |
||||||
|
|
||||||
|
PyMODINIT_FUNC |
||||||
|
PyInit__speedups(void) |
||||||
|
{ |
||||||
|
if (!init_constants()) |
||||||
|
return NULL; |
||||||
|
|
||||||
|
return PyModule_Create(&module_definition); |
||||||
|
} |
||||||
|
|
||||||
|
#endif |
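The C extension above exports the same three functions as markupsafe._native, so the package can pick whichever is available at import time. A minimal sketch of that selection (illustrative, not a verbatim copy of markupsafe/__init__.py)::

    try:
        # prefer the compiled implementation when the extension was built
        from markupsafe._speedups import escape, escape_silent, soft_unicode
    except ImportError:
        # fall back to the pure-Python versions from _native.py
        from markupsafe._native import escape, escape_silent, soft_unicode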
Binary file not shown.
@ -0,0 +1,19 @@ |
|||||||
|
Metadata-Version: 1.1 |
||||||
|
Name: utm |
||||||
|
Version: 0.4.2 |
||||||
|
Summary: Bidirectional UTM-WGS84 converter for python |
||||||
|
Home-page: https://github.com/Turbo87/utm |
||||||
|
Author: Tobias Bieniek |
||||||
|
Author-email: Tobias.Bieniek@gmx.de |
||||||
|
License: UNKNOWN |
||||||
|
Description: UNKNOWN |
||||||
|
Keywords: utm,wgs84,coordinate,converter |
||||||
|
Platform: UNKNOWN |
||||||
|
Classifier: Programming Language :: Python |
||||||
|
Classifier: License :: OSI Approved :: MIT License |
||||||
|
Classifier: Operating System :: OS Independent |
||||||
|
Classifier: Development Status :: 4 - Beta |
||||||
|
Classifier: Environment :: Other Environment |
||||||
|
Classifier: Intended Audience :: Developers |
||||||
|
Classifier: Intended Audience :: Science/Research |
||||||
|
Classifier: Topic :: Scientific/Engineering :: GIS |
@ -0,0 +1,10 @@ |
|||||||
|
README.rst |
||||||
|
scripts/utm-converter |
||||||
|
test/test_utm.py |
||||||
|
utm/__init__.py |
||||||
|
utm/conversion.py |
||||||
|
utm/error.py |
||||||
|
utm.egg-info/PKG-INFO |
||||||
|
utm.egg-info/SOURCES.txt |
||||||
|
utm.egg-info/dependency_links.txt |
||||||
|
utm.egg-info/top_level.txt |
@ -0,0 +1 @@ |
|||||||
|
|
@ -0,0 +1,11 @@ |
|||||||
|
../utm/error.py |
||||||
|
../utm/conversion.py |
||||||
|
../utm/__init__.py |
||||||
|
../utm/error.pyc |
||||||
|
../utm/conversion.pyc |
||||||
|
../utm/__init__.pyc |
||||||
|
PKG-INFO |
||||||
|
top_level.txt |
||||||
|
dependency_links.txt |
||||||
|
SOURCES.txt |
||||||
|
../../../bin/utm-converter |
@ -0,0 +1 @@ |
|||||||
|
utm |
@ -0,0 +1,2 @@ |
|||||||
|
from utm.conversion import to_latlon, from_latlon, latlon_to_zone_number, latitude_to_zone_letter |
||||||
|
from utm.error import OutOfRangeError |
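With these two imports the package works as a simple bidirectional converter. A small round-trip example (numbers rounded and shown for illustration only)::

    import utm

    # WGS84 -> UTM: returns (easting, northing, zone_number, zone_letter)
    easting, northing, zone, letter = utm.from_latlon(51.2, 7.5)
    print(easting, northing, zone, letter)   # roughly 395201.3 5673135.2 32 U

    # UTM -> WGS84: pass the zone letter ...
    print(utm.to_latlon(easting, northing, zone, letter))         # roughly (51.2, 7.5)
    # ... or state the hemisphere explicitly instead
    print(utm.to_latlon(easting, northing, zone, northern=True))  # roughly (51.2, 7.5)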
@ -0,0 +1,232 @@ |
|||||||
|
import math |
||||||
|
from utm.error import OutOfRangeError |
||||||
|
|
||||||
|
__all__ = ['to_latlon', 'from_latlon'] |
||||||
|
|
||||||
|
K0 = 0.9996 |
||||||
|
|
||||||
|
E = 0.00669438 |
||||||
|
E2 = E * E |
||||||
|
E3 = E2 * E |
||||||
|
E_P2 = E / (1.0 - E) |
||||||
|
|
||||||
|
SQRT_E = math.sqrt(1 - E) |
||||||
|
_E = (1 - SQRT_E) / (1 + SQRT_E) |
||||||
|
_E2 = _E * _E |
||||||
|
_E3 = _E2 * _E |
||||||
|
_E4 = _E3 * _E |
||||||
|
_E5 = _E4 * _E |
||||||
|
|
||||||
|
M1 = (1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256) |
||||||
|
M2 = (3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024) |
||||||
|
M3 = (15 * E2 / 256 + 45 * E3 / 1024) |
||||||
|
M4 = (35 * E3 / 3072) |
||||||
|
|
||||||
|
P2 = (3. / 2 * _E - 27. / 32 * _E3 + 269. / 512 * _E5) |
||||||
|
P3 = (21. / 16 * _E2 - 55. / 32 * _E4) |
||||||
|
P4 = (151. / 96 * _E3 - 417. / 128 * _E5) |
||||||
|
P5 = (1097. / 512 * _E4) |
||||||
|
|
||||||
|
R = 6378137 |
||||||
|
|
||||||
|
ZONE_LETTERS = "CDEFGHJKLMNPQRSTUVWXX" |
||||||
|
|
||||||
|
|
||||||
|
def to_latlon(easting, northing, zone_number, zone_letter=None, northern=None, strict=True): |
||||||
|
"""This function convert an UTM coordinate into Latitude and Longitude |
||||||
|
|
||||||
|
Parameters |
||||||
|
---------- |
||||||
|
easting: int |
||||||
|
Easting value of UTM coordinate |
||||||
|
|
||||||
|
northing: int |
||||||
|
Northing value of UTM coordinate |
||||||
|
|
||||||
|
zone_number: int |
||||||
|
Zone Number is represented by the global map numbers of a UTM Zone |
||||||
|
Numbers Map. For more information, see utmzones [1]_ |
||||||
|
|
||||||
|
zone_letter: str |
||||||
|
Zone Letter is given as a string value. The UTM Zone |
||||||
|
Designators are listed in [1]_ |
||||||
|
|
||||||
|
northern: bool |
||||||
|
Set True for the northern hemisphere or False for the southern hemisphere. Default is None |
||||||
|
|
||||||
|
|
||||||
|
.. _[1]: http://www.jaworski.ca/utmzones.htm |
||||||
|
|
||||||
|
""" |
||||||
|
if not zone_letter and northern is None: |
||||||
|
raise ValueError('either zone_letter or northern needs to be set') |
||||||
|
|
||||||
|
elif zone_letter and northern is not None: |
||||||
|
raise ValueError('set either zone_letter or northern, but not both') |
||||||
|
|
||||||
|
if strict: |
||||||
|
if not 100000 <= easting < 1000000: |
||||||
|
raise OutOfRangeError('easting out of range (must be between 100.000 m and 999.999 m)') |
||||||
|
if not 0 <= northing <= 10000000: |
||||||
|
raise OutOfRangeError('northing out of range (must be between 0 m and 10.000.000 m)') |
||||||
|
if not 1 <= zone_number <= 60: |
||||||
|
raise OutOfRangeError('zone number out of range (must be between 1 and 60)') |
||||||
|
|
||||||
|
if zone_letter: |
||||||
|
zone_letter = zone_letter.upper() |
||||||
|
|
||||||
|
if not 'C' <= zone_letter <= 'X' or zone_letter in ['I', 'O']: |
||||||
|
raise OutOfRangeError('zone letter out of range (must be between C and X)') |
||||||
|
|
||||||
|
northern = (zone_letter >= 'N') |
||||||
|
|
||||||
|
x = easting - 500000 |
||||||
|
y = northing |
||||||
|
|
||||||
|
if not northern: |
||||||
|
y -= 10000000 |
||||||
|
|
||||||
|
m = y / K0 |
||||||
|
mu = m / (R * M1) |
||||||
|
|
||||||
|
p_rad = (mu + |
||||||
|
P2 * math.sin(2 * mu) + |
||||||
|
P3 * math.sin(4 * mu) + |
||||||
|
P4 * math.sin(6 * mu) + |
||||||
|
P5 * math.sin(8 * mu)) |
||||||
|
|
||||||
|
p_sin = math.sin(p_rad) |
||||||
|
p_sin2 = p_sin * p_sin |
||||||
|
|
||||||
|
p_cos = math.cos(p_rad) |
||||||
|
|
||||||
|
p_tan = p_sin / p_cos |
||||||
|
p_tan2 = p_tan * p_tan |
||||||
|
p_tan4 = p_tan2 * p_tan2 |
||||||
|
|
||||||
|
ep_sin = 1 - E * p_sin2 |
||||||
|
ep_sin_sqrt = math.sqrt(1 - E * p_sin2) |
||||||
|
|
||||||
|
n = R / ep_sin_sqrt |
||||||
|
r = (1 - E) / ep_sin |
||||||
|
|
||||||
|
c = _E * p_cos**2 |
||||||
|
c2 = c * c |
||||||
|
|
||||||
|
d = x / (n * K0) |
||||||
|
d2 = d * d |
||||||
|
d3 = d2 * d |
||||||
|
d4 = d3 * d |
||||||
|
d5 = d4 * d |
||||||
|
d6 = d5 * d |
||||||
|
|
||||||
|
latitude = (p_rad - (p_tan / r) * |
||||||
|
(d2 / 2 - |
||||||
|
d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) + |
||||||
|
d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)) |
||||||
|
|
||||||
|
longitude = (d - |
||||||
|
d3 / 6 * (1 + 2 * p_tan2 + c) + |
||||||
|
d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos |
||||||
|
|
||||||
|
return (math.degrees(latitude), |
||||||
|
math.degrees(longitude) + zone_number_to_central_longitude(zone_number)) |
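Either way of naming the hemisphere gives the same result; a quick check against the function above (coordinates illustrative)::

    from utm.conversion import to_latlon

    lat_a = to_latlon(395201.3, 5673135.2, 32, zone_letter='U')
    lat_b = to_latlon(395201.3, 5673135.2, 32, northern=True)
    assert lat_a == lat_b  # both are roughly (51.2, 7.5)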
||||||
|
|
||||||
|
|
||||||
|
def from_latlon(latitude, longitude, force_zone_number=None): |
||||||
|
"""This function convert Latitude and Longitude to UTM coordinate |
||||||
|
|
||||||
|
Parameters |
||||||
|
---------- |
||||||
|
latitude: float |
||||||
|
Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0) |
||||||
|
|
||||||
|
longitude: float |
||||||
|
Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0). |
||||||
|
|
||||||
|
force_zone_number: int |
||||||
|
Zone Number is represented by the global map numbers of a UTM Zone |
||||||
|
Numbers Map. You may force the conversion to use a specific UTM Zone Number. |
||||||
|
For more information, see utmzones [1]_ |
||||||
|
|
||||||
|
.. _[1]: http://www.jaworski.ca/utmzones.htm |
||||||
|
""" |
||||||
|
if not -80.0 <= latitude <= 84.0: |
||||||
|
raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)') |
||||||
|
if not -180.0 <= longitude <= 180.0: |
||||||
|
raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)') |
||||||
|
|
||||||
|
lat_rad = math.radians(latitude) |
||||||
|
lat_sin = math.sin(lat_rad) |
||||||
|
lat_cos = math.cos(lat_rad) |
||||||
|
|
||||||
|
lat_tan = lat_sin / lat_cos |
||||||
|
lat_tan2 = lat_tan * lat_tan |
||||||
|
lat_tan4 = lat_tan2 * lat_tan2 |
||||||
|
|
||||||
|
if force_zone_number is None: |
||||||
|
zone_number = latlon_to_zone_number(latitude, longitude) |
||||||
|
else: |
||||||
|
zone_number = force_zone_number |
||||||
|
|
||||||
|
zone_letter = latitude_to_zone_letter(latitude) |
||||||
|
|
||||||
|
lon_rad = math.radians(longitude) |
||||||
|
central_lon = zone_number_to_central_longitude(zone_number) |
||||||
|
central_lon_rad = math.radians(central_lon) |
||||||
|
|
||||||
|
n = R / math.sqrt(1 - E * lat_sin**2) |
||||||
|
c = E_P2 * lat_cos**2 |
||||||
|
|
||||||
|
a = lat_cos * (lon_rad - central_lon_rad) |
||||||
|
a2 = a * a |
||||||
|
a3 = a2 * a |
||||||
|
a4 = a3 * a |
||||||
|
a5 = a4 * a |
||||||
|
a6 = a5 * a |
||||||
|
|
||||||
|
m = R * (M1 * lat_rad - |
||||||
|
M2 * math.sin(2 * lat_rad) + |
||||||
|
M3 * math.sin(4 * lat_rad) - |
||||||
|
M4 * math.sin(6 * lat_rad)) |
||||||
|
|
||||||
|
easting = K0 * n * (a + |
||||||
|
a3 / 6 * (1 - lat_tan2 + c) + |
||||||
|
a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000 |
||||||
|
|
||||||
|
northing = K0 * (m + n * lat_tan * (a2 / 2 + |
||||||
|
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) + |
||||||
|
a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2))) |
||||||
|
|
||||||
|
if latitude < 0: |
||||||
|
northing += 10000000 |
||||||
|
|
||||||
|
return easting, northing, zone_number, zone_letter |
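For southern latitudes the final branch adds the 10,000,000 m false northing so the northing stays positive. For example::

    from utm.conversion import from_latlon

    # Sydney, Australia: latitude < 0, so the false northing is applied
    easting, northing, zone, letter = from_latlon(-33.87, 151.21)
    print(zone, letter)        # 56 H  (letters below 'N' are southern hemisphere)
    print(northing > 5000000)  # True, thanks to the 10,000,000 m offset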
||||||
|
|
||||||
|
|
||||||
|
def latitude_to_zone_letter(latitude): |
||||||
|
if -80 <= latitude <= 84: |
||||||
|
return ZONE_LETTERS[int(latitude + 80) >> 3] |
||||||
|
else: |
||||||
|
return None |
||||||
|
|
||||||
|
|
||||||
|
def latlon_to_zone_number(latitude, longitude): |
||||||
|
if 56 <= latitude < 64 and 3 <= longitude < 12: |
||||||
|
return 32 |
||||||
|
|
||||||
|
if 72 <= latitude <= 84 and longitude >= 0: |
||||||
|
if longitude <= 9: |
||||||
|
return 31 |
||||||
|
elif longitude <= 21: |
||||||
|
return 33 |
||||||
|
elif longitude <= 33: |
||||||
|
return 35 |
||||||
|
elif longitude <= 42: |
||||||
|
return 37 |
||||||
|
|
||||||
|
return int((longitude + 180) / 6) + 1 |
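The early returns above encode the grid exceptions around southern Norway and Svalbard; everything else follows the regular 6-degree rule::

    from utm.conversion import latlon_to_zone_number

    print(latlon_to_zone_number(60.0, 5.0))    # 32 (southern Norway widened into zone 32)
    print(latlon_to_zone_number(75.0, 20.0))   # 33 (Svalbard uses zones 31/33/35/37 only)
    print(latlon_to_zone_number(48.0, 5.0))    # 31 (regular 6-degree rule)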
||||||
|
|
||||||
|
|
||||||
|
def zone_number_to_central_longitude(zone_number): |
||||||
|
return (zone_number - 1) * 6 - 180 + 3 |
@ -0,0 +1,2 @@ |
|||||||
|
class OutOfRangeError(ValueError): |
||||||
|
pass |
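Since OutOfRangeError subclasses ValueError, callers can catch either type; for instance::

    import utm
    from utm.error import OutOfRangeError

    try:
        utm.from_latlon(91.0, 0.0)        # latitude beyond 84 deg N
    except OutOfRangeError as exc:
        print('rejected:', exc)           # also catchable as plain ValueError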