aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/python/Werkzeug/py3/werkzeug
diff options
context:
space:
mode:
authorMaxim Yurchuk <maxim-yurchuk@ydb.tech>2024-12-12 15:00:43 +0000
committerGitHub <noreply@github.com>2024-12-12 15:00:43 +0000
commit42701242eaf5be980cb935631586d0e90b82641c (patch)
tree6dbf5fcd37d3c16591e196c4a69d166e3ab3a398 /contrib/python/Werkzeug/py3/werkzeug
parent7f5a9f394dbd9ac290cabbb7977538656b3a541e (diff)
parentf7c04b5876af3d16849ab5e3079c0eabbd4e3a00 (diff)
downloadydb-42701242eaf5be980cb935631586d0e90b82641c.tar.gz
Merge pull request #12554 from vitalyisaev2/YQ-3839.with_rightlib.3
Import from Arcadia + YDB FQ: turning gateways_config.proto into a file without external dependencies
Diffstat (limited to 'contrib/python/Werkzeug/py3/werkzeug')
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/__init__.py2
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/_internal.py82
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/_reloader.py90
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/datastructures.py105
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/datastructures.pyi25
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/__init__.py125
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/console.py50
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/repr.py7
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/shared/FONT_LICENSE96
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/shared/source.pngbin818 -> 0 bytes
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/shared/style.css23
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/shared/ubuntu.ttfbin70220 -> 0 bytes
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/debug/tbtools.py695
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/exceptions.py78
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/filesystem.py55
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/formparser.py40
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/http.py180
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/local.py584
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/middleware/lint.py4
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/middleware/shared_data.py96
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing.py2342
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/__init__.py133
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/converters.py257
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/exceptions.py146
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/map.py944
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/matcher.py185
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/routing/rules.py879
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/sansio/http.py140
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/sansio/multipart.py23
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/sansio/request.py5
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/sansio/response.py42
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/sansio/utils.py23
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/security.py117
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/serving.py682
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/test.py100
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/testapp.py11
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/urls.py145
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/useragents.py215
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/utils.py522
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/__init__.py13
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/accept.py14
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/auth.py26
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/base_request.py36
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/base_response.py36
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/common_descriptors.py26
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/cors.py26
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/etag.py26
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/json.py13
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/request.py114
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/response.py44
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wrappers/user_agent.py14
-rw-r--r--contrib/python/Werkzeug/py3/werkzeug/wsgi.py58
52 files changed, 4179 insertions, 5515 deletions
diff --git a/contrib/python/Werkzeug/py3/werkzeug/__init__.py b/contrib/python/Werkzeug/py3/werkzeug/__init__.py
index 0313497cc6..fd7f8d229a 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/__init__.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/__init__.py
@@ -3,4 +3,4 @@ from .test import Client as Client
from .wrappers import Request as Request
from .wrappers import Response as Response
-__version__ = "2.0.3"
+__version__ = "2.2.2"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/_internal.py b/contrib/python/Werkzeug/py3/werkzeug/_internal.py
index 0c8d0d0474..4636647db3 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/_internal.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/_internal.py
@@ -1,4 +1,3 @@
-import inspect
import logging
import operator
import re
@@ -225,83 +224,6 @@ def _log(type: str, message: str, *args: t.Any, **kwargs: t.Any) -> None:
getattr(_logger, type)(message.rstrip(), *args, **kwargs)
-def _parse_signature(func): # type: ignore
- """Return a signature object for the function.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1 along with ``utils.bind`` and
- ``validate_arguments``.
- """
- # if we have a cached validator for this function, return it
- parse = _signature_cache.get(func)
- if parse is not None:
- return parse
-
- # inspect the function signature and collect all the information
- tup = inspect.getfullargspec(func)
- positional, vararg_var, kwarg_var, defaults = tup[:4]
- defaults = defaults or ()
- arg_count = len(positional)
- arguments = []
- for idx, name in enumerate(positional):
- if isinstance(name, list):
- raise TypeError(
- "cannot parse functions that unpack tuples in the function signature"
- )
- try:
- default = defaults[idx - arg_count]
- except IndexError:
- param = (name, False, None)
- else:
- param = (name, True, default)
- arguments.append(param)
- arguments = tuple(arguments)
-
- def parse(args, kwargs): # type: ignore
- new_args = []
- missing = []
- extra = {}
-
- # consume as many arguments as positional as possible
- for idx, (name, has_default, default) in enumerate(arguments):
- try:
- new_args.append(args[idx])
- except IndexError:
- try:
- new_args.append(kwargs.pop(name))
- except KeyError:
- if has_default:
- new_args.append(default)
- else:
- missing.append(name)
- else:
- if name in kwargs:
- extra[name] = kwargs.pop(name)
-
- # handle extra arguments
- extra_positional = args[arg_count:]
- if vararg_var is not None:
- new_args.extend(extra_positional)
- extra_positional = ()
- if kwargs and kwarg_var is None:
- extra.update(kwargs)
- kwargs = {}
-
- return (
- new_args,
- kwargs,
- missing,
- extra,
- extra_positional,
- arguments,
- vararg_var,
- kwarg_var,
- )
-
- _signature_cache[func] = parse
- return parse
-
-
@typing.overload
def _dt_as_utc(dt: None) -> None:
...
@@ -601,8 +523,8 @@ mj2Z/FM1vQWgDynsRwNvrWnJHlespkrp8+vO1jNaibm+PhqXPPv30YwDZ6jApe3wUjFQobghvW9p
injecting_start_response("200 OK", [("Content-Type", "text/html")])
return [
f"""\
-<!DOCTYPE html>
-<html>
+<!doctype html>
+<html lang=en>
<head>
<title>About Werkzeug</title>
<style type="text/css">
diff --git a/contrib/python/Werkzeug/py3/werkzeug/_reloader.py b/contrib/python/Werkzeug/py3/werkzeug/_reloader.py
index d7a45152d5..57f3117bd7 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/_reloader.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/_reloader.py
@@ -11,16 +11,28 @@ from pathlib import PurePath
from ._internal import _log
# The various system prefixes where imports are found. Base values are
-# different when running in a virtualenv. The stat reloader won't scan
-# these directories, it would be too inefficient.
-prefix = {sys.prefix, sys.base_prefix, sys.exec_prefix, sys.base_exec_prefix}
+# different when running in a virtualenv. All reloaders will ignore the
+# base paths (usually the system installation). The stat reloader won't
+# scan the virtualenv paths, it will only include modules that are
+# already imported.
+_ignore_always = tuple({sys.base_prefix, sys.base_exec_prefix})
+prefix = {*_ignore_always, sys.prefix, sys.exec_prefix}
if hasattr(sys, "real_prefix"):
# virtualenv < 20
- prefix.add(sys.real_prefix) # type: ignore
+ prefix.add(sys.real_prefix) # type: ignore[attr-defined]
-_ignore_prefixes = tuple(prefix)
+_stat_ignore_scan = tuple(prefix)
del prefix
+_ignore_common_dirs = {
+ "__pycache__",
+ ".git",
+ ".hg",
+ ".tox",
+ ".nox",
+ ".pytest_cache",
+ ".mypy_cache",
+}
def _iter_module_paths() -> t.Iterator[str]:
@@ -29,7 +41,7 @@ def _iter_module_paths() -> t.Iterator[str]:
for module in list(sys.modules.values()):
name = getattr(module, "__file__", None)
- if name is None:
+ if name is None or name.startswith(_ignore_always):
continue
while not os.path.isfile(name):
@@ -67,24 +79,36 @@ def _find_stat_paths(
if os.path.isfile(path):
# zip file on sys.path, or extra file
paths.add(path)
+ continue
+
+ parent_has_py = {os.path.dirname(path): True}
for root, dirs, files in os.walk(path):
- # Ignore system prefixes for efficience. Don't scan
- # __pycache__, it will have a py or pyc module at the import
- # path. As an optimization, ignore .git and .hg since
- # nothing interesting will be there.
- if root.startswith(_ignore_prefixes) or os.path.basename(root) in {
- "__pycache__",
- ".git",
- ".hg",
- }:
+ # Optimizations: ignore system prefixes, __pycache__ will
+ # have a py or pyc module at the import path, ignore some
+ # common known dirs such as version control and tool caches.
+ if (
+ root.startswith(_stat_ignore_scan)
+ or os.path.basename(root) in _ignore_common_dirs
+ ):
dirs.clear()
continue
+ has_py = False
+
for name in files:
if name.endswith((".py", ".pyc")):
+ has_py = True
paths.add(os.path.join(root, name))
+ # Optimization: stop scanning a directory if neither it nor
+ # its parent contained Python files.
+ if not (has_py or parent_has_py[os.path.dirname(root)]):
+ dirs.clear()
+ continue
+
+ parent_has_py[root] = has_py
+
paths.update(_iter_module_paths())
_remove_by_pattern(paths, exclude_patterns)
return paths
@@ -167,32 +191,26 @@ def _get_args_for_reloading() -> t.List[str]:
py_script += ".exe"
if (
- (os.path.splitext(sys.executable)[1] == ".exe"
- and os.path.splitext(py_script)[1] == ".exe") or getattr(sys, "is_standalone_binary", False)
+ os.path.splitext(sys.executable)[1] == ".exe"
+ and os.path.splitext(py_script)[1] == ".exe"
):
rv.pop(0)
rv.append(py_script)
else:
# Executed a module, like "python -m werkzeug.serving".
- if sys.argv[0] == "-m":
- # Flask works around previous behavior by putting
- # "-m flask" in sys.argv.
- # TODO remove this once Flask no longer misbehaves
- args = sys.argv
- else:
- if os.path.isfile(py_script):
- # Rewritten by Python from "-m script" to "/path/to/script.py".
- py_module = t.cast(str, __main__.__package__)
- name = os.path.splitext(os.path.basename(py_script))[0]
+ if os.path.isfile(py_script):
+ # Rewritten by Python from "-m script" to "/path/to/script.py".
+ py_module = t.cast(str, __main__.__package__)
+ name = os.path.splitext(os.path.basename(py_script))[0]
- if name != "__main__":
- py_module += f".{name}"
- else:
- # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
- py_module = py_script
+ if name != "__main__":
+ py_module += f".{name}"
+ else:
+ # Incorrectly rewritten by pydevd debugger from "-m script" to "script".
+ py_module = py_script
- #rv.extend(("-m", py_module.lstrip(".")))
+ rv.extend(("-m", py_module.lstrip(".")))
rv.extend(args)
return rv
@@ -267,7 +285,7 @@ class StatReloaderLoop(ReloaderLoop):
return super().__enter__()
def run_step(self) -> None:
- for name in chain(_find_stat_paths(self.extra_files, self.exclude_patterns)):
+ for name in _find_stat_paths(self.extra_files, self.exclude_patterns):
try:
mtime = os.stat(name).st_mtime
except OSError:
@@ -311,9 +329,7 @@ class WatchdogReloaderLoop(ReloaderLoop):
self.event_handler = EventHandler(
patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
ignore_patterns=[
- "*/__pycache__/*",
- "*/.git/*",
- "*/.hg/*",
+ *[f"*/{d}/*" for d in _ignore_common_dirs],
*self.exclude_patterns,
],
)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/datastructures.py b/contrib/python/Werkzeug/py3/werkzeug/datastructures.py
index ff48a0d045..43ee8c7545 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/datastructures.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/datastructures.py
@@ -1,8 +1,8 @@
import base64
import codecs
import mimetypes
+import os
import re
-import warnings
from collections.abc import Collection
from collections.abc import MutableSet
from copy import deepcopy
@@ -11,9 +11,7 @@ from itertools import repeat
from os import fspath
from . import exceptions
-from ._internal import _make_encode_wrapper
from ._internal import _missing
-from .filesystem import get_filesystem_encoding
def is_immutable(self):
@@ -865,12 +863,15 @@ class Headers:
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
- To create a new :class:`Headers` object pass it a list or dict of headers
- which are used as default values. This does not reuse the list passed
- to the constructor for internal usage.
+ To create a new ``Headers`` object, pass it a list, dict, or
+ other ``Headers`` object with default values. These values are
+ validated the same way values added later are.
:param defaults: The list of default values for the :class:`Headers`.
+ .. versionchanged:: 2.1.0
+ Default values are validated the same as values added later.
+
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
@@ -884,10 +885,7 @@ class Headers:
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
- if isinstance(defaults, (list, Headers)):
- self._list.extend(defaults)
- else:
- self.extend(defaults)
+ self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
@@ -1082,20 +1080,6 @@ class Headers:
return False
return True
- def has_key(self, key):
- """
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use ``key in data``
- instead.
- """
- warnings.warn(
- "'has_key' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'key in data' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return key in self
-
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
@@ -1548,20 +1532,6 @@ class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
return True
return False
- def has_key(self, key):
- """
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use ``key in data``
- instead.
- """
- warnings.warn(
- "'has_key' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'key in data' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return key in self
-
def __repr__(self):
return f"{type(self).__name__}({self.dicts!r})"
@@ -1991,16 +1961,6 @@ def cache_control_property(key, empty, type):
)
-def cache_property(key, empty, type):
- warnings.warn(
- "'cache_property' is renamed to 'cache_control_property'. The"
- " old name is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return cache_control_property(key, empty, type)
-
-
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
@@ -2014,6 +1974,10 @@ class _CacheControl(UpdateDictMixin, dict):
to subclass it and add your own items have a look at the sourcecode for
that class.
+ .. versionchanged:: 2.1.0
+ Setting int properties such as ``max_age`` will convert the
+ value to an int.
+
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
@@ -2072,7 +2036,10 @@ class _CacheControl(UpdateDictMixin, dict):
elif value is True:
self[key] = None
else:
- self[key] = value
+ if type is not None:
+ self[key] = type(value)
+ else:
+ self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
@@ -2102,6 +2069,10 @@ class RequestCacheControl(ImmutableDictMixin, _CacheControl):
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
+ .. versionchanged:: 2.1.0
+ Setting int properties such as ``max_age`` will convert the
+ value to an int.
+
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
@@ -2122,6 +2093,13 @@ class ResponseCacheControl(_CacheControl):
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
+ .. versionchanged:: 2.1.1
+ ``s_maxage`` converts the value to an int.
+
+ .. versionchanged:: 2.1.0
+ Setting int properties such as ``max_age`` will convert the
+ value to an int.
+
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
@@ -2131,7 +2109,7 @@ class ResponseCacheControl(_CacheControl):
private = cache_control_property("private", "*", None)
must_revalidate = cache_control_property("must-revalidate", None, bool)
proxy_revalidate = cache_control_property("proxy-revalidate", None, bool)
- s_maxage = cache_control_property("s-maxage", None, None)
+ s_maxage = cache_control_property("s-maxage", None, int)
immutable = cache_control_property("immutable", None, bool)
@@ -2933,22 +2911,22 @@ class FileStorage:
self.name = name
self.stream = stream or BytesIO()
- # if no filename is provided we can attempt to get the filename
- # from the stream object passed. There we have to be careful to
- # skip things like <fdopen>, <stderr> etc. Python marks these
- # special filenames with angular brackets.
+ # If no filename is provided, attempt to get the filename from
+ # the stream object. Python names special streams like
+ # ``<stderr>`` with angular brackets, skip these streams.
if filename is None:
filename = getattr(stream, "name", None)
- s = _make_encode_wrapper(filename)
- if filename and filename[0] == s("<") and filename[-1] == s(">"):
- filename = None
- # Make sure the filename is not bytes. This might happen if
- # the file was opened from the bytes API.
- if isinstance(filename, bytes):
- filename = filename.decode(get_filesystem_encoding(), "replace")
+ if filename is not None:
+ filename = os.fsdecode(filename)
+
+ if filename and filename[0] == "<" and filename[-1] == ">":
+ filename = None
+ else:
+ filename = os.fsdecode(filename)
self.filename = filename
+
if headers is None:
headers = Headers()
self.headers = headers
@@ -2969,7 +2947,10 @@ class FileStorage:
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
- return int(self.headers.get("content-length") or 0)
+ try:
+ return int(self.headers.get("content-length") or 0)
+ except ValueError:
+ return 0
@property
def mimetype(self):
diff --git a/contrib/python/Werkzeug/py3/werkzeug/datastructures.pyi b/contrib/python/Werkzeug/py3/werkzeug/datastructures.pyi
index ee6e46d583..7bf7297898 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/datastructures.pyi
+++ b/contrib/python/Werkzeug/py3/werkzeug/datastructures.pyi
@@ -20,6 +20,7 @@ from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
+from _typeshed import SupportsKeysAndGetItem
from _typeshed.wsgi import WSGIEnvironment
from typing_extensions import Literal
@@ -96,9 +97,12 @@ class UpdateDictMixin(Dict[K, V]):
def __delitem__(self, key: K) -> None: ...
def clear(self) -> None: ...
def popitem(self) -> Tuple[K, V]: ...
- def update(
- self, *args: Union[Mapping[K, V], Iterable[Tuple[K, V]]], **kwargs: V
- ) -> None: ...
+ @overload
+ def update(self, __m: SupportsKeysAndGetItem[K, V], **kwargs: V) -> None: ...
+ @overload
+ def update(self, __m: Iterable[Tuple[K, V]], **kwargs: V) -> None: ...
+ @overload
+ def update(self, **kwargs: V) -> None: ...
class TypeConversionDict(Dict[K, V]):
@overload
@@ -274,11 +278,16 @@ class Headers(Dict[str, str]):
def __setitem__(self, key: int, value: Tuple[str, HV]) -> None: ...
@overload
def __setitem__(self, key: slice, value: Iterable[Tuple[str, HV]]) -> None: ...
+ @overload
def update(
- self,
- *args: Union[Mapping[str, HV], Iterable[Tuple[str, HV]]],
- **kwargs: Union[HV, Iterable[HV]],
+ self, __m: SupportsKeysAndGetItem[str, HV], **kwargs: Union[HV, Iterable[HV]]
+ ) -> None: ...
+ @overload
+ def update(
+ self, __m: Iterable[Tuple[str, HV]], **kwargs: Union[HV, Iterable[HV]]
) -> None: ...
+ @overload
+ def update(self, **kwargs: Union[HV, Iterable[HV]]) -> None: ...
def to_wsgi_list(self) -> List[Tuple[str, str]]: ...
def copy(self) -> Headers: ...
def __copy__(self) -> Headers: ...
@@ -420,7 +429,7 @@ class CharsetAccept(Accept):
_CPT = TypeVar("_CPT", str, int, bool)
_OptCPT = Optional[_CPT]
-def cache_property(key: str, empty: _OptCPT, type: Type[_CPT]) -> property: ...
+def cache_control_property(key: str, empty: _OptCPT, type: Type[_CPT]) -> property: ...
class _CacheControl(UpdateDictMixin[str, _OptCPT], Dict[str, _OptCPT]):
provided: bool
@@ -887,7 +896,7 @@ class FileStorage:
def __init__(
self,
stream: Optional[IO[bytes]] = None,
- filename: Optional[str] = None,
+ filename: Union[str, PathLike, None] = None,
name: Optional[str] = None,
content_type: Optional[str] = None,
content_length: Optional[int] = None,
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/__init__.py b/contrib/python/Werkzeug/py3/werkzeug/debug/__init__.py
index dbbe9651dd..e0dcc65fb4 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/__init__.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/debug/__init__.py
@@ -1,7 +1,6 @@
import getpass
import hashlib
import json
-import mimetypes
import os
import pkgutil
import re
@@ -9,20 +8,25 @@ import sys
import time
import typing as t
import uuid
+from contextlib import ExitStack
+from contextlib import nullcontext
+from io import BytesIO
from itertools import chain
from os.path import basename
from os.path import join
+from zlib import adler32
from .._internal import _log
+from ..exceptions import NotFound
from ..http import parse_cookie
from ..security import gen_salt
+from ..utils import send_file
from ..wrappers.request import Request
from ..wrappers.response import Response
from .console import Console
-from .tbtools import Frame
-from .tbtools import get_current_traceback
+from .tbtools import DebugFrameSummary
+from .tbtools import DebugTraceback
from .tbtools import render_console_html
-from .tbtools import Traceback
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
@@ -126,6 +130,9 @@ class _ConsoleFrame:
self.console = Console(namespace)
self.id = 0
+ def eval(self, code: str) -> t.Any:
+ return self.console.eval(code)
+
def get_pin_and_cookie_name(
app: "WSGIApplication",
@@ -146,7 +153,7 @@ def get_pin_and_cookie_name(
return None, None
# Pin was provided explicitly
- if pin is not None and pin.replace("-", "").isdigit():
+ if pin is not None and pin.replace("-", "").isdecimal():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
@@ -220,13 +227,20 @@ class DebuggedApplication:
from myapp import app
app = DebuggedApplication(app, evalex=True)
- The `evalex` keyword argument allows evaluating expressions in a
- traceback's frame context.
+ The ``evalex`` argument allows evaluating expressions in any frame
+ of a traceback. This works by preserving each frame with its local
+ state. Some state, such as :doc:`local`, cannot be restored with the
+ frame by default. When ``evalex`` is enabled,
+ ``environ["werkzeug.debug.preserve_context"]`` will be a callable
+ that takes a context manager, and can be called multiple times.
+ Each context manager will be entered before evaluating code in the
+ frame, then exited again, so they can perform setup and cleanup for
+ each call.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
- :param request_key: The key that points to the request object in ths
+ :param request_key: The key that points to the request object in this
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
@@ -238,6 +252,9 @@ class DebuggedApplication:
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
+
+ .. versionchanged:: 2.2
+ Added the ``werkzeug.debug.preserve_context`` environ key.
"""
_pin: str
@@ -258,8 +275,8 @@ class DebuggedApplication:
console_init_func = None
self.app = app
self.evalex = evalex
- self.frames: t.Dict[int, t.Union[Frame, _ConsoleFrame]] = {}
- self.tracebacks: t.Dict[int, Traceback] = {}
+ self.frames: t.Dict[int, t.Union[DebugFrameSummary, _ConsoleFrame]] = {}
+ self.frame_contexts: t.Dict[int, t.List[t.ContextManager[None]]] = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
@@ -302,34 +319,37 @@ class DebuggedApplication:
self, environ: "WSGIEnvironment", start_response: "StartResponse"
) -> t.Iterator[bytes]:
"""Run the application and conserve the traceback frames."""
+ contexts: t.List[t.ContextManager[t.Any]] = []
+
+ if self.evalex:
+ environ["werkzeug.debug.preserve_context"] = contexts.append
+
app_iter = None
try:
app_iter = self.app(environ, start_response)
yield from app_iter
if hasattr(app_iter, "close"):
app_iter.close() # type: ignore
- except Exception:
+ except Exception as e:
if hasattr(app_iter, "close"):
app_iter.close() # type: ignore
- traceback = get_current_traceback(
- skip=1,
- show_hidden_frames=self.show_hidden_frames,
- ignore_system_exceptions=True,
+
+ tb = DebugTraceback(e, skip=1, hide=not self.show_hidden_frames)
+
+ for frame in tb.all_frames:
+ self.frames[id(frame)] = frame
+ self.frame_contexts[id(frame)] = contexts
+
+ is_trusted = bool(self.check_pin_trust(environ))
+ html = tb.render_debugger_html(
+ evalex=self.evalex,
+ secret=self.secret,
+ evalex_trusted=is_trusted,
)
- for frame in traceback.frames:
- self.frames[frame.id] = frame
- self.tracebacks[traceback.id] = traceback
+ response = Response(html, status=500, mimetype="text/html")
try:
- start_response(
- "500 INTERNAL SERVER ERROR",
- [
- ("Content-Type", "text/html; charset=utf-8"),
- # Disable Chrome's XSS protection, the debug
- # output can cause false-positives.
- ("X-XSS-Protection", "0"),
- ],
- )
+ yield from response(environ, start_response)
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
@@ -340,19 +360,23 @@ class DebuggedApplication:
"response at a point where response headers were already "
"sent.\n"
)
- else:
- is_trusted = bool(self.check_pin_trust(environ))
- yield traceback.render_full(
- evalex=self.evalex, evalex_trusted=is_trusted, secret=self.secret
- ).encode("utf-8", "replace")
- traceback.log(environ["wsgi.errors"])
+ environ["wsgi.errors"].write("".join(tb.render_traceback_text()))
- def execute_command(
- self, request: Request, command: str, frame: t.Union[Frame, _ConsoleFrame]
+ def execute_command( # type: ignore[return]
+ self,
+ request: Request,
+ command: str,
+ frame: t.Union[DebugFrameSummary, _ConsoleFrame],
) -> Response:
"""Execute a command in a console."""
- return Response(frame.console.eval(command), mimetype="text/html")
+ contexts = self.frame_contexts.get(id(frame), [])
+
+ with ExitStack() as exit_stack:
+ for cm in contexts:
+ exit_stack.enter_context(cm)
+
+ return Response(frame.eval(command), mimetype="text/html")
def display_console(self, request: Request) -> Response:
"""Display a standalone shell."""
@@ -371,15 +395,20 @@ class DebuggedApplication:
def get_resource(self, request: Request, filename: str) -> Response:
"""Return a static resource from the shared folder."""
- filename = join("shared", basename(filename))
+ path = join("shared", basename(filename))
+
try:
- data = pkgutil.get_data(__package__, filename)
+ data = pkgutil.get_data(__package__, path)
except OSError:
- data = None
- if data is not None:
- mimetype = mimetypes.guess_type(filename)[0] or "application/octet-stream"
- return Response(data, mimetype=mimetype)
- return Response("Not Found", status=404)
+ return NotFound() # type: ignore[return-value]
+ else:
+ if data is None:
+ return NotFound() # type: ignore[return-value]
+
+ etag = str(adler32(data) & 0xFFFFFFFF)
+ return send_file(
+ BytesIO(data), request.environ, download_name=filename, etag=etag
+ )
def check_pin_trust(self, environ: "WSGIEnvironment") -> t.Optional[bool]:
"""Checks if the request passed the pin test. This returns `True` if the
@@ -392,12 +421,16 @@ class DebuggedApplication:
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
- ts, pin_hash = val.split("|", 1)
- if not ts.isdigit():
+ ts_str, pin_hash = val.split("|", 1)
+
+ try:
+ ts = int(ts_str)
+ except ValueError:
return False
+
if pin_hash != hash_pin(self.pin):
return None
- return (time.time() - PIN_TIME) < int(ts)
+ return (time.time() - PIN_TIME) < ts
def _fail_pin_auth(self) -> None:
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/console.py b/contrib/python/Werkzeug/py3/werkzeug/debug/console.py
index f1a7b725ef..69974d1235 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/console.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/debug/console.py
@@ -1,10 +1,11 @@
import code
import sys
import typing as t
-from html import escape
+from contextvars import ContextVar
from types import CodeType
-from ..local import Local
+from markupsafe import escape
+
from .repr import debug_repr
from .repr import dump
from .repr import helper
@@ -12,7 +13,8 @@ from .repr import helper
if t.TYPE_CHECKING:
import codeop # noqa: F401
-_local = Local()
+_stream: ContextVar["HTMLStringO"] = ContextVar("werkzeug.debug.console.stream")
+_ipy: ContextVar = ContextVar("werkzeug.debug.console.ipy")
class HTMLStringO:
@@ -64,26 +66,29 @@ class ThreadedStream:
def push() -> None:
if not isinstance(sys.stdout, ThreadedStream):
sys.stdout = t.cast(t.TextIO, ThreadedStream())
- _local.stream = HTMLStringO()
+
+ _stream.set(HTMLStringO())
@staticmethod
def fetch() -> str:
try:
- stream = _local.stream
- except AttributeError:
+ stream = _stream.get()
+ except LookupError:
return ""
- return stream.reset() # type: ignore
+
+ return stream.reset()
@staticmethod
def displayhook(obj: object) -> None:
try:
- stream = _local.stream
- except AttributeError:
+ stream = _stream.get()
+ except LookupError:
return _displayhook(obj) # type: ignore
+
# stream._write bypasses escaping as debug_repr is
# already generating HTML for us.
if obj is not None:
- _local._current_ipy.locals["_"] = obj
+ _ipy.get().locals["_"] = obj
stream._write(debug_repr(obj))
def __setattr__(self, name: str, value: t.Any) -> None:
@@ -94,9 +99,10 @@ class ThreadedStream:
def __getattribute__(self, name: str) -> t.Any:
try:
- stream = _local.stream
- except AttributeError:
- stream = sys.__stdout__
+ stream = _stream.get()
+ except LookupError:
+ stream = sys.__stdout__ # type: ignore[assignment]
+
return getattr(stream, name)
def __repr__(self) -> str:
@@ -167,7 +173,7 @@ class _InteractiveConsole(code.InteractiveInterpreter):
del self.buffer[:]
finally:
output = ThreadedStream.fetch()
- return prompt + escape(source) + output
+ return f"{prompt}{escape(source)}{output}"
def runcode(self, code: CodeType) -> None:
try:
@@ -176,16 +182,18 @@ class _InteractiveConsole(code.InteractiveInterpreter):
self.showtraceback()
def showtraceback(self) -> None:
- from .tbtools import get_current_traceback
+ from .tbtools import DebugTraceback
- tb = get_current_traceback(skip=1)
- sys.stdout._write(tb.render_summary()) # type: ignore
+ exc = t.cast(BaseException, sys.exc_info()[1])
+ te = DebugTraceback(exc, skip=1)
+ sys.stdout._write(te.render_traceback_html()) # type: ignore
def showsyntaxerror(self, filename: t.Optional[str] = None) -> None:
- from .tbtools import get_current_traceback
+ from .tbtools import DebugTraceback
- tb = get_current_traceback(skip=4)
- sys.stdout._write(tb.render_summary()) # type: ignore
+ exc = t.cast(BaseException, sys.exc_info()[1])
+ te = DebugTraceback(exc, skip=4)
+ sys.stdout._write(te.render_traceback_html()) # type: ignore
def write(self, data: str) -> None:
sys.stdout.write(data)
@@ -206,7 +214,7 @@ class Console:
self._ipy = _InteractiveConsole(globals, locals)
def eval(self, code: str) -> str:
- _local._current_ipy = self._ipy
+ _ipy.set(self._ipy)
old_sys_stdout = sys.stdout
try:
return self._ipy.runsource(code)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/repr.py b/contrib/python/Werkzeug/py3/werkzeug/debug/repr.py
index 7d847b0399..c0872f1808 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/repr.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/debug/repr.py
@@ -9,9 +9,10 @@ import re
import sys
import typing as t
from collections import deque
-from html import escape
from traceback import format_exception_only
+from markupsafe import escape
+
missing = object()
_paragraph_re = re.compile(r"(?:\r\n|\r|\n){2,}")
RegexType = type(_paragraph_re)
@@ -83,8 +84,8 @@ def _add_subclass_info(
inner: str, obj: object, base: t.Union[t.Type, t.Tuple[t.Type, ...]]
) -> str:
if isinstance(base, tuple):
- for base in base:
- if type(obj) is base:
+ for cls in base:
+ if type(obj) is cls:
return inner
elif type(obj) is base:
return inner
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/FONT_LICENSE b/contrib/python/Werkzeug/py3/werkzeug/debug/shared/FONT_LICENSE
deleted file mode 100644
index ae78a8f94e..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/FONT_LICENSE
+++ /dev/null
@@ -1,96 +0,0 @@
--------------------------------
-UBUNTU FONT LICENCE Version 1.0
--------------------------------
-
-PREAMBLE
-This licence allows the licensed fonts to be used, studied, modified and
-redistributed freely. The fonts, including any derivative works, can be
-bundled, embedded, and redistributed provided the terms of this licence
-are met. The fonts and derivatives, however, cannot be released under
-any other licence. The requirement for fonts to remain under this
-licence does not require any document created using the fonts or their
-derivatives to be published under this licence, as long as the primary
-purpose of the document is not to be a vehicle for the distribution of
-the fonts.
-
-DEFINITIONS
-"Font Software" refers to the set of files released by the Copyright
-Holder(s) under this licence and clearly marked as such. This may
-include source files, build scripts and documentation.
-
-"Original Version" refers to the collection of Font Software components
-as received under this licence.
-
-"Modified Version" refers to any derivative made by adding to, deleting,
-or substituting -- in part or in whole -- any of the components of the
-Original Version, by changing formats or by porting the Font Software to
-a new environment.
-
-"Copyright Holder(s)" refers to all individuals and companies who have a
-copyright ownership of the Font Software.
-
-"Substantially Changed" refers to Modified Versions which can be easily
-identified as dissimilar to the Font Software by users of the Font
-Software comparing the Original Version with the Modified Version.
-
-To "Propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification and with or without charging
-a redistribution fee), making available to the public, and in some
-countries other activities as well.
-
-PERMISSION & CONDITIONS
-This licence does not grant any rights under trademark law and all such
-rights are reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of the Font Software, to propagate the Font Software, subject to
-the below conditions:
-
-1) Each copy of the Font Software must contain the above copyright
-notice and this licence. These can be included either as stand-alone
-text files, human-readable headers or in the appropriate machine-
-readable metadata fields within text or binary files as long as those
-fields can be easily viewed by the user.
-
-2) The font name complies with the following:
-(a) The Original Version must retain its name, unmodified.
-(b) Modified Versions which are Substantially Changed must be renamed to
-avoid use of the name of the Original Version or similar names entirely.
-(c) Modified Versions which are not Substantially Changed must be
-renamed to both (i) retain the name of the Original Version and (ii) add
-additional naming elements to distinguish the Modified Version from the
-Original Version. The name of such Modified Versions must be the name of
-the Original Version, with "derivative X" where X represents the name of
-the new work, appended to that name.
-
-3) The name(s) of the Copyright Holder(s) and any contributor to the
-Font Software shall not be used to promote, endorse or advertise any
-Modified Version, except (i) as required by this licence, (ii) to
-acknowledge the contribution(s) of the Copyright Holder(s) or (iii) with
-their explicit written permission.
-
-4) The Font Software, modified or unmodified, in part or in whole, must
-be distributed entirely under this licence, and must not be distributed
-under any other licence. The requirement for fonts to remain under this
-licence does not affect any document created using the Font Software,
-except any version of the Font Software extracted from a document
-created using the Font Software may only be distributed under this
-licence.
-
-TERMINATION
-This licence becomes null and void if any of the above conditions are
-not met.
-
-DISCLAIMER
-THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
-COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
-DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER
-DEALINGS IN THE FONT SOFTWARE.
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/source.png b/contrib/python/Werkzeug/py3/werkzeug/debug/shared/source.png
deleted file mode 100644
index f7ea90419d..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/source.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/style.css b/contrib/python/Werkzeug/py3/werkzeug/debug/shared/style.css
index bd996134dd..e9397ca0a1 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/style.css
+++ b/contrib/python/Werkzeug/py3/werkzeug/debug/shared/style.css
@@ -1,16 +1,6 @@
-@font-face {
- font-family: 'Ubuntu';
- font-style: normal;
- font-weight: normal;
- src: local('Ubuntu'), local('Ubuntu-Regular'),
- url('?__debugger__=yes&cmd=resource&f=ubuntu.ttf') format('truetype');
-}
-
-body, input { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
- 'Verdana', sans-serif; color: #000; text-align: center;
+body, input { font-family: sans-serif; color: #000; text-align: center;
margin: 1em; padding: 0; font-size: 15px; }
-h1, h2, h3 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
- 'Geneva', 'Verdana', sans-serif; font-weight: normal; }
+h1, h2, h3 { font-weight: normal; }
input { background-color: #fff; margin: 0; text-align: left;
outline: none !important; }
@@ -18,8 +8,7 @@ input[type="submit"] { padding: 3px 6px; }
a { color: #11557C; }
a:hover { color: #177199; }
pre, code,
-textarea { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
- monospace; font-size: 14px; }
+textarea { font-family: monospace; font-size: 14px; }
div.debugger { text-align: left; padding: 12px; margin: auto;
background-color: white; }
@@ -75,8 +64,7 @@ pre.console { border: 1px solid #ccc; background: white!important;
max-height: 400px; overflow: auto; }
pre.console form { color: #555; }
pre.console input { background-color: transparent; color: #555;
- width: 90%; font-family: 'Consolas', 'Deja Vu Sans Mono',
- 'Bitstream Vera Sans Mono', monospace; font-size: 14px;
+ width: 90%; font-family: monospace; font-size: 14px;
border: none!important; }
span.string { color: #30799B; }
@@ -94,8 +82,7 @@ a.open { background-image: url(?__debugger__=yes&cmd=resource&f=less.png); }
pre.console div.traceback,
pre.console div.box { margin: 5px 10px; white-space: normal;
border: 1px solid #11557C; padding: 10px;
- font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
- 'Verdana', sans-serif; }
+ font-family: sans-serif; }
pre.console div.box h3,
pre.console div.traceback h3 { margin: -10px -10px 10px -10px; padding: 5px;
background: #11557C; color: white; }
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/ubuntu.ttf b/contrib/python/Werkzeug/py3/werkzeug/debug/shared/ubuntu.ttf
deleted file mode 100644
index 8079f938c9..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/shared/ubuntu.ttf
+++ /dev/null
Binary files differ
diff --git a/contrib/python/Werkzeug/py3/werkzeug/debug/tbtools.py b/contrib/python/Werkzeug/py3/werkzeug/debug/tbtools.py
index 9d754aa5eb..ea90de9254 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/debug/tbtools.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/debug/tbtools.py
@@ -1,42 +1,28 @@
-import codecs
-import inspect
+import itertools
+import linecache
import os
import re
import sys
import sysconfig
import traceback
import typing as t
-from html import escape
-from tokenize import TokenError
-from types import CodeType
-from types import TracebackType
-from .._internal import _to_str
-from ..filesystem import get_filesystem_encoding
+from markupsafe import escape
+
from ..utils import cached_property
from .console import Console
-_coding_re = re.compile(rb"coding[:=]\s*([-\w.]+)")
-_line_re = re.compile(rb"^(.*?)$", re.MULTILINE)
-_funcdef_re = re.compile(r"^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)")
-
HEADER = """\
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
-<html>
+<!doctype html>
+<html lang=en>
<head>
<title>%(title)s // Werkzeug Debugger</title>
- <link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css"
- type="text/css">
- <!-- We need to make sure this has a favicon so that the debugger does
- not accidentally trigger a request to /favicon.ico which might
- change the application's state. -->
+ <link rel="stylesheet" href="?__debugger__=yes&amp;cmd=resource&amp;f=style.css">
<link rel="shortcut icon"
href="?__debugger__=yes&amp;cmd=resource&amp;f=console.png">
<script src="?__debugger__=yes&amp;cmd=resource&amp;f=debugger.js"></script>
- <script type="text/javascript">
- var TRACEBACK = %(traceback_id)d,
- CONSOLE_MODE = %(console)s,
+ <script>
+ var CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
EVALEX_TRUSTED = %(evalex_trusted)s,
SECRET = "%(secret)s";
@@ -45,6 +31,7 @@ HEADER = """\
<body style="background-color: #fff">
<div class="debugger">
"""
+
FOOTER = """\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
@@ -133,468 +120,316 @@ FRAME_HTML = """\
</div>
"""
-SOURCE_LINE_HTML = """\
-<tr class="%(classes)s">
- <td class=lineno>%(lineno)s</td>
- <td>%(code)s</td>
-</tr>
-"""
+def _process_traceback(
+ exc: BaseException,
+ te: t.Optional[traceback.TracebackException] = None,
+ *,
+ skip: int = 0,
+ hide: bool = True,
+) -> traceback.TracebackException:
+ if te is None:
+ te = traceback.TracebackException.from_exception(exc, lookup_lines=False)
+
+ # Get the frames the same way StackSummary.extract did, in order
+ # to match each frame with the FrameSummary to augment.
+ frame_gen = traceback.walk_tb(exc.__traceback__)
+ limit = getattr(sys, "tracebacklimit", None)
+
+ if limit is not None:
+ if limit < 0:
+ limit = 0
+
+ frame_gen = itertools.islice(frame_gen, limit)
+
+ if skip:
+ frame_gen = itertools.islice(frame_gen, skip, None)
+ del te.stack[:skip]
+
+ new_stack: t.List[DebugFrameSummary] = []
+ hidden = False
+
+ # Match each frame with the FrameSummary that was generated.
+ # Hide frames using Paste's __traceback_hide__ rules. Replace
+ # all visible FrameSummary with DebugFrameSummary.
+ for (f, _), fs in zip(frame_gen, te.stack):
+ if hide:
+ hide_value = f.f_locals.get("__traceback_hide__", False)
+
+ if hide_value in {"before", "before_and_this"}:
+ new_stack = []
+ hidden = False
-def render_console_html(secret: str, evalex_trusted: bool = True) -> str:
- return CONSOLE_HTML % {
- "evalex": "true",
- "evalex_trusted": "true" if evalex_trusted else "false",
- "console": "true",
- "title": "Console",
- "secret": secret,
- "traceback_id": -1,
- }
+ if hide_value == "before_and_this":
+ continue
+ elif hide_value in {"reset", "reset_and_this"}:
+ hidden = False
+ if hide_value == "reset_and_this":
+ continue
+ elif hide_value in {"after", "after_and_this"}:
+ hidden = True
-def get_current_traceback(
- ignore_system_exceptions: bool = False,
- show_hidden_frames: bool = False,
- skip: int = 0,
-) -> "Traceback":
- """Get the current exception info as `Traceback` object. Per default
- calling this method will reraise system exceptions such as generator exit,
- system exit or others. This behavior can be disabled by passing `False`
- to the function as first parameter.
- """
- info = t.cast(
- t.Tuple[t.Type[BaseException], BaseException, TracebackType], sys.exc_info()
- )
- exc_type, exc_value, tb = info
-
- if ignore_system_exceptions and exc_type in {
- SystemExit,
- KeyboardInterrupt,
- GeneratorExit,
- }:
- raise
- for _ in range(skip):
- if tb.tb_next is None:
- break
- tb = tb.tb_next
- tb = Traceback(exc_type, exc_value, tb)
- if not show_hidden_frames:
- tb.filter_hidden_frames()
- return tb
-
-
-class Line:
- """Helper for the source renderer."""
-
- __slots__ = ("lineno", "code", "in_frame", "current")
-
- def __init__(self, lineno: int, code: str) -> None:
- self.lineno = lineno
- self.code = code
- self.in_frame = False
- self.current = False
-
- @property
- def classes(self) -> t.List[str]:
- rv = ["line"]
- if self.in_frame:
- rv.append("in-frame")
- if self.current:
- rv.append("current")
- return rv
-
- def render(self) -> str:
- return SOURCE_LINE_HTML % {
- "classes": " ".join(self.classes),
- "lineno": self.lineno,
- "code": escape(self.code),
+ if hide_value == "after_and_this":
+ continue
+ elif hide_value or hidden:
+ continue
+
+ frame_args: t.Dict[str, t.Any] = {
+ "filename": fs.filename,
+ "lineno": fs.lineno,
+ "name": fs.name,
+ "locals": f.f_locals,
+ "globals": f.f_globals,
}
+ if hasattr(fs, "colno"):
+ frame_args["colno"] = fs.colno # type: ignore[attr-defined]
+ frame_args["end_colno"] = fs.end_colno # type: ignore[attr-defined]
+
+ new_stack.append(DebugFrameSummary(**frame_args))
-class Traceback:
- """Wraps a traceback."""
+ # The codeop module is used to compile code from the interactive
+ # debugger. Hide any codeop frames from the bottom of the traceback.
+ while new_stack:
+ module = new_stack[0].global_ns.get("__name__")
+
+ if module is None:
+ module = new_stack[0].local_ns.get("__name__")
+
+ if module == "codeop":
+ del new_stack[0]
+ else:
+ break
+
+ te.stack[:] = new_stack
+
+ if te.__context__:
+ context_exc = t.cast(BaseException, exc.__context__)
+ te.__context__ = _process_traceback(context_exc, te.__context__, hide=hide)
+
+ if te.__cause__:
+ cause_exc = t.cast(BaseException, exc.__cause__)
+ te.__cause__ = _process_traceback(cause_exc, te.__cause__, hide=hide)
+
+ return te
+
+
+class DebugTraceback:
+ __slots__ = ("_te", "_cache_all_tracebacks", "_cache_all_frames")
def __init__(
self,
- exc_type: t.Type[BaseException],
- exc_value: BaseException,
- tb: TracebackType,
+ exc: BaseException,
+ te: t.Optional[traceback.TracebackException] = None,
+ *,
+ skip: int = 0,
+ hide: bool = True,
) -> None:
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.tb = tb
-
- exception_type = exc_type.__name__
- if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}:
- exception_type = f"{exc_type.__module__}.{exception_type}"
- self.exception_type = exception_type
-
- self.groups = []
- memo = set()
- while True:
- self.groups.append(Group(exc_type, exc_value, tb))
- memo.add(id(exc_value))
- exc_value = exc_value.__cause__ or exc_value.__context__ # type: ignore
- if exc_value is None or id(exc_value) in memo:
- break
- exc_type = type(exc_value)
- tb = exc_value.__traceback__ # type: ignore
- self.groups.reverse()
- self.frames = [frame for group in self.groups for frame in group.frames]
-
- def filter_hidden_frames(self) -> None:
- """Remove the frames according to the paste spec."""
- for group in self.groups:
- group.filter_hidden_frames()
-
- self.frames[:] = [frame for group in self.groups for frame in group.frames]
-
- @property
- def is_syntax_error(self) -> bool:
- """Is it a syntax error?"""
- return isinstance(self.exc_value, SyntaxError)
-
- @property
- def exception(self) -> str:
- """String representation of the final exception."""
- return self.groups[-1].exception
-
- def log(self, logfile: t.Optional[t.IO[str]] = None) -> None:
- """Log the ASCII traceback into a file object."""
- if logfile is None:
- logfile = sys.stderr
- tb = f"{self.plaintext.rstrip()}\n"
- logfile.write(tb)
-
- def render_summary(self, include_title: bool = True) -> str:
- """Render the traceback for the interactive console."""
- title = ""
- classes = ["traceback"]
- if not self.frames:
- classes.append("noframe-traceback")
- frames = []
+ self._te = _process_traceback(exc, te, skip=skip, hide=hide)
+
+ def __str__(self) -> str:
+ return f"<{type(self).__name__} {self._te}>"
+
+ @cached_property
+ def all_tracebacks(
+ self,
+ ) -> t.List[t.Tuple[t.Optional[str], traceback.TracebackException]]:
+ out = []
+ current = self._te
+
+ while current is not None:
+ if current.__cause__ is not None:
+ chained_msg = (
+ "The above exception was the direct cause of the"
+ " following exception"
+ )
+ chained_exc = current.__cause__
+ elif current.__context__ is not None and not current.__suppress_context__:
+ chained_msg = (
+ "During handling of the above exception, another"
+ " exception occurred"
+ )
+ chained_exc = current.__context__
+ else:
+ chained_msg = None
+ chained_exc = None
+
+ out.append((chained_msg, current))
+ current = chained_exc
+
+ return out
+
+ @cached_property
+ def all_frames(self) -> t.List["DebugFrameSummary"]:
+ return [
+ f for _, te in self.all_tracebacks for f in te.stack # type: ignore[misc]
+ ]
+
+ def render_traceback_text(self) -> str:
+ return "".join(self._te.format())
+
+ def render_traceback_html(self, include_title: bool = True) -> str:
+ library_frames = [f.is_library for f in self.all_frames]
+ mark_library = 0 < sum(library_frames) < len(library_frames)
+ rows = []
+
+ if not library_frames:
+ classes = "traceback noframe-traceback"
else:
- library_frames = sum(frame.is_library for frame in self.frames)
- mark_lib = 0 < library_frames < len(self.frames)
- frames = [group.render(mark_lib=mark_lib) for group in self.groups]
+ classes = "traceback"
+
+ for msg, current in reversed(self.all_tracebacks):
+ row_parts = []
+
+ if msg is not None:
+ row_parts.append(f'<li><div class="exc-divider">{msg}:</div>')
+
+ for frame in current.stack:
+ frame = t.cast(DebugFrameSummary, frame)
+ info = f' title="{escape(frame.info)}"' if frame.info else ""
+ row_parts.append(f"<li{info}>{frame.render_html(mark_library)}")
+
+ rows.append("\n".join(row_parts))
+
+ is_syntax_error = issubclass(self._te.exc_type, SyntaxError)
if include_title:
- if self.is_syntax_error:
+ if is_syntax_error:
title = "Syntax Error"
else:
title = "Traceback <em>(most recent call last)</em>:"
+ else:
+ title = ""
+
+ exc_full = escape("".join(self._te.format_exception_only()))
- if self.is_syntax_error:
- description = f"<pre class=syntaxerror>{escape(self.exception)}</pre>"
+ if is_syntax_error:
+ description = f"<pre class=syntaxerror>{exc_full}</pre>"
else:
- description = f"<blockquote>{escape(self.exception)}</blockquote>"
+ description = f"<blockquote>{exc_full}</blockquote>"
return SUMMARY_HTML % {
- "classes": " ".join(classes),
- "title": f"<h3>{title if title else ''}</h3>",
- "frames": "\n".join(frames),
+ "classes": classes,
+ "title": f"<h3>{title}</h3>",
+ "frames": "\n".join(rows),
"description": description,
}
- def render_full(
- self,
- evalex: bool = False,
- secret: t.Optional[str] = None,
- evalex_trusted: bool = True,
+ def render_debugger_html(
+ self, evalex: bool, secret: str, evalex_trusted: bool
) -> str:
- """Render the Full HTML page with the traceback info."""
- exc = escape(self.exception)
+ exc_lines = list(self._te.format_exception_only())
+ plaintext = "".join(self._te.format())
return PAGE_HTML % {
"evalex": "true" if evalex else "false",
"evalex_trusted": "true" if evalex_trusted else "false",
"console": "false",
- "title": exc,
- "exception": exc,
- "exception_type": escape(self.exception_type),
- "summary": self.render_summary(include_title=False),
- "plaintext": escape(self.plaintext),
- "plaintext_cs": re.sub("-{2,}", "-", self.plaintext),
- "traceback_id": self.id,
+ "title": exc_lines[0],
+ "exception": escape("".join(exc_lines)),
+ "exception_type": escape(self._te.exc_type.__name__),
+ "summary": self.render_traceback_html(include_title=False),
+ "plaintext": escape(plaintext),
+ "plaintext_cs": re.sub("-{2,}", "-", plaintext),
"secret": secret,
}
- @cached_property
- def plaintext(self) -> str:
- return "\n".join([group.render_text() for group in self.groups])
-
- @property
- def id(self) -> int:
- return id(self)
-
-class Group:
- """A group of frames for an exception in a traceback. If the
- exception has a ``__cause__`` or ``__context__``, there are multiple
- exception groups.
+class DebugFrameSummary(traceback.FrameSummary):
+ """A :class:`traceback.FrameSummary` that can evaluate code in the
+ frame's namespace.
"""
- def __init__(
- self,
- exc_type: t.Type[BaseException],
- exc_value: BaseException,
- tb: TracebackType,
- ) -> None:
- self.exc_type = exc_type
- self.exc_value = exc_value
- self.info = None
- if exc_value.__cause__ is not None:
- self.info = (
- "The above exception was the direct cause of the following exception"
- )
- elif exc_value.__context__ is not None:
- self.info = (
- "During handling of the above exception, another exception occurred"
- )
-
- self.frames = []
- while tb is not None:
- self.frames.append(Frame(exc_type, exc_value, tb))
- tb = tb.tb_next # type: ignore
-
- def filter_hidden_frames(self) -> None:
- # An exception may not have a traceback to filter frames, such
- # as one re-raised from ProcessPoolExecutor.
- if not self.frames:
- return
-
- new_frames: t.List[Frame] = []
- hidden = False
-
- for frame in self.frames:
- hide = frame.hide
- if hide in ("before", "before_and_this"):
- new_frames = []
- hidden = False
- if hide == "before_and_this":
- continue
- elif hide in ("reset", "reset_and_this"):
- hidden = False
- if hide == "reset_and_this":
- continue
- elif hide in ("after", "after_and_this"):
- hidden = True
- if hide == "after_and_this":
- continue
- elif hide or hidden:
- continue
- new_frames.append(frame)
-
- # if we only have one frame and that frame is from the codeop
- # module, remove it.
- if len(new_frames) == 1 and self.frames[0].module == "codeop":
- del self.frames[:]
-
- # if the last frame is missing something went terrible wrong :(
- elif self.frames[-1] in new_frames:
- self.frames[:] = new_frames
-
- @property
- def exception(self) -> str:
- """String representation of the exception."""
- buf = traceback.format_exception_only(self.exc_type, self.exc_value)
- rv = "".join(buf).strip()
- return _to_str(rv, "utf-8", "replace")
-
- def render(self, mark_lib: bool = True) -> str:
- out = []
- if self.info is not None:
- out.append(f'<li><div class="exc-divider">{self.info}:</div>')
- for frame in self.frames:
- title = f' title="{escape(frame.info)}"' if frame.info else ""
- out.append(f"<li{title}>{frame.render(mark_lib=mark_lib)}")
- return "\n".join(out)
-
- def render_text(self) -> str:
- out = []
- if self.info is not None:
- out.append(f"\n{self.info}:\n")
- out.append("Traceback (most recent call last):")
- for frame in self.frames:
- out.append(frame.render_text())
- out.append(self.exception)
- return "\n".join(out)
-
-
-class Frame:
- """A single frame in a traceback."""
+ __slots__ = (
+ "local_ns",
+ "global_ns",
+ "_cache_info",
+ "_cache_is_library",
+ "_cache_console",
+ )
def __init__(
self,
- exc_type: t.Type[BaseException],
- exc_value: BaseException,
- tb: TracebackType,
+ *,
+ locals: t.Dict[str, t.Any],
+ globals: t.Dict[str, t.Any],
+ **kwargs: t.Any,
) -> None:
- self.lineno = tb.tb_lineno
- self.function_name = tb.tb_frame.f_code.co_name
- self.locals = tb.tb_frame.f_locals
- self.globals = tb.tb_frame.f_globals
-
- fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
- if fn[-4:] in (".pyo", ".pyc"):
- fn = fn[:-1]
- # if it's a file on the file system resolve the real filename.
- if os.path.isfile(fn):
- fn = os.path.realpath(fn)
- self.filename = _to_str(fn, get_filesystem_encoding())
- self.module = self.globals.get("__name__", self.locals.get("__name__"))
- self.loader = self.globals.get("__loader__", self.locals.get("__loader__"))
- self.code = tb.tb_frame.f_code
-
- # support for paste's traceback extensions
- self.hide = self.locals.get("__traceback_hide__", False)
- info = self.locals.get("__traceback_info__")
- if info is not None:
- info = _to_str(info, "utf-8", "replace")
- self.info = info
-
- def render(self, mark_lib: bool = True) -> str:
- """Render a single frame in a traceback."""
- return FRAME_HTML % {
- "id": self.id,
- "filename": escape(self.filename),
- "lineno": self.lineno,
- "function_name": escape(self.function_name),
- "lines": self.render_line_context(),
- "library": "library" if mark_lib and self.is_library else "",
- }
+ super().__init__(locals=None, **kwargs)
+ self.local_ns = locals
+ self.global_ns = globals
+
+ @cached_property
+ def info(self) -> t.Optional[str]:
+ return self.local_ns.get("__traceback_info__")
@cached_property
def is_library(self) -> bool:
return any(
- self.filename.startswith(os.path.realpath(path))
+ self.filename.startswith((path, os.path.realpath(path)))
for path in sysconfig.get_paths().values()
)
- def render_text(self) -> str:
- return (
- f' File "{self.filename}", line {self.lineno}, in {self.function_name}\n'
- f" {self.current_line.strip()}"
- )
+ @cached_property
+ def console(self) -> Console:
+ return Console(self.global_ns, self.local_ns)
+
+ def eval(self, code: str) -> t.Any:
+ return self.console.eval(code)
- def render_line_context(self) -> str:
- before, current, after = self.get_context_lines()
- rv = []
+ def render_html(self, mark_library: bool) -> str:
+ context = 5
+ lines = linecache.getlines(self.filename)
+ line_idx = self.lineno - 1 # type: ignore[operator]
+ start_idx = max(0, line_idx - context)
+ stop_idx = min(len(lines), line_idx + context + 1)
+ rendered_lines = []
def render_line(line: str, cls: str) -> None:
line = line.expandtabs().rstrip()
stripped_line = line.strip()
prefix = len(line) - len(stripped_line)
- rv.append(
+ colno = getattr(self, "colno", 0)
+ end_colno = getattr(self, "end_colno", 0)
+
+ if cls == "current" and colno and end_colno:
+ arrow = (
+ f'\n<span class="ws">{" " * prefix}</span>'
+ f'{" " * (colno - prefix)}{"^" * (end_colno - colno)}'
+ )
+ else:
+ arrow = ""
+
+ rendered_lines.append(
f'<pre class="line {cls}"><span class="ws">{" " * prefix}</span>'
- f"{escape(stripped_line) if stripped_line else ' '}</pre>"
+ f"{escape(stripped_line) if stripped_line else ' '}"
+ f"{arrow if arrow else ''}</pre>"
)
- for line in before:
- render_line(line, "before")
- render_line(current, "current")
- for line in after:
- render_line(line, "after")
-
- return "\n".join(rv)
-
- def get_annotated_lines(self) -> t.List[Line]:
- """Helper function that returns lines with extra information."""
- lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
-
- # find function definition and mark lines
- if hasattr(self.code, "co_firstlineno"):
- lineno = self.code.co_firstlineno - 1
- while lineno > 0:
- if _funcdef_re.match(lines[lineno].code):
- break
- lineno -= 1
- try:
- offset = len(inspect.getblock([f"{x.code}\n" for x in lines[lineno:]]))
- except TokenError:
- offset = 0
- for line in lines[lineno : lineno + offset]:
- line.in_frame = True
-
- # mark current line
- try:
- lines[self.lineno - 1].current = True
- except IndexError:
- pass
-
- return lines
-
- def eval(self, code: t.Union[str, CodeType], mode: str = "single") -> t.Any:
- """Evaluate code in the context of the frame."""
- if isinstance(code, str):
- code = compile(code, "<interactive>", mode)
- return eval(code, self.globals, self.locals)
+ if lines:
+ for line in lines[start_idx:line_idx]:
+ render_line(line, "before")
- @cached_property
- def sourcelines(self) -> t.List[str]:
- """The sourcecode of the file as list of strings."""
- # get sourcecode from loader or file
- source = None
- if self.loader is not None:
- try:
- if hasattr(self.loader, "get_source"):
- source = self.loader.get_source(self.module)
- elif hasattr(self.loader, "get_source_by_code"):
- source = self.loader.get_source_by_code(self.code)
- except Exception:
- # we munch the exception so that we don't cause troubles
- # if the loader is broken.
- pass
-
- if source is None:
- try:
- with open(self.filename, mode="rb") as f:
- source = f.read()
- except OSError:
- return []
-
- # already str? return right away
- if isinstance(source, str):
- return source.splitlines()
-
- charset = "utf-8"
- if source.startswith(codecs.BOM_UTF8):
- source = source[3:]
- else:
- for idx, match in enumerate(_line_re.finditer(source)):
- coding_match = _coding_re.search(match.group())
- if coding_match is not None:
- charset = coding_match.group(1).decode("utf-8")
- break
- if idx > 1:
- break
-
- # on broken cookies we fall back to utf-8 too
- charset = _to_str(charset)
- try:
- codecs.lookup(charset)
- except LookupError:
- charset = "utf-8"
-
- return source.decode(charset, "replace").splitlines()
-
- def get_context_lines(
- self, context: int = 5
- ) -> t.Tuple[t.List[str], str, t.List[str]]:
- before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1]
- past = self.sourcelines[self.lineno : self.lineno + context]
- return (before, self.current_line, past)
-
- @property
- def current_line(self) -> str:
- try:
- return self.sourcelines[self.lineno - 1]
- except IndexError:
- return ""
+ render_line(lines[line_idx], "current")
+
+ for line in lines[line_idx + 1 : stop_idx]:
+ render_line(line, "after")
+
+ return FRAME_HTML % {
+ "id": id(self),
+ "filename": escape(self.filename),
+ "lineno": self.lineno,
+ "function_name": escape(self.name),
+ "lines": "\n".join(rendered_lines),
+ "library": "library" if mark_library and self.is_library else "",
+ }
- @cached_property
- def console(self) -> Console:
- return Console(self.globals, self.locals)
- @property
- def id(self) -> int:
- return id(self)
+def render_console_html(secret: str, evalex_trusted: bool) -> str:
+ return CONSOLE_HTML % {
+ "evalex": "true",
+ "evalex_trusted": "true" if evalex_trusted else "false",
+ "console": "true",
+ "title": "Console",
+ "secret": secret,
+ }
diff --git a/contrib/python/Werkzeug/py3/werkzeug/exceptions.py b/contrib/python/Werkzeug/py3/werkzeug/exceptions.py
index 8a31c4d33b..013df72bd3 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/exceptions.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/exceptions.py
@@ -43,11 +43,11 @@ code, you can add a second except for a specific subclass of an error:
return e
"""
-import sys
import typing as t
-import warnings
from datetime import datetime
-from html import escape
+
+from markupsafe import escape
+from markupsafe import Markup
from ._internal import _get_environ
@@ -65,6 +65,9 @@ class HTTPException(Exception):
"""The base class for all HTTP exceptions. This exception can be called as a WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
+
+ .. versionchanged:: 2.1
+ Removed the ``wrap`` class method.
"""
code: t.Optional[int] = None
@@ -80,70 +83,6 @@ class HTTPException(Exception):
self.description = description
self.response = response
- @classmethod
- def wrap(
- cls, exception: t.Type[BaseException], name: t.Optional[str] = None
- ) -> t.Type["HTTPException"]:
- """Create an exception that is a subclass of the calling HTTP
- exception and the ``exception`` argument.
-
- The first argument to the class will be passed to the
- wrapped ``exception``, the rest to the HTTP exception. If
- ``e.args`` is not empty and ``e.show_exception`` is ``True``,
- the wrapped exception message is added to the HTTP error
- description.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Create a subclass manually
- instead.
-
- .. versionchanged:: 0.15.5
- The ``show_exception`` attribute controls whether the
- description includes the wrapped exception message.
-
- .. versionchanged:: 0.15.0
- The description includes the wrapped exception message.
- """
- warnings.warn(
- "'HTTPException.wrap' is deprecated and will be removed in"
- " Werkzeug 2.1. Create a subclass manually instead.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- class newcls(cls, exception): # type: ignore
- _description = cls.description
- show_exception = False
-
- def __init__(
- self, arg: t.Optional[t.Any] = None, *args: t.Any, **kwargs: t.Any
- ) -> None:
- super().__init__(*args, **kwargs)
-
- if arg is None:
- exception.__init__(self)
- else:
- exception.__init__(self, arg)
-
- @property
- def description(self) -> str:
- if self.show_exception:
- return (
- f"{self._description}\n"
- f"{exception.__name__}: {exception.__str__(self)}"
- )
-
- return self._description # type: ignore
-
- @description.setter
- def description(self, value: str) -> None:
- self._description = value
-
- newcls.__module__ = sys._getframe(1).f_globals["__name__"]
- name = name or cls.__name__ + exception.__name__
- newcls.__name__ = newcls.__qualname__ = name
- return newcls
-
@property
def name(self) -> str:
"""The status name."""
@@ -164,7 +103,7 @@ class HTTPException(Exception):
else:
description = self.description
- description = escape(description).replace("\n", "<br>")
+ description = escape(description).replace("\n", Markup("<br>"))
return f"<p>{description}</p>"
def get_body(
@@ -174,7 +113,8 @@ class HTTPException(Exception):
) -> str:
"""Get the HTML body."""
return (
- '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+ "<!doctype html>\n"
+ "<html lang=en>\n"
f"<title>{self.code} {escape(self.name)}</title>\n"
f"<h1>{escape(self.name)}</h1>\n"
f"{self.get_description(environ)}\n"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/filesystem.py b/contrib/python/Werkzeug/py3/werkzeug/filesystem.py
deleted file mode 100644
index 36a3d12e97..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/filesystem.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import codecs
-import sys
-import typing as t
-import warnings
-
-# We do not trust traditional unixes.
-has_likely_buggy_unicode_filesystem = (
- sys.platform.startswith("linux") or "bsd" in sys.platform
-)
-
-
-def _is_ascii_encoding(encoding: t.Optional[str]) -> bool:
- """Given an encoding this figures out if the encoding is actually ASCII (which
- is something we don't actually want in most cases). This is necessary
- because ASCII comes under many names such as ANSI_X3.4-1968.
- """
- if encoding is None:
- return False
- try:
- return codecs.lookup(encoding).name == "ascii"
- except LookupError:
- return False
-
-
-class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning):
- """The warning used by Werkzeug to signal a broken filesystem. Will only be
- used once per runtime."""
-
-
-_warned_about_filesystem_encoding = False
-
-
-def get_filesystem_encoding() -> str:
- """Returns the filesystem encoding that should be used. Note that this is
- different from the Python understanding of the filesystem encoding which
- might be deeply flawed. Do not use this value against Python's string APIs
- because it might be different. See :ref:`filesystem-encoding` for the exact
- behavior.
-
- The concept of a filesystem encoding in generally is not something you
- should rely on. As such if you ever need to use this function except for
- writing wrapper code reconsider.
- """
- global _warned_about_filesystem_encoding
- rv = sys.getfilesystemencoding()
- if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv):
- if not _warned_about_filesystem_encoding:
- warnings.warn(
- "Detected a misconfigured UNIX filesystem: Will use"
- f" UTF-8 as filesystem encoding instead of {rv!r}",
- BrokenFilesystemWarning,
- )
- _warned_about_filesystem_encoding = True
- return "utf-8"
- return rv
diff --git a/contrib/python/Werkzeug/py3/werkzeug/formparser.py b/contrib/python/Werkzeug/py3/werkzeug/formparser.py
index 6cb758feb1..10d58ca3fa 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/formparser.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/formparser.py
@@ -1,12 +1,10 @@
import typing as t
-import warnings
from functools import update_wrapper
from io import BytesIO
from itertools import chain
from typing import Union
from . import exceptions
-from ._internal import _to_str
from .datastructures import FileStorage
from .datastructures import Headers
from .datastructures import MultiDict
@@ -339,44 +337,6 @@ def _line_parse(line: str) -> t.Tuple[str, bool]:
return line, False
-def parse_multipart_headers(iterable: t.Iterable[bytes]) -> Headers:
- """Parses multipart headers from an iterable that yields lines (including
- the trailing newline symbol). The iterable has to be newline terminated.
- The iterable will stop at the line where the headers ended so it can be
- further consumed.
- :param iterable: iterable of strings that are newline terminated
- """
- warnings.warn(
- "'parse_multipart_headers' is deprecated and will be removed in"
- " Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- result: t.List[t.Tuple[str, str]] = []
-
- for b_line in iterable:
- line = _to_str(b_line)
- line, line_terminated = _line_parse(line)
-
- if not line_terminated:
- raise ValueError("unexpected end of line in multipart header")
-
- if not line:
- break
- elif line[0] in " \t" and result:
- key, value = result[-1]
- result[-1] = (key, f"{value}\n {line[1:]}")
- else:
- parts = line.split(":", 1)
-
- if len(parts) == 2:
- result.append((parts[0].strip(), parts[1].strip()))
-
- # we link the list to the headers, no need to create a copy, the
- # list was not shared anyways.
- return Headers(result)
-
-
class MultiPartParser:
def __init__(
self,
diff --git a/contrib/python/Werkzeug/py3/werkzeug/http.py b/contrib/python/Werkzeug/py3/werkzeug/http.py
index 45799bf102..97776855d7 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/http.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/http.py
@@ -16,16 +16,14 @@ from time import struct_time
from urllib.parse import unquote_to_bytes as _unquote
from urllib.request import parse_http_list as _parse_list_header
-from ._internal import _cookie_parse_impl
from ._internal import _cookie_quote
+from ._internal import _dt_as_utc
from ._internal import _make_cookie_domain
from ._internal import _to_bytes
from ._internal import _to_str
from ._internal import _wsgi_decoding_dance
-from werkzeug._internal import _dt_as_utc
if t.TYPE_CHECKING:
- import typing_extensions as te
from _typeshed.wsgi import WSGIEnvironment
# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
@@ -376,42 +374,29 @@ def parse_dict_header(value: str, cls: t.Type[dict] = dict) -> t.Dict[str, str]:
return result
-@typing.overload
-def parse_options_header(
- value: t.Optional[str], multiple: "te.Literal[False]" = False
-) -> t.Tuple[str, t.Dict[str, str]]:
- ...
-
+def parse_options_header(value: t.Optional[str]) -> t.Tuple[str, t.Dict[str, str]]:
+ """Parse a ``Content-Type``-like header into a tuple with the
+ value and any options:
-@typing.overload
-def parse_options_header(
- value: t.Optional[str], multiple: "te.Literal[True]"
-) -> t.Tuple[t.Any, ...]:
- ...
+ >>> parse_options_header('text/html; charset=utf8')
+ ('text/html', {'charset': 'utf8'})
+ This should is not for ``Cache-Control``-like headers, which use a
+ different format. For those, use :func:`parse_dict_header`.
-def parse_options_header(
- value: t.Optional[str], multiple: bool = False
-) -> t.Union[t.Tuple[str, t.Dict[str, str]], t.Tuple[t.Any, ...]]:
- """Parse a ``Content-Type`` like header into a tuple with the content
- type and the options:
+ :param value: The header value to parse.
- >>> parse_options_header('text/html; charset=utf8')
- ('text/html', {'charset': 'utf8'})
+ .. versionchanged:: 2.2
+ Option names are always converted to lowercase.
- This should not be used to parse ``Cache-Control`` like headers that use
- a slightly different format. For these headers use the
- :func:`parse_dict_header` function.
+ .. versionchanged:: 2.1
+ The ``multiple`` parameter is deprecated and will be removed in
+ Werkzeug 2.2.
.. versionchanged:: 0.15
:rfc:`2231` parameter continuations are handled.
.. versionadded:: 0.5
-
- :param value: the header to parse.
- :param multiple: Whether try to parse and return multiple MIME types
- :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
- if multiple=True
"""
if not value:
return "", {}
@@ -444,7 +429,7 @@ def parse_options_header(
if not encoding:
encoding = continued_encoding
continued_encoding = encoding
- option = unquote_header_value(option)
+ option = unquote_header_value(option).lower()
if option_value is not None:
option_value = unquote_header_value(option_value, option == "filename")
@@ -463,11 +448,9 @@ def parse_options_header(
rest = rest[optmatch.end() :]
result.append(options)
- if multiple is False:
- return tuple(result)
- value = rest
+ return tuple(result) # type: ignore[return-value]
- return tuple(result) if result else ("", {})
+ return tuple(result) if result else ("", {}) # type: ignore[return-value]
_TAnyAccept = t.TypeVar("_TAnyAccept", bound="ds.Accept")
@@ -777,15 +760,20 @@ def parse_range_header(
begin_str, end_str = item.split("-", 1)
begin_str = begin_str.strip()
end_str = end_str.strip()
- if not begin_str.isdigit():
+
+ try:
+ begin = int(begin_str)
+ except ValueError:
return None
- begin = int(begin_str)
+
if begin < last_end or last_end < 0:
return None
if end_str:
- if not end_str.isdigit():
+ try:
+ end = int(end_str) + 1
+ except ValueError:
return None
- end = int(end_str) + 1
+
if begin >= end:
return None
else:
@@ -823,10 +811,11 @@ def parse_content_range_header(
rng, length_str = rangedef.split("/", 1)
if length_str == "*":
length = None
- elif length_str.isdigit():
- length = int(length_str)
else:
- return None
+ try:
+ length = int(length_str)
+ except ValueError:
+ return None
if rng == "*":
return ds.ContentRange(units, None, None, length, on_update=on_update)
@@ -952,24 +941,6 @@ def parse_date(value: t.Optional[str]) -> t.Optional[datetime]:
return dt
-def cookie_date(
- expires: t.Optional[t.Union[datetime, date, int, float, struct_time]] = None
-) -> str:
- """Format a datetime object or timestamp into an :rfc:`2822` date
- string for ``Set-Cookie expires``.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :func:`http_date` instead.
- """
- warnings.warn(
- "'cookie_date' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'http_date' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return http_date(expires)
-
-
def http_date(
timestamp: t.Optional[t.Union[datetime, date, int, float, struct_time]] = None
) -> str:
@@ -1069,57 +1040,17 @@ def is_resource_modified(
.. versionchanged:: 1.0.0
The check is run for methods other than ``GET`` and ``HEAD``.
"""
- if etag is None and data is not None:
- etag = generate_etag(data)
- elif data is not None:
- raise TypeError("both data and etag given")
-
- unmodified = False
- if isinstance(last_modified, str):
- last_modified = parse_date(last_modified)
-
- # HTTP doesn't use microsecond, remove it to avoid false positive
- # comparisons. Mark naive datetimes as UTC.
- if last_modified is not None:
- last_modified = _dt_as_utc(last_modified.replace(microsecond=0))
-
- if_range = None
- if not ignore_if_range and "HTTP_RANGE" in environ:
- # https://tools.ietf.org/html/rfc7233#section-3.2
- # A server MUST ignore an If-Range header field received in a request
- # that does not contain a Range header field.
- if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))
-
- if if_range is not None and if_range.date is not None:
- modified_since: t.Optional[datetime] = if_range.date
- else:
- modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))
-
- if modified_since and last_modified and last_modified <= modified_since:
- unmodified = True
-
- if etag:
- etag, _ = unquote_etag(etag)
- etag = t.cast(str, etag)
-
- if if_range is not None and if_range.etag is not None:
- unmodified = parse_etags(if_range.etag).contains(etag)
- else:
- if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))
- if if_none_match:
- # https://tools.ietf.org/html/rfc7232#section-3.2
- # "A recipient MUST use the weak comparison function when comparing
- # entity-tags for If-None-Match"
- unmodified = if_none_match.contains_weak(etag)
-
- # https://tools.ietf.org/html/rfc7232#section-3.1
- # "Origin server MUST use the strong comparison function when
- # comparing entity-tags for If-Match"
- if_match = parse_etags(environ.get("HTTP_IF_MATCH"))
- if if_match:
- unmodified = not if_match.is_strong(etag)
-
- return not unmodified
+ return _sansio_http.is_resource_modified(
+ http_range=environ.get("HTTP_RANGE"),
+ http_if_range=environ.get("HTTP_IF_RANGE"),
+ http_if_modified_since=environ.get("HTTP_IF_MODIFIED_SINCE"),
+ http_if_none_match=environ.get("HTTP_IF_NONE_MATCH"),
+ http_if_match=environ.get("HTTP_IF_MATCH"),
+ etag=etag,
+ data=data,
+ last_modified=last_modified,
+ ignore_if_range=ignore_if_range,
+ )
def remove_entity_headers(
@@ -1212,29 +1143,15 @@ def parse_cookie(
The ``cls`` parameter was added.
"""
if isinstance(header, dict):
- header = header.get("HTTP_COOKIE", "")
+ cookie = header.get("HTTP_COOKIE", "")
elif header is None:
- header = ""
-
- # PEP 3333 sends headers through the environ as latin1 decoded
- # strings. Encode strings back to bytes for parsing.
- if isinstance(header, str):
- header = header.encode("latin1", "replace")
-
- if cls is None:
- cls = ds.MultiDict
-
- def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:
- for key, val in _cookie_parse_impl(header): # type: ignore
- key_str = _to_str(key, charset, errors, allow_none_charset=True)
-
- if not key_str:
- continue
-
- val_str = _to_str(val, charset, errors, allow_none_charset=True)
- yield key_str, val_str
+ cookie = ""
+ else:
+ cookie = header
- return cls(_parse_pairs())
+ return _sansio_http.parse_cookie(
+ cookie=cookie, charset=charset, errors=errors, cls=cls
+ )
def dump_cookie(
@@ -1391,3 +1308,4 @@ def is_byte_range_valid(
# circular dependencies
from . import datastructures as ds
+from .sansio import http as _sansio_http
diff --git a/contrib/python/Werkzeug/py3/werkzeug/local.py b/contrib/python/Werkzeug/py3/werkzeug/local.py
index 6e33bbe5ab..70e9bf72f2 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/local.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/local.py
@@ -1,11 +1,11 @@
import copy
import math
import operator
-import sys
import typing as t
-import warnings
+from contextvars import ContextVar
from functools import partial
from functools import update_wrapper
+from operator import attrgetter
from .wsgi import ClosingIterator
@@ -14,98 +14,16 @@ if t.TYPE_CHECKING:
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
+T = t.TypeVar("T")
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
-try:
- from greenlet import getcurrent as _get_ident
-except ImportError:
- from threading import get_ident as _get_ident
-
-
-def get_ident() -> int:
- warnings.warn(
- "'get_ident' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'greenlet.getcurrent' or 'threading.get_ident' for"
- " previous behavior.",
- DeprecationWarning,
- stacklevel=2,
- )
- return _get_ident() # type: ignore
-
-
-class _CannotUseContextVar(Exception):
- pass
-
-
-try:
- from contextvars import ContextVar
-
- if "gevent" in sys.modules or "eventlet" in sys.modules:
- # Both use greenlet, so first check it has patched
- # ContextVars, Greenlet <0.4.17 does not.
- import greenlet
-
- greenlet_patched = getattr(greenlet, "GREENLET_USE_CONTEXT_VARS", False)
-
- if not greenlet_patched:
- # If Gevent is used, check it has patched ContextVars,
- # <20.5 does not.
- try:
- from gevent.monkey import is_object_patched
- except ImportError:
- # Gevent isn't used, but Greenlet is and hasn't patched
- raise _CannotUseContextVar() from None
- else:
- if is_object_patched("threading", "local") and not is_object_patched(
- "contextvars", "ContextVar"
- ):
- raise _CannotUseContextVar()
-
- def __release_local__(storage: t.Any) -> None:
- # Can remove when support for non-stdlib ContextVars is
- # removed, see "Fake" version below.
- storage.set({})
-
-except (ImportError, _CannotUseContextVar):
-
- class ContextVar: # type: ignore
- """A fake ContextVar based on the previous greenlet/threading
- ident function. Used on Python 3.6, eventlet, and old versions
- of gevent.
- """
-
- def __init__(self, _name: str) -> None:
- self.storage: t.Dict[int, t.Dict[str, t.Any]] = {}
-
- def get(self, default: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
- return self.storage.get(_get_ident(), default)
-
- def set(self, value: t.Dict[str, t.Any]) -> None:
- self.storage[_get_ident()] = value
-
- def __release_local__(storage: t.Any) -> None:
- # Special version to ensure that the storage is cleaned up on
- # release.
- storage.storage.pop(_get_ident(), None)
-
def release_local(local: t.Union["Local", "LocalStack"]) -> None:
- """Releases the contents of the local for the current context.
- This makes it possible to use locals without a manager.
-
- Example::
-
- >>> loc = Local()
- >>> loc.foo = 42
- >>> release_local(loc)
- >>> hasattr(loc, 'foo')
- False
+ """Release the data for the current context in a :class:`Local` or
+ :class:`LocalStack` without using a :class:`LocalManager`.
- With this function one can release :class:`Local` objects as well
- as :class:`LocalStack` objects. However it is not possible to
- release data held by proxies that way, one always has to retain
- a reference to the underlying local object in order to be able
- to release it.
+ This should not be needed for modern use cases, and may be removed
+ in the future.
.. versionadded:: 0.6.1
"""
@@ -113,239 +31,204 @@ def release_local(local: t.Union["Local", "LocalStack"]) -> None:
class Local:
- __slots__ = ("_storage",)
+ """Create a namespace of context-local data. This wraps a
+ :class:`ContextVar` containing a :class:`dict` value.
- def __init__(self) -> None:
- object.__setattr__(self, "_storage", ContextVar("local_storage"))
+ This may incur a performance penalty compared to using individual
+ context vars, as it has to copy data to avoid mutating the dict
+ between nested contexts.
- @property
- def __storage__(self) -> t.Dict[str, t.Any]:
- warnings.warn(
- "'__storage__' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return self._storage.get({}) # type: ignore
+ :param context_var: The :class:`~contextvars.ContextVar` to use as
+ storage for this local. If not given, one will be created.
+ Context vars not created at the global scope may interfere with
+ garbage collection.
- @property
- def __ident_func__(self) -> t.Callable[[], int]:
- warnings.warn(
- "'__ident_func__' is deprecated and will be removed in"
- " Werkzeug 2.1. It should not be used in Python 3.7+.",
- DeprecationWarning,
- stacklevel=2,
- )
- return _get_ident # type: ignore
-
- @__ident_func__.setter
- def __ident_func__(self, func: t.Callable[[], int]) -> None:
- warnings.warn(
- "'__ident_func__' is deprecated and will be removed in"
- " Werkzeug 2.1. Setting it no longer has any effect.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- def __iter__(self) -> t.Iterator[t.Tuple[int, t.Any]]:
- return iter(self._storage.get({}).items())
-
- def __call__(self, proxy: str) -> "LocalProxy":
- """Create a proxy for a name."""
- return LocalProxy(self, proxy)
+ .. versionchanged:: 2.0
+ Uses ``ContextVar`` instead of a custom storage implementation.
+ """
+
+ __slots__ = ("__storage",)
+
+ def __init__(
+ self, context_var: t.Optional[ContextVar[t.Dict[str, t.Any]]] = None
+ ) -> None:
+ if context_var is None:
+ # A ContextVar not created at global scope interferes with
+ # Python's garbage collection. However, a local only makes
+ # sense defined at the global scope as well, in which case
+ # the GC issue doesn't seem relevant.
+ context_var = ContextVar(f"werkzeug.Local<{id(self)}>.storage")
+
+ object.__setattr__(self, "_Local__storage", context_var)
+
+ def __iter__(self) -> t.Iterator[t.Tuple[str, t.Any]]:
+ return iter(self.__storage.get({}).items())
+
+ def __call__(
+ self, name: str, *, unbound_message: t.Optional[str] = None
+ ) -> "LocalProxy":
+ """Create a :class:`LocalProxy` that access an attribute on this
+ local namespace.
+
+ :param name: Proxy this attribute.
+ :param unbound_message: The error message that the proxy will
+ show if the attribute isn't set.
+ """
+ return LocalProxy(self, name, unbound_message=unbound_message)
def __release_local__(self) -> None:
- __release_local__(self._storage)
+ self.__storage.set({})
def __getattr__(self, name: str) -> t.Any:
- values = self._storage.get({})
- try:
+ values = self.__storage.get({})
+
+ if name in values:
return values[name]
- except KeyError:
- raise AttributeError(name) from None
+
+ raise AttributeError(name)
def __setattr__(self, name: str, value: t.Any) -> None:
- values = self._storage.get({}).copy()
+ values = self.__storage.get({}).copy()
values[name] = value
- self._storage.set(values)
+ self.__storage.set(values)
def __delattr__(self, name: str) -> None:
- values = self._storage.get({}).copy()
- try:
+ values = self.__storage.get({})
+
+ if name in values:
+ values = values.copy()
del values[name]
- self._storage.set(values)
- except KeyError:
- raise AttributeError(name) from None
-
-
-class LocalStack:
- """This class works similar to a :class:`Local` but keeps a stack
- of objects instead. This is best explained with an example::
-
- >>> ls = LocalStack()
- >>> ls.push(42)
- >>> ls.top
- 42
- >>> ls.push(23)
- >>> ls.top
- 23
- >>> ls.pop()
- 23
- >>> ls.top
- 42
-
- They can be force released by using a :class:`LocalManager` or with
- the :func:`release_local` function but the correct way is to pop the
- item from the stack after using. When the stack is empty it will
- no longer be bound to the current context (and as such released).
-
- By calling the stack without arguments it returns a proxy that resolves to
- the topmost item on the stack.
+ self.__storage.set(values)
+ else:
+ raise AttributeError(name)
+
+
+class LocalStack(t.Generic[T]):
+ """Create a stack of context-local data. This wraps a
+ :class:`ContextVar` containing a :class:`list` value.
+
+ This may incur a performance penalty compared to using individual
+ context vars, as it has to copy data to avoid mutating the list
+ between nested contexts.
+
+ :param context_var: The :class:`~contextvars.ContextVar` to use as
+ storage for this local. If not given, one will be created.
+ Context vars not created at the global scope may interfere with
+ garbage collection.
+
+ .. versionchanged:: 2.0
+ Uses ``ContextVar`` instead of a custom storage implementation.
.. versionadded:: 0.6.1
"""
- def __init__(self) -> None:
- self._local = Local()
+ __slots__ = ("_storage",)
- def __release_local__(self) -> None:
- self._local.__release_local__()
+ def __init__(self, context_var: t.Optional[ContextVar[t.List[T]]] = None) -> None:
+ if context_var is None:
+ # A ContextVar not created at global scope interferes with
+ # Python's garbage collection. However, a local only makes
+ # sense defined at the global scope as well, in which case
+ # the GC issue doesn't seem relevant.
+ context_var = ContextVar(f"werkzeug.LocalStack<{id(self)}>.storage")
- @property
- def __ident_func__(self) -> t.Callable[[], int]:
- return self._local.__ident_func__
-
- @__ident_func__.setter
- def __ident_func__(self, value: t.Callable[[], int]) -> None:
- object.__setattr__(self._local, "__ident_func__", value)
-
- def __call__(self) -> "LocalProxy":
- def _lookup() -> t.Any:
- rv = self.top
- if rv is None:
- raise RuntimeError("object unbound")
- return rv
-
- return LocalProxy(_lookup)
-
- def push(self, obj: t.Any) -> t.List[t.Any]:
- """Pushes a new item to the stack"""
- rv = getattr(self._local, "stack", []).copy()
- rv.append(obj)
- self._local.stack = rv
- return rv
+ self._storage = context_var
- def pop(self) -> t.Any:
- """Removes the topmost item from the stack, will return the
- old value or `None` if the stack was already empty.
+ def __release_local__(self) -> None:
+ self._storage.set([])
+
+ def push(self, obj: T) -> t.List[T]:
+ """Add a new item to the top of the stack."""
+ stack = self._storage.get([]).copy()
+ stack.append(obj)
+ self._storage.set(stack)
+ return stack
+
+ def pop(self) -> t.Optional[T]:
+ """Remove the top item from the stack and return it. If the
+ stack is empty, return ``None``.
"""
- stack = getattr(self._local, "stack", None)
- if stack is None:
+ stack = self._storage.get([])
+
+ if len(stack) == 0:
return None
- elif len(stack) == 1:
- release_local(self._local)
- return stack[-1]
- else:
- return stack.pop()
+
+ rv = stack[-1]
+ self._storage.set(stack[:-1])
+ return rv
@property
- def top(self) -> t.Any:
+ def top(self) -> t.Optional[T]:
"""The topmost item on the stack. If the stack is empty,
`None` is returned.
"""
- try:
- return self._local.stack[-1]
- except (AttributeError, IndexError):
+ stack = self._storage.get([])
+
+ if len(stack) == 0:
return None
+ return stack[-1]
+
+ def __call__(
+ self, name: t.Optional[str] = None, *, unbound_message: t.Optional[str] = None
+ ) -> "LocalProxy":
+ """Create a :class:`LocalProxy` that accesses the top of this
+ local stack.
+
+ :param name: If given, the proxy access this attribute of the
+ top item, rather than the item itself.
+ :param unbound_message: The error message that the proxy will
+ show if the stack is empty.
+ """
+ return LocalProxy(self, name, unbound_message=unbound_message)
+
class LocalManager:
- """Local objects cannot manage themselves. For that you need a local
- manager. You can pass a local manager multiple locals or add them
- later by appending them to `manager.locals`. Every time the manager
- cleans up, it will clean up all the data left in the locals for this
- context.
+ """Manage releasing the data for the current context in one or more
+ :class:`Local` and :class:`LocalStack` objects.
+
+ This should not be needed for modern use cases, and may be removed
+ in the future.
+
+ :param locals: A local or list of locals to manage.
.. versionchanged:: 2.0
``ident_func`` is deprecated and will be removed in Werkzeug
2.1.
+ .. versionchanged:: 0.7
+ The ``ident_func`` parameter was added.
+
.. versionchanged:: 0.6.1
The :func:`release_local` function can be used instead of a
manager.
-
- .. versionchanged:: 0.7
- The ``ident_func`` parameter was added.
"""
+ __slots__ = ("locals",)
+
def __init__(
self,
- locals: t.Optional[t.Iterable[t.Union[Local, LocalStack]]] = None,
- ident_func: None = None,
+ locals: t.Optional[
+ t.Union[Local, LocalStack, t.Iterable[t.Union[Local, LocalStack]]]
+ ] = None,
) -> None:
if locals is None:
self.locals = []
elif isinstance(locals, Local):
self.locals = [locals]
else:
- self.locals = list(locals)
-
- if ident_func is not None:
- warnings.warn(
- "'ident_func' is deprecated and will be removed in"
- " Werkzeug 2.1. Setting it no longer has any effect.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- @property
- def ident_func(self) -> t.Callable[[], int]:
- warnings.warn(
- "'ident_func' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return _get_ident # type: ignore
-
- @ident_func.setter
- def ident_func(self, func: t.Callable[[], int]) -> None:
- warnings.warn(
- "'ident_func' is deprecated and will be removedin Werkzeug"
- " 2.1. Setting it no longer has any effect.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- def get_ident(self) -> int:
- """Return the context identifier the local objects use internally for
- this context. You cannot override this method to change the behavior
- but use it to link other context local objects (such as SQLAlchemy's
- scoped sessions) to the Werkzeug locals.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1.
-
- .. versionchanged:: 0.7
- You can pass a different ident function to the local manager that
- will then be propagated to all the locals passed to the
- constructor.
- """
- warnings.warn(
- "'get_ident' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return self.ident_func()
+ self.locals = list(locals) # type: ignore[arg-type]
def cleanup(self) -> None:
- """Manually clean up the data in the locals for this context. Call
- this at the end of the request or use `make_middleware()`.
+ """Release the data in the locals for this context. Call this at
+ the end of each request or use :meth:`make_middleware`.
"""
for local in self.locals:
release_local(local)
def make_middleware(self, app: "WSGIApplication") -> "WSGIApplication":
- """Wrap a WSGI application so that cleaning up happens after
- request end.
+ """Wrap a WSGI application so that local data is released
+ automatically after the response has been sent for a request.
"""
def application(
@@ -356,17 +239,14 @@ class LocalManager:
return application
def middleware(self, func: "WSGIApplication") -> "WSGIApplication":
- """Like `make_middleware` but for decorating functions.
+ """Like :meth:`make_middleware` but used as a decorator on the
+ WSGI application function.
- Example usage::
+ .. code-block:: python
@manager.middleware
def application(environ, start_response):
...
-
- The difference to `make_middleware` is that the function passed
- will have all the arguments copied from the inner application
- (name, docstring, module).
"""
return update_wrapper(self.make_middleware(func), func)
@@ -433,12 +313,12 @@ class _ProxyLookup:
return self
try:
- obj = instance._get_current_object()
+ obj = instance._get_current_object() # type: ignore[misc]
except RuntimeError:
if self.fallback is None:
raise
- fallback = self.fallback.__get__(instance, owner) # type: ignore
+ fallback = self.fallback.__get__(instance, owner)
if self.is_attr:
# __class__ and __doc__ are attributes, not methods.
@@ -495,29 +375,56 @@ def _l_to_r_op(op: F) -> F:
return t.cast(F, r_op)
-class LocalProxy:
- """A proxy to the object bound to a :class:`Local`. All operations
- on the proxy are forwarded to the bound object. If no object is
- bound, a :exc:`RuntimeError` is raised.
+def _identity(o: T) -> T:
+ return o
+
+
+class LocalProxy(t.Generic[T]):
+ """A proxy to the object bound to a context-local object. All
+ operations on the proxy are forwarded to the bound object. If no
+ object is bound, a ``RuntimeError`` is raised.
+
+ :param local: The context-local object that provides the proxied
+ object.
+ :param name: Proxy this attribute from the proxied object.
+ :param unbound_message: The error message to show if the
+ context-local object is unbound.
+
+ Proxy a :class:`~contextvars.ContextVar` to make it easier to
+ access. Pass a name to proxy that attribute.
+
+ .. code-block:: python
+
+ _request_var = ContextVar("request")
+ request = LocalProxy(_request_var)
+ session = LocalProxy(_request_var, "session")
+
+ Proxy an attribute on a :class:`Local` namespace by calling the
+ local with the attribute name:
.. code-block:: python
- from werkzeug.local import Local
- l = Local()
+ data = Local()
+ user = data("user")
+
+ Proxy the top item on a :class:`LocalStack` by calling the local.
+ Pass a name to proxy that attribute.
- # a proxy to whatever l.user is set to
- user = l("user")
+ .. code-block::
- from werkzeug.local import LocalStack
- _request_stack = LocalStack()
+ app_stack = LocalStack()
+ current_app = app_stack()
+ g = app_stack("g")
- # a proxy to _request_stack.top
- request = _request_stack()
+ Pass a function to proxy the return value from that function. This
+ was previously used to access attributes of local objects before
+ that was supported directly.
+
+ .. code-block:: python
- # a proxy to the session attribute of the request proxy
session = LocalProxy(lambda: request.session)
- ``__repr__`` and ``__class__`` are forwarded, so ``repr(x)`` and
+ ``__repr__`` and ``__class__`` are proxied, so ``repr(x)`` and
``isinstance(x, cls)`` will look like the proxied object. Use
``issubclass(type(x), LocalProxy)`` to check if an object is a
proxy.
@@ -528,10 +435,19 @@ class LocalProxy:
isinstance(user, User) # True
issubclass(type(user), LocalProxy) # True
- :param local: The :class:`Local` or callable that provides the
- proxied object.
- :param name: The attribute name to look up on a :class:`Local`. Not
- used if a callable is given.
+ .. versionchanged:: 2.2.2
+ ``__wrapped__`` is set when wrapping an object, not only when
+ wrapping a function, to prevent doctest from failing.
+
+ .. versionchanged:: 2.2
+ Can proxy a ``ContextVar`` or ``LocalStack`` directly.
+
+ .. versionchanged:: 2.2
+ The ``name`` parameter can be used with any proxied object, not
+ only ``Local``.
+
+ .. versionchanged:: 2.2
+ Added the ``unbound_message`` parameter.
.. versionchanged:: 2.0
Updated proxied attributes and methods to reflect the current
@@ -541,38 +457,80 @@ class LocalProxy:
The class can be instantiated with a callable.
"""
- __slots__ = ("__local", "__name", "__wrapped__", "__dict__")
+ __slots__ = ("__wrapped", "_get_current_object")
+
+ _get_current_object: t.Callable[[], T]
+ """Return the current object this proxy is bound to. If the proxy is
+ unbound, this raises a ``RuntimeError``.
+
+ This should be used if you need to pass the object to something that
+ doesn't understand the proxy. It can also be useful for performance
+ if you are accessing the object multiple times in a function, rather
+ than going through the proxy multiple times.
+ """
def __init__(
self,
- local: t.Union["Local", t.Callable[[], t.Any]],
+ local: t.Union[ContextVar[T], Local, LocalStack[T], t.Callable[[], T]],
name: t.Optional[str] = None,
+ *,
+ unbound_message: t.Optional[str] = None,
) -> None:
- object.__setattr__(self, "_LocalProxy__local", local)
- object.__setattr__(self, "_LocalProxy__name", name)
-
- if callable(local) and not hasattr(local, "__release_local__"):
- # "local" is a callable that is not an instance of Local or
- # LocalManager: mark it as a wrapped function.
- object.__setattr__(self, "__wrapped__", local)
-
- def _get_current_object(self) -> t.Any:
- """Return the current object. This is useful if you want the real
- object behind the proxy at a time for performance reasons or because
- you want to pass the object into a different context.
- """
- if not hasattr(self.__local, "__release_local__"): # type: ignore
- return self.__local() # type: ignore
+ if name is None:
+ get_name = _identity
+ else:
+ get_name = attrgetter(name) # type: ignore[assignment]
- try:
- return getattr(self.__local, self.__name) # type: ignore
- except AttributeError:
- name = self.__name # type: ignore
- raise RuntimeError(f"no object bound to {name}") from None
+ if unbound_message is None:
+ unbound_message = "object is not bound"
+
+ if isinstance(local, Local):
+ if name is None:
+ raise TypeError("'name' is required when proxying a 'Local' object.")
+
+ def _get_current_object() -> T:
+ try:
+ return get_name(local) # type: ignore[return-value]
+ except AttributeError:
+ raise RuntimeError(unbound_message) from None
+
+ elif isinstance(local, LocalStack):
+
+ def _get_current_object() -> T:
+ obj = local.top # type: ignore[union-attr]
+
+ if obj is None:
+ raise RuntimeError(unbound_message)
+
+ return get_name(obj)
+
+ elif isinstance(local, ContextVar):
+
+ def _get_current_object() -> T:
+ try:
+ obj = local.get() # type: ignore[union-attr]
+ except LookupError:
+ raise RuntimeError(unbound_message) from None
+
+ return get_name(obj)
+
+ elif callable(local):
+
+ def _get_current_object() -> T:
+ return get_name(local()) # type: ignore
+
+ else:
+ raise TypeError(f"Don't know how to proxy '{type(local)}'.")
+
+ object.__setattr__(self, "_LocalProxy__wrapped", local)
+ object.__setattr__(self, "_get_current_object", _get_current_object)
__doc__ = _ProxyLookup( # type: ignore
class_value=__doc__, fallback=lambda self: type(self).__doc__, is_attr=True
)
+ __wrapped__ = _ProxyLookup(
+ fallback=lambda self: self._LocalProxy__wrapped, is_attr=True
+ )
# __del__ should only delete the proxy
__repr__ = _ProxyLookup( # type: ignore
repr, fallback=lambda self: f"<{type(self).__name__} unbound>"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/middleware/lint.py b/contrib/python/Werkzeug/py3/werkzeug/middleware/lint.py
index c74703b27c..6b54630e6e 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/middleware/lint.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/middleware/lint.py
@@ -117,7 +117,7 @@ class ErrorStream:
class GuardedWrite:
- def __init__(self, write: t.Callable[[bytes], None], chunks: t.List[int]) -> None:
+ def __init__(self, write: t.Callable[[bytes], object], chunks: t.List[int]) -> None:
self._write = write
self._chunks = chunks
@@ -288,7 +288,7 @@ class LintMiddleware:
check_type("status", status, str)
status_code_str = status.split(None, 1)[0]
- if len(status_code_str) != 3 or not status_code_str.isdigit():
+ if len(status_code_str) != 3 or not status_code_str.isdecimal():
warn("Status code must be three digits.", WSGIWarning, stacklevel=3)
if len(status) < 4 or status[3] != " ":
diff --git a/contrib/python/Werkzeug/py3/werkzeug/middleware/shared_data.py b/contrib/python/Werkzeug/py3/werkzeug/middleware/shared_data.py
index 62da67277b..2ec396c533 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/middleware/shared_data.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/middleware/shared_data.py
@@ -19,7 +19,6 @@ from io import BytesIO
from time import time
from zlib import adler32
-from ..filesystem import get_filesystem_encoding
from ..http import http_date
from ..http import is_resource_modified
from ..security import safe_join
@@ -158,77 +157,42 @@ class SharedDataMiddleware:
def get_package_loader(self, package: str, package_path: str) -> _TLoader:
load_time = datetime.now(timezone.utc)
provider = pkgutil.get_loader(package)
+ reader = provider.get_resource_reader(package) # type: ignore
- if hasattr(provider, "get_resource_reader"):
- # Python 3
- reader = provider.get_resource_reader(package) # type: ignore
-
- def loader(
- path: t.Optional[str],
- ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
- if path is None:
- return None, None
-
- path = safe_join(package_path, path)
+ def loader(
+ path: t.Optional[str],
+ ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
+ if path is None:
+ return None, None
- if path is None:
- return None, None
+ path = safe_join(package_path, path)
- basename = posixpath.basename(path)
+ if path is None:
+ return None, None
- try:
- resource = reader.open_resource(path)
- except OSError:
- return None, None
+ basename = posixpath.basename(path)
- if isinstance(resource, BytesIO):
- return (
- basename,
- lambda: (resource, load_time, len(resource.getvalue())),
- )
+ try:
+ resource = reader.open_resource(path)
+ except OSError:
+ return None, None
+ if isinstance(resource, BytesIO):
return (
basename,
- lambda: (
- resource,
- datetime.fromtimestamp(
- os.path.getmtime(resource.name), tz=timezone.utc
- ),
- os.path.getsize(resource.name),
- ),
+ lambda: (resource, load_time, len(resource.getvalue())),
)
- else:
- # Python 3.6
- package_filename = provider.get_filename(package) # type: ignore
- is_filesystem = os.path.exists(package_filename)
- root = os.path.join(os.path.dirname(package_filename), package_path)
-
- def loader(
- path: t.Optional[str],
- ) -> t.Tuple[t.Optional[str], t.Optional[_TOpener]]:
- if path is None:
- return None, None
-
- path = safe_join(root, path)
-
- if path is None:
- return None, None
-
- basename = posixpath.basename(path)
-
- if is_filesystem:
- if not os.path.isfile(path):
- return None, None
-
- return basename, self._opener(path)
-
- try:
- data = provider.get_data(path) # type: ignore
- except OSError:
- return None, None
-
- return basename, lambda: (BytesIO(data), load_time, len(data))
+ return (
+ basename,
+ lambda: (
+ resource,
+ datetime.fromtimestamp(
+ os.path.getmtime(resource.name), tz=timezone.utc
+ ),
+ os.path.getsize(resource.name),
+ ),
+ )
return loader
@@ -252,13 +216,9 @@ class SharedDataMiddleware:
return loader
def generate_etag(self, mtime: datetime, file_size: int, real_filename: str) -> str:
- if not isinstance(real_filename, bytes):
- real_filename = real_filename.encode( # type: ignore
- get_filesystem_encoding()
- )
-
+ real_filename = os.fsencode(real_filename)
timestamp = mtime.timestamp()
- checksum = adler32(real_filename) & 0xFFFFFFFF # type: ignore
+ checksum = adler32(real_filename) & 0xFFFFFFFF
return f"wzsdm-{timestamp}-{file_size}-{checksum}"
def __call__(
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing.py b/contrib/python/Werkzeug/py3/werkzeug/routing.py
deleted file mode 100644
index 937b42b055..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/routing.py
+++ /dev/null
@@ -1,2342 +0,0 @@
-"""When it comes to combining multiple controller or view functions
-(however you want to call them) you need a dispatcher. A simple way
-would be applying regular expression tests on the ``PATH_INFO`` and
-calling registered callback functions that return the value then.
-
-This module implements a much more powerful system than simple regular
-expression matching because it can also convert values in the URLs and
-build URLs.
-
-Here a simple example that creates a URL map for an application with
-two subdomains (www and kb) and some URL rules:
-
-.. code-block:: python
-
- m = Map([
- # Static URLs
- Rule('/', endpoint='static/index'),
- Rule('/about', endpoint='static/about'),
- Rule('/help', endpoint='static/help'),
- # Knowledge Base
- Subdomain('kb', [
- Rule('/', endpoint='kb/index'),
- Rule('/browse/', endpoint='kb/browse'),
- Rule('/browse/<int:id>/', endpoint='kb/browse'),
- Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
- ])
- ], default_subdomain='www')
-
-If the application doesn't use subdomains it's perfectly fine to not set
-the default subdomain and not use the `Subdomain` rule factory. The
-endpoint in the rules can be anything, for example import paths or
-unique identifiers. The WSGI application can use those endpoints to get the
-handler for that URL. It doesn't have to be a string at all but it's
-recommended.
-
-Now it's possible to create a URL adapter for one of the subdomains and
-build URLs:
-
-.. code-block:: python
-
- c = m.bind('example.com')
-
- c.build("kb/browse", dict(id=42))
- 'http://kb.example.com/browse/42/'
-
- c.build("kb/browse", dict())
- 'http://kb.example.com/browse/'
-
- c.build("kb/browse", dict(id=42, page=3))
- 'http://kb.example.com/browse/42/3'
-
- c.build("static/about")
- '/about'
-
- c.build("static/index", force_external=True)
- 'http://www.example.com/'
-
- c = m.bind('example.com', subdomain='kb')
-
- c.build("static/about")
- 'http://www.example.com/about'
-
-The first argument to bind is the server name *without* the subdomain.
-Per default it will assume that the script is mounted on the root, but
-often that's not the case so you can provide the real mount point as
-second argument:
-
-.. code-block:: python
-
- c = m.bind('example.com', '/applications/example')
-
-The third argument can be the subdomain, if not given the default
-subdomain is used. For more details about binding have a look at the
-documentation of the `MapAdapter`.
-
-And here is how you can match URLs:
-
-.. code-block:: python
-
- c = m.bind('example.com')
-
- c.match("/")
- ('static/index', {})
-
- c.match("/about")
- ('static/about', {})
-
- c = m.bind('example.com', '/', 'kb')
-
- c.match("/")
- ('kb/index', {})
-
- c.match("/browse/42/23")
- ('kb/browse', {'id': 42, 'page': 23})
-
-If matching fails you get a ``NotFound`` exception, if the rule thinks
-it's a good idea to redirect (for example because the URL was defined
-to have a slash at the end but the request was missing that slash) it
-will raise a ``RequestRedirect`` exception. Both are subclasses of
-``HTTPException`` so you can use those errors as responses in the
-application.
-
-If matching succeeded but the URL rule was incompatible to the given
-method (for example there were only rules for ``GET`` and ``HEAD`` but
-routing tried to match a ``POST`` request) a ``MethodNotAllowed``
-exception is raised.
-"""
-import ast
-import difflib
-import posixpath
-import re
-import typing
-import typing as t
-import uuid
-import warnings
-from pprint import pformat
-from string import Template
-from threading import Lock
-from types import CodeType
-
-from ._internal import _encode_idna
-from ._internal import _get_environ
-from ._internal import _to_bytes
-from ._internal import _to_str
-from ._internal import _wsgi_decoding_dance
-from .datastructures import ImmutableDict
-from .datastructures import MultiDict
-from .exceptions import BadHost
-from .exceptions import BadRequest
-from .exceptions import HTTPException
-from .exceptions import MethodNotAllowed
-from .exceptions import NotFound
-from .urls import _fast_url_quote
-from .urls import url_encode
-from .urls import url_join
-from .urls import url_quote
-from .urls import url_unquote
-from .utils import cached_property
-from .utils import redirect
-from .wsgi import get_host
-
-if t.TYPE_CHECKING:
- import typing_extensions as te
- from _typeshed.wsgi import WSGIApplication
- from _typeshed.wsgi import WSGIEnvironment
- from .wrappers.request import Request
- from .wrappers.response import Response
-
-_rule_re = re.compile(
- r"""
- (?P<static>[^<]*) # static rule data
- <
- (?:
- (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
- (?:\((?P<args>.*?)\))? # converter arguments
- \: # variable delimiter
- )?
- (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
- >
- """,
- re.VERBOSE,
-)
-_simple_rule_re = re.compile(r"<([^>]+)>")
-_converter_args_re = re.compile(
- r"""
- ((?P<name>\w+)\s*=\s*)?
- (?P<value>
- True|False|
- \d+.\d+|
- \d+.|
- \d+|
- [\w\d_.]+|
- [urUR]?(?P<stringval>"[^"]*?"|'[^']*')
- )\s*,
- """,
- re.VERBOSE,
-)
-
-
-_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
-
-
-def _pythonize(value: str) -> t.Union[None, bool, int, float, str]:
- if value in _PYTHON_CONSTANTS:
- return _PYTHON_CONSTANTS[value]
- for convert in int, float:
- try:
- return convert(value) # type: ignore
- except ValueError:
- pass
- if value[:1] == value[-1:] and value[0] in "\"'":
- value = value[1:-1]
- return str(value)
-
-
-def parse_converter_args(argstr: str) -> t.Tuple[t.Tuple, t.Dict[str, t.Any]]:
- argstr += ","
- args = []
- kwargs = {}
-
- for item in _converter_args_re.finditer(argstr):
- value = item.group("stringval")
- if value is None:
- value = item.group("value")
- value = _pythonize(value)
- if not item.group("name"):
- args.append(value)
- else:
- name = item.group("name")
- kwargs[name] = value
-
- return tuple(args), kwargs
-
-
-def parse_rule(rule: str) -> t.Iterator[t.Tuple[t.Optional[str], t.Optional[str], str]]:
- """Parse a rule and return it as generator. Each iteration yields tuples
- in the form ``(converter, arguments, variable)``. If the converter is
- `None` it's a static url part, otherwise it's a dynamic one.
-
- :internal:
- """
- pos = 0
- end = len(rule)
- do_match = _rule_re.match
- used_names = set()
- while pos < end:
- m = do_match(rule, pos)
- if m is None:
- break
- data = m.groupdict()
- if data["static"]:
- yield None, None, data["static"]
- variable = data["variable"]
- converter = data["converter"] or "default"
- if variable in used_names:
- raise ValueError(f"variable name {variable!r} used twice.")
- used_names.add(variable)
- yield converter, data["args"] or None, variable
- pos = m.end()
- if pos < end:
- remaining = rule[pos:]
- if ">" in remaining or "<" in remaining:
- raise ValueError(f"malformed url rule: {rule!r}")
- yield None, None, remaining
-
-
-class RoutingException(Exception):
- """Special exceptions that require the application to redirect, notifying
- about missing urls, etc.
-
- :internal:
- """
-
-
-class RequestRedirect(HTTPException, RoutingException):
- """Raise if the map requests a redirect. This is for example the case if
- `strict_slashes` are activated and an url that requires a trailing slash.
-
- The attribute `new_url` contains the absolute destination url.
- """
-
- code = 308
-
- def __init__(self, new_url: str) -> None:
- super().__init__(new_url)
- self.new_url = new_url
-
- def get_response(
- self,
- environ: t.Optional[t.Union["WSGIEnvironment", "Request"]] = None,
- scope: t.Optional[dict] = None,
- ) -> "Response":
- return redirect(self.new_url, self.code)
-
-
-class RequestPath(RoutingException):
- """Internal exception."""
-
- __slots__ = ("path_info",)
-
- def __init__(self, path_info: str) -> None:
- super().__init__()
- self.path_info = path_info
-
-
-class RequestAliasRedirect(RoutingException): # noqa: B903
- """This rule is an alias and wants to redirect to the canonical URL."""
-
- def __init__(self, matched_values: t.Mapping[str, t.Any]) -> None:
- super().__init__()
- self.matched_values = matched_values
-
-
-class BuildError(RoutingException, LookupError):
- """Raised if the build system cannot find a URL for an endpoint with the
- values provided.
- """
-
- def __init__(
- self,
- endpoint: str,
- values: t.Mapping[str, t.Any],
- method: t.Optional[str],
- adapter: t.Optional["MapAdapter"] = None,
- ) -> None:
- super().__init__(endpoint, values, method)
- self.endpoint = endpoint
- self.values = values
- self.method = method
- self.adapter = adapter
-
- @cached_property
- def suggested(self) -> t.Optional["Rule"]:
- return self.closest_rule(self.adapter)
-
- def closest_rule(self, adapter: t.Optional["MapAdapter"]) -> t.Optional["Rule"]:
- def _score_rule(rule: "Rule") -> float:
- return sum(
- [
- 0.98
- * difflib.SequenceMatcher(
- None, rule.endpoint, self.endpoint
- ).ratio(),
- 0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
- 0.01 * bool(rule.methods and self.method in rule.methods),
- ]
- )
-
- if adapter and adapter.map._rules:
- return max(adapter.map._rules, key=_score_rule)
-
- return None
-
- def __str__(self) -> str:
- message = [f"Could not build url for endpoint {self.endpoint!r}"]
- if self.method:
- message.append(f" ({self.method!r})")
- if self.values:
- message.append(f" with values {sorted(self.values)!r}")
- message.append(".")
- if self.suggested:
- if self.endpoint == self.suggested.endpoint:
- if (
- self.method
- and self.suggested.methods is not None
- and self.method not in self.suggested.methods
- ):
- message.append(
- " Did you mean to use methods"
- f" {sorted(self.suggested.methods)!r}?"
- )
- missing_values = self.suggested.arguments.union(
- set(self.suggested.defaults or ())
- ) - set(self.values.keys())
- if missing_values:
- message.append(
- f" Did you forget to specify values {sorted(missing_values)!r}?"
- )
- else:
- message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
- return "".join(message)
-
-
-class WebsocketMismatch(BadRequest):
- """The only matched rule is either a WebSocket and the request is
- HTTP, or the rule is HTTP and the request is a WebSocket.
- """
-
-
-class ValidationError(ValueError):
- """Validation error. If a rule converter raises this exception the rule
- does not match the current URL and the next URL is tried.
- """
-
-
-class RuleFactory:
- """As soon as you have more complex URL setups it's a good idea to use rule
- factories to avoid repetitive tasks. Some of them are builtin, others can
- be added by subclassing `RuleFactory` and overriding `get_rules`.
- """
-
- def get_rules(self, map: "Map") -> t.Iterable["Rule"]:
- """Subclasses of `RuleFactory` have to override this method and return
- an iterable of rules."""
- raise NotImplementedError()
-
-
-class Subdomain(RuleFactory):
- """All URLs provided by this factory have the subdomain set to a
- specific domain. For example if you want to use the subdomain for
- the current language this can be a good setup::
-
- url_map = Map([
- Rule('/', endpoint='#select_language'),
- Subdomain('<string(length=2):lang_code>', [
- Rule('/', endpoint='index'),
- Rule('/about', endpoint='about'),
- Rule('/help', endpoint='help')
- ])
- ])
-
- All the rules except for the ``'#select_language'`` endpoint will now
- listen on a two letter long subdomain that holds the language code
- for the current request.
- """
-
- def __init__(self, subdomain: str, rules: t.Iterable[RuleFactory]) -> None:
- self.subdomain = subdomain
- self.rules = rules
-
- def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
- for rulefactory in self.rules:
- for rule in rulefactory.get_rules(map):
- rule = rule.empty()
- rule.subdomain = self.subdomain
- yield rule
-
-
-class Submount(RuleFactory):
- """Like `Subdomain` but prefixes the URL rule with a given string::
-
- url_map = Map([
- Rule('/', endpoint='index'),
- Submount('/blog', [
- Rule('/', endpoint='blog/index'),
- Rule('/entry/<entry_slug>', endpoint='blog/show')
- ])
- ])
-
- Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
- """
-
- def __init__(self, path: str, rules: t.Iterable[RuleFactory]) -> None:
- self.path = path.rstrip("/")
- self.rules = rules
-
- def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
- for rulefactory in self.rules:
- for rule in rulefactory.get_rules(map):
- rule = rule.empty()
- rule.rule = self.path + rule.rule
- yield rule
-
-
-class EndpointPrefix(RuleFactory):
- """Prefixes all endpoints (which must be strings for this factory) with
- another string. This can be useful for sub applications::
-
- url_map = Map([
- Rule('/', endpoint='index'),
- EndpointPrefix('blog/', [Submount('/blog', [
- Rule('/', endpoint='index'),
- Rule('/entry/<entry_slug>', endpoint='show')
- ])])
- ])
- """
-
- def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
- self.prefix = prefix
- self.rules = rules
-
- def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
- for rulefactory in self.rules:
- for rule in rulefactory.get_rules(map):
- rule = rule.empty()
- rule.endpoint = self.prefix + rule.endpoint
- yield rule
-
-
-class RuleTemplate:
- """Returns copies of the rules wrapped and expands string templates in
- the endpoint, rule, defaults or subdomain sections.
-
- Here a small example for such a rule template::
-
- from werkzeug.routing import Map, Rule, RuleTemplate
-
- resource = RuleTemplate([
- Rule('/$name/', endpoint='$name.list'),
- Rule('/$name/<int:id>', endpoint='$name.show')
- ])
-
- url_map = Map([resource(name='user'), resource(name='page')])
-
- When a rule template is called the keyword arguments are used to
- replace the placeholders in all the string parameters.
- """
-
- def __init__(self, rules: t.Iterable["Rule"]) -> None:
- self.rules = list(rules)
-
- def __call__(self, *args: t.Any, **kwargs: t.Any) -> "RuleTemplateFactory":
- return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
-
-
-class RuleTemplateFactory(RuleFactory):
- """A factory that fills in template variables into rules. Used by
- `RuleTemplate` internally.
-
- :internal:
- """
-
- def __init__(
- self, rules: t.Iterable[RuleFactory], context: t.Dict[str, t.Any]
- ) -> None:
- self.rules = rules
- self.context = context
-
- def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
- for rulefactory in self.rules:
- for rule in rulefactory.get_rules(map):
- new_defaults = subdomain = None
- if rule.defaults:
- new_defaults = {}
- for key, value in rule.defaults.items():
- if isinstance(value, str):
- value = Template(value).substitute(self.context)
- new_defaults[key] = value
- if rule.subdomain is not None:
- subdomain = Template(rule.subdomain).substitute(self.context)
- new_endpoint = rule.endpoint
- if isinstance(new_endpoint, str):
- new_endpoint = Template(new_endpoint).substitute(self.context)
- yield Rule(
- Template(rule.rule).substitute(self.context),
- new_defaults,
- subdomain,
- rule.methods,
- rule.build_only,
- new_endpoint,
- rule.strict_slashes,
- )
-
-
-def _prefix_names(src: str) -> ast.stmt:
- """ast parse and prefix names with `.` to avoid collision with user vars"""
- tree = ast.parse(src).body[0]
- if isinstance(tree, ast.Expr):
- tree = tree.value # type: ignore
- for node in ast.walk(tree):
- if isinstance(node, ast.Name):
- node.id = f".{node.id}"
- return tree
-
-
-_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
-_IF_KWARGS_URL_ENCODE_CODE = """\
-if kwargs:
- q = '?'
- params = self._encode_query_vars(kwargs)
-else:
- q = params = ''
-"""
-_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
-_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
-
-
-class Rule(RuleFactory):
- """A Rule represents one URL pattern. There are some options for `Rule`
- that change the way it behaves and are passed to the `Rule` constructor.
- Note that besides the rule-string all arguments *must* be keyword arguments
- in order to not break the application on Werkzeug upgrades.
-
- `string`
- Rule strings basically are just normal URL paths with placeholders in
- the format ``<converter(arguments):name>`` where the converter and the
- arguments are optional. If no converter is defined the `default`
- converter is used which means `string` in the normal configuration.
-
- URL rules that end with a slash are branch URLs, others are leaves.
- If you have `strict_slashes` enabled (which is the default), all
- branch URLs that are matched without a trailing slash will trigger a
- redirect to the same URL with the missing slash appended.
-
- The converters are defined on the `Map`.
-
- `endpoint`
- The endpoint for this rule. This can be anything. A reference to a
- function, a string, a number etc. The preferred way is using a string
- because the endpoint is used for URL generation.
-
- `defaults`
- An optional dict with defaults for other rules with the same endpoint.
- This is a bit tricky but useful if you want to have unique URLs::
-
- url_map = Map([
- Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
- Rule('/all/page/<int:page>', endpoint='all_entries')
- ])
-
- If a user now visits ``http://example.com/all/page/1`` he will be
- redirected to ``http://example.com/all/``. If `redirect_defaults` is
- disabled on the `Map` instance this will only affect the URL
- generation.
-
- `subdomain`
- The subdomain rule string for this rule. If not specified the rule
- only matches for the `default_subdomain` of the map. If the map is
- not bound to a subdomain this feature is disabled.
-
- Can be useful if you want to have user profiles on different subdomains
- and all subdomains are forwarded to your application::
-
- url_map = Map([
- Rule('/', subdomain='<username>', endpoint='user/homepage'),
- Rule('/stats', subdomain='<username>', endpoint='user/stats')
- ])
-
- `methods`
- A sequence of http methods this rule applies to. If not specified, all
- methods are allowed. For example this can be useful if you want different
- endpoints for `POST` and `GET`. If methods are defined and the path
- matches but the method matched against is not in this list or in the
- list of another rule for that path the error raised is of the type
- `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
- list of methods and `HEAD` is not, `HEAD` is added automatically.
-
- `strict_slashes`
- Override the `Map` setting for `strict_slashes` only for this rule. If
- not specified the `Map` setting is used.
-
- `merge_slashes`
- Override :attr:`Map.merge_slashes` for this rule.
-
- `build_only`
- Set this to True and the rule will never match but will create a URL
- that can be build. This is useful if you have resources on a subdomain
- or folder that are not handled by the WSGI application (like static data)
-
- `redirect_to`
- If given this must be either a string or callable. In case of a
- callable it's called with the url adapter that triggered the match and
- the values of the URL as keyword arguments and has to return the target
- for the redirect, otherwise it has to be a string with placeholders in
- rule syntax::
-
- def foo_with_slug(adapter, id):
- # ask the database for the slug for the old id. this of
- # course has nothing to do with werkzeug.
- return f'foo/{Foo.get_slug_for_id(id)}'
-
- url_map = Map([
- Rule('/foo/<slug>', endpoint='foo'),
- Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
- Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
- ])
-
- When the rule is matched the routing system will raise a
- `RequestRedirect` exception with the target for the redirect.
-
- Keep in mind that the URL will be joined against the URL root of the
- script so don't use a leading slash on the target URL unless you
- really mean root of that domain.
-
- `alias`
- If enabled this rule serves as an alias for another rule with the same
- endpoint and arguments.
-
- `host`
- If provided and the URL map has host matching enabled this can be
- used to provide a match rule for the whole host. This also means
- that the subdomain feature is disabled.
-
- `websocket`
- If ``True``, this rule is only matches for WebSocket (``ws://``,
- ``wss://``) requests. By default, rules will only match for HTTP
- requests.
-
- .. versionadded:: 1.0
- Added ``websocket``.
-
- .. versionadded:: 1.0
- Added ``merge_slashes``.
-
- .. versionadded:: 0.7
- Added ``alias`` and ``host``.
-
- .. versionchanged:: 0.6.1
- ``HEAD`` is added to ``methods`` if ``GET`` is present.
- """
-
    def __init__(
        self,
        string: str,
        defaults: t.Optional[t.Mapping[str, t.Any]] = None,
        subdomain: t.Optional[str] = None,
        methods: t.Optional[t.Iterable[str]] = None,
        build_only: bool = False,
        endpoint: t.Optional[str] = None,
        strict_slashes: t.Optional[bool] = None,
        merge_slashes: t.Optional[bool] = None,
        redirect_to: t.Optional[t.Union[str, t.Callable[..., str]]] = None,
        alias: bool = False,
        host: t.Optional[str] = None,
        websocket: bool = False,
    ) -> None:
        """Create an unbound rule.  See the class docstring for the
        meaning of each parameter.

        :raises ValueError: if the rule does not start with a slash, or
            if a WebSocket rule declares methods other than GET/HEAD/OPTIONS.
        :raises TypeError: if ``methods`` is a single string instead of
            an iterable of strings.
        """
        if not string.startswith("/"):
            raise ValueError("urls must start with a leading slash")
        self.rule = string
        # A rule not ending in "/" is a "leaf"; branch rules (trailing
        # slash) take part in strict-slash redirects (see compile/match).
        self.is_leaf = not string.endswith("/")

        # Populated by bind(); the rule is unusable until added to a Map.
        self.map: "Map" = None  # type: ignore
        # For these three, None means "inherit the Map-wide setting at
        # bind time" (see bind()).
        self.strict_slashes = strict_slashes
        self.merge_slashes = merge_slashes
        self.subdomain = subdomain
        self.host = host
        self.defaults = defaults
        self.build_only = build_only
        self.alias = alias
        self.websocket = websocket

        if methods is not None:
            if isinstance(methods, str):
                raise TypeError("'methods' should be a list of strings.")

            # Methods are matched case-insensitively; normalize once here.
            methods = {x.upper() for x in methods}

            # HEAD is implied by GET (see the 0.6.1 changelog note above).
            if "HEAD" not in methods and "GET" in methods:
                methods.add("HEAD")

            if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
                raise ValueError(
                    "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
                )

        self.methods = methods
        self.endpoint: str = endpoint  # type: ignore
        self.redirect_to = redirect_to

        # Argument names seeded from the defaults; compile() adds the
        # placeholders parsed out of the rule string.
        if defaults:
            self.arguments = set(map(str, defaults))
        else:
            self.arguments = set()

        # (is_dynamic, data) pairs describing the parsed rule; filled in
        # by compile() and used by __eq__, __repr__ and the builder.
        self._trace: t.List[t.Tuple[bool, str]] = []
-
- def empty(self) -> "Rule":
- """
- Return an unbound copy of this rule.
-
- This can be useful if want to reuse an already bound URL for another
- map. See ``get_empty_kwargs`` to override what keyword arguments are
- provided to the new copy.
- """
- return type(self)(self.rule, **self.get_empty_kwargs())
-
- def get_empty_kwargs(self) -> t.Mapping[str, t.Any]:
- """
- Provides kwargs for instantiating empty copy with empty()
-
- Use this method to provide custom keyword arguments to the subclass of
- ``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
- has custom keyword arguments that are needed at instantiation.
-
- Must return a ``dict`` that will be provided as kwargs to the new
- instance of ``Rule``, following the initial ``self.rule`` value which
- is always provided as the first, required positional argument.
- """
- defaults = None
- if self.defaults:
- defaults = dict(self.defaults)
- return dict(
- defaults=defaults,
- subdomain=self.subdomain,
- methods=self.methods,
- build_only=self.build_only,
- endpoint=self.endpoint,
- strict_slashes=self.strict_slashes,
- redirect_to=self.redirect_to,
- alias=self.alias,
- host=self.host,
- )
-
- def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
- yield self
-
    def refresh(self) -> None:
        """Rebinds and refreshes the URL. Call this if you modified the
        rule in place.

        Re-runs bind() with ``rebind=True`` so the matcher regex and
        builder functions are recompiled against the current attributes.

        :internal:
        """
        self.bind(self.map, rebind=True)
-
- def bind(self, map: "Map", rebind: bool = False) -> None:
- """Bind the url to a map and create a regular expression based on
- the information from the rule itself and the defaults from the map.
-
- :internal:
- """
- if self.map is not None and not rebind:
- raise RuntimeError(f"url rule {self!r} already bound to map {self.map!r}")
- self.map = map
- if self.strict_slashes is None:
- self.strict_slashes = map.strict_slashes
- if self.merge_slashes is None:
- self.merge_slashes = map.merge_slashes
- if self.subdomain is None:
- self.subdomain = map.default_subdomain
- self.compile()
-
- def get_converter(
- self,
- variable_name: str,
- converter_name: str,
- args: t.Tuple,
- kwargs: t.Mapping[str, t.Any],
- ) -> "BaseConverter":
- """Looks up the converter for the given parameter.
-
- .. versionadded:: 0.9
- """
- if converter_name not in self.map.converters:
- raise LookupError(f"the converter {converter_name!r} does not exist")
- return self.map.converters[converter_name](self.map, *args, **kwargs)
-
    def _encode_query_vars(self, query_vars: t.Mapping[str, t.Any]) -> str:
        # Serialise leftover build values into a query string, using the
        # map's charset and optional parameter-sorting configuration.
        return url_encode(
            query_vars,
            charset=self.map.charset,
            sort=self.map.sort_parameters,
            key=self.map.sort_key,
        )
-
    def compile(self) -> None:
        """Compiles the regular expression and stores it.

        The matcher regex covers ``"<domain>|<path>"`` in one pattern
        (the same shape Map feeds to :meth:`match`).  As a side effect
        this fills ``_trace``, ``_converters`` and the static/argument
        weight lists used for rule sorting and URL building.
        """
        assert self.map is not None, "rule not bound"

        # The part before the "|" is either the host rule or the
        # subdomain rule, depending on the map's matching mode.
        if self.map.host_matching:
            domain_rule = self.host or ""
        else:
            domain_rule = self.subdomain or ""

        self._trace = []
        self._converters: t.Dict[str, "BaseConverter"] = {}
        self._static_weights: t.List[t.Tuple[int, int]] = []
        self._argument_weights: t.List[int] = []
        regex_parts = []

        def _build_regex(rule: str) -> None:
            # Translate one rule string (domain or path) into regex
            # fragments, recording a trace entry per piece.
            index = 0
            for converter, arguments, variable in parse_rule(rule):
                if converter is None:
                    # Static text: split into slash runs and segments so
                    # merge_slashes can relax the slash matching.
                    for match in re.finditer(r"/+|[^/]+", variable):
                        part = match.group(0)
                        if part.startswith("/"):
                            if self.merge_slashes:
                                regex_parts.append(r"/+?")
                                self._trace.append((False, "/"))
                            else:
                                regex_parts.append(part)
                                self._trace.append((False, part))
                            continue
                        self._trace.append((False, part))
                        regex_parts.append(re.escape(part))
                        if part:
                            self._static_weights.append((index, -len(part)))
                else:
                    # Dynamic placeholder: delegate the pattern to the
                    # converter and remember it for to_python() later.
                    if arguments:
                        c_args, c_kwargs = parse_converter_args(arguments)
                    else:
                        c_args = ()
                        c_kwargs = {}
                    convobj = self.get_converter(variable, converter, c_args, c_kwargs)
                    regex_parts.append(f"(?P<{variable}>{convobj.regex})")
                    self._converters[variable] = convobj
                    self._trace.append((True, variable))
                    self._argument_weights.append(convobj.weight)
                    self.arguments.add(str(variable))
                index = index + 1

        _build_regex(domain_rule)
        # Literal "|" separates the domain part from the path part.
        regex_parts.append("\\|")
        self._trace.append((False, "|"))
        # Branch rules are compiled without the trailing slash; the
        # __suffix__ group below matches it optionally.
        _build_regex(self.rule if self.is_leaf else self.rule.rstrip("/"))
        if not self.is_leaf:
            self._trace.append((False, "/"))

        # Bind the generated builder functions as methods on this rule.
        self._build: t.Callable[..., t.Tuple[str, str]]
        self._build = self._compile_builder(False).__get__(self, None)  # type: ignore
        self._build_unknown: t.Callable[..., t.Tuple[str, str]]
        self._build_unknown = self._compile_builder(True).__get__(  # type: ignore
            self, None
        )

        # Build-only rules never match, so no matcher regex is needed.
        if self.build_only:
            return

        if not (self.is_leaf and self.strict_slashes):
            # Optionally capture trailing slash(es); the lookbehind keeps
            # it from swallowing a slash that belongs to the rule itself.
            reps = "*" if self.merge_slashes else "?"
            tail = f"(?<!/)(?P<__suffix__>/{reps})"
        else:
            tail = ""

        regex = f"^{''.join(regex_parts)}{tail}$"
        self._regex = re.compile(regex)
-
    def match(
        self, path: str, method: t.Optional[str] = None
    ) -> t.Optional[t.MutableMapping[str, t.Any]]:
        """Check if the rule matches a given path. Path is a string in the
        form ``"subdomain|/path"`` and is assembled by the map. If
        the map is doing host matching the subdomain part will be the host
        instead.

        If the rule matches a dict with the converted values is returned,
        otherwise the return value is `None`.

        May raise :exc:`RequestPath` (redirect to a normalized path) or
        :exc:`RequestAliasRedirect` (redirect an alias to its canonical
        rule) instead of returning.

        :internal:
        """
        if not self.build_only:
            require_redirect = False

            m = self._regex.search(path)
            if m is not None:
                groups = m.groupdict()
                # we have a folder like part of the url without a trailing
                # slash and strict slashes enabled. raise an exception that
                # tells the map to redirect to the same url but with a
                # trailing slash
                if (
                    self.strict_slashes
                    and not self.is_leaf
                    and not groups.pop("__suffix__")
                    and (
                        method is None or self.methods is None or method in self.methods
                    )
                ):
                    path += "/"
                    require_redirect = True
                # if we are not in strict slashes mode we have to remove
                # a __suffix__
                elif not self.strict_slashes:
                    del groups["__suffix__"]

                # Run every captured group through its converter; any
                # ValidationError means this rule does not match at all.
                result = {}
                for name, value in groups.items():
                    try:
                        value = self._converters[name].to_python(value)
                    except ValidationError:
                        return None
                    result[str(name)] = value
                if self.defaults:
                    result.update(self.defaults)

                if self.merge_slashes:
                    # Rebuild the URL from the converted values; fewer
                    # slashes in the rebuilt path means the request had
                    # duplicates that should be redirected away.
                    new_path = "|".join(self.build(result, False))  # type: ignore
                    if path.endswith("/") and not new_path.endswith("/"):
                        new_path += "/"
                    if new_path.count("/") < path.count("/"):
                        # The URL will be encoded when MapAdapter.match
                        # handles the RequestPath raised below. Decode
                        # the URL here to avoid a double encoding.
                        path = url_unquote(new_path)
                        require_redirect = True

                if require_redirect:
                    # Strip the "domain|" prefix; RequestPath carries
                    # only the path portion.
                    path = path.split("|", 1)[1]
                    raise RequestPath(path)

                if self.alias and self.map.redirect_defaults:
                    raise RequestAliasRedirect(result)

                return result

        return None
-
    @staticmethod
    def _get_func_code(code: CodeType, name: str) -> t.Callable[..., t.Tuple[str, str]]:
        # Execute the compiled builder module in a scratch namespace and
        # pull the generated function out by its (synthetic) name.
        globs: t.Dict[str, t.Any] = {}
        locs: t.Dict[str, t.Any] = {}
        exec(code, globs, locs)
        return locs[name]  # type: ignore
-
    def _compile_builder(
        self, append_unknown: bool = True
    ) -> t.Callable[..., t.Tuple[str, str]]:
        """Generate a specialised URL-building function for this rule.

        The builder is synthesised as a Python AST, compiled, and later
        bound to the rule in :meth:`compile`.  It returns a
        ``(domain, path)`` tuple; with *append_unknown* extra keyword
        arguments are appended as a query string.

        :internal:
        """
        defaults = self.defaults or {}
        # Ops are (is_dynamic, data) pairs, split at the "|" trace entry
        # into the domain part and the path part.
        dom_ops: t.List[t.Tuple[bool, str]] = []
        url_ops: t.List[t.Tuple[bool, str]] = []

        opl = dom_ops
        for is_dynamic, data in self._trace:
            if data == "|" and opl is dom_ops:
                opl = url_ops
                continue
            # this seems like a silly case to ever come up but:
            # if a default is given for a value that appears in the rule,
            # resolve it to a constant ahead of time
            if is_dynamic and data in defaults:
                data = self._converters[data].to_url(defaults[data])
                opl.append((False, data))
            elif not is_dynamic:
                opl.append(
                    (False, url_quote(_to_bytes(data, self.map.charset), safe="/:|+"))
                )
            else:
                opl.append((True, data))

        def _convert(elem: str) -> ast.stmt:
            # AST for "converter(elem)" with prefixed internal names.
            ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
            ret.args = [ast.Name(str(elem), ast.Load())]  # type: ignore  # str for py2
            return ret

        def _parts(ops: t.List[t.Tuple[bool, str]]) -> t.List[ast.AST]:
            # Turn ops into AST f-string pieces, folding adjacent string
            # constants together.
            parts = [
                _convert(elem) if is_dynamic else ast.Str(s=elem)
                for is_dynamic, elem in ops
            ]
            parts = parts or [ast.Str("")]
            # constant fold
            ret = [parts[0]]
            for p in parts[1:]:
                if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str):
                    ret[-1] = ast.Str(ret[-1].s + p.s)
                else:
                    ret.append(p)
            return ret

        dom_parts = _parts(dom_ops)
        url_parts = _parts(url_ops)
        if not append_unknown:
            body = []
        else:
            # Prepend the "if kwargs: append query string" snippet.
            body = [_IF_KWARGS_URL_ENCODE_AST]
            url_parts.extend(_URL_ENCODE_AST_NAMES)

        def _join(parts: t.List[ast.AST]) -> ast.AST:
            if len(parts) == 1:  # shortcut
                return parts[0]
            return ast.JoinedStr(parts)

        body.append(
            ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
        )

        # Positional params: every dynamic element without a default;
        # keyword params: the defaults (placeholder "" default values).
        pargs = [
            elem
            for is_dynamic, elem in dom_ops + url_ops
            if is_dynamic and elem not in defaults
        ]
        kargs = [str(k) for k in defaults]

        func_ast: ast.FunctionDef = _prefix_names("def _(): pass")  # type: ignore
        func_ast.name = f"<builder:{self.rule!r}>"
        func_ast.args.args.append(ast.arg(".self", None))
        for arg in pargs + kargs:
            func_ast.args.args.append(ast.arg(arg, None))
        func_ast.args.kwarg = ast.arg(".kwargs", None)
        for _ in kargs:
            func_ast.args.defaults.append(ast.Str(""))
        func_ast.body = body

        # use `ast.parse` instead of `ast.Module` for better portability
        # Python 3.8 changes the signature of `ast.Module`
        module = ast.parse("")
        module.body = [func_ast]

        # mark everything as on line 1, offset 0
        # less error-prone than `ast.fix_missing_locations`
        # bad line numbers cause an assert to fail in debug builds
        for node in ast.walk(module):
            if "lineno" in node._attributes:
                node.lineno = 1
            if "col_offset" in node._attributes:
                node.col_offset = 0

        code = compile(module, "<werkzeug routing>", "exec")
        return self._get_func_code(code, func_ast.name)
-
- def build(
- self, values: t.Mapping[str, t.Any], append_unknown: bool = True
- ) -> t.Optional[t.Tuple[str, str]]:
- """Assembles the relative url for that rule and the subdomain.
- If building doesn't work for some reasons `None` is returned.
-
- :internal:
- """
- try:
- if append_unknown:
- return self._build_unknown(**values)
- else:
- return self._build(**values)
- except ValidationError:
- return None
-
- def provides_defaults_for(self, rule: "Rule") -> bool:
- """Check if this rule has defaults for a given rule.
-
- :internal:
- """
- return bool(
- not self.build_only
- and self.defaults
- and self.endpoint == rule.endpoint
- and self != rule
- and self.arguments == rule.arguments
- )
-
- def suitable_for(
- self, values: t.Mapping[str, t.Any], method: t.Optional[str] = None
- ) -> bool:
- """Check if the dict of values has enough data for url generation.
-
- :internal:
- """
- # if a method was given explicitly and that method is not supported
- # by this rule, this rule is not suitable.
- if (
- method is not None
- and self.methods is not None
- and method not in self.methods
- ):
- return False
-
- defaults = self.defaults or ()
-
- # all arguments required must be either in the defaults dict or
- # the value dictionary otherwise it's not suitable
- for key in self.arguments:
- if key not in defaults and key not in values:
- return False
-
- # in case defaults are given we ensure that either the value was
- # skipped or the value is the same as the default value.
- if defaults:
- for key, value in defaults.items():
- if key in values and value != values[key]:
- return False
-
- return True
-
- def match_compare_key(
- self,
- ) -> t.Tuple[bool, int, t.Iterable[t.Tuple[int, int]], int, t.Iterable[int]]:
- """The match compare key for sorting.
-
- Current implementation:
-
- 1. rules without any arguments come first for performance
- reasons only as we expect them to match faster and some
- common ones usually don't have any arguments (index pages etc.)
- 2. rules with more static parts come first so the second argument
- is the negative length of the number of the static weights.
- 3. we order by static weights, which is a combination of index
- and length
- 4. The more complex rules come first so the next argument is the
- negative length of the number of argument weights.
- 5. lastly we order by the actual argument weights.
-
- :internal:
- """
- return (
- bool(self.arguments),
- -len(self._static_weights),
- self._static_weights,
- -len(self._argument_weights),
- self._argument_weights,
- )
-
- def build_compare_key(self) -> t.Tuple[int, int, int]:
- """The build compare key for sorting.
-
- :internal:
- """
- return (1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ()))
-
    def __eq__(self, other: object) -> bool:
        # Two rules are equal when they parse to the same trace, i.e.
        # identical static parts and dynamic placeholders.
        return isinstance(other, type(self)) and self._trace == other._trace

    # Rules are mutable (rebindable), so value-based hashing is unsafe.
    __hash__ = None  # type: ignore
-
    def __str__(self) -> str:
        # The raw rule string, e.g. "/page/<int:id>".
        return self.rule
-
    def __repr__(self) -> str:
        # Unbound rules have no trace yet, so show a placeholder.
        if self.map is None:
            return f"<{type(self).__name__} (unbound)>"
        # Reassemble the rule from its trace, dropping the leading
        # domain separator "|".
        parts = []
        for is_dynamic, data in self._trace:
            if is_dynamic:
                parts.append(f"<{data}>")
            else:
                parts.append(data)
        parts = "".join(parts).lstrip("|")
        methods = f" ({', '.join(self.methods)})" if self.methods is not None else ""
        return f"<{type(self).__name__} {parts!r}{methods} -> {self.endpoint}>"
-
-
class BaseConverter:
    """Base class for all converters.

    Subclasses customise :attr:`regex` (the pattern a URL segment must
    match) and :attr:`weight` (used when sorting rules), and override
    :meth:`to_python` / :meth:`to_url` to convert between URL text and
    Python values.
    """

    regex = "[^/]+"
    weight = 100

    def __init__(self, map: "Map", *args: t.Any, **kwargs: t.Any) -> None:
        self.map = map

    def to_python(self, value: str) -> t.Any:
        # Identity by default; subclasses coerce to richer types.
        return value

    def to_url(self, value: t.Any) -> str:
        # Bytes pass through untouched; anything else is stringified and
        # encoded with the map's charset before quoting.
        if not isinstance(value, (bytes, bytearray)):
            value = str(value).encode(self.map.charset)

        return _fast_url_quote(value)
-
-
class UnicodeConverter(BaseConverter):
    """This converter is the default converter and accepts any string but
    only one path segment. Thus the string can not include a slash.

    This is the default validator.

    Example::

        Rule('/pages/<page>'),
        Rule('/<string(length=2):lang_code>')

    :param map: the :class:`Map`.
    :param minlength: the minimum length of the string. Must be greater
      or equal 1.
    :param maxlength: the maximum length of the string.
    :param length: the exact length of the string.
    """

    def __init__(
        self,
        map: "Map",
        minlength: int = 1,
        maxlength: t.Optional[int] = None,
        length: t.Optional[int] = None,
    ) -> None:
        super().__init__(map)

        # An exact ``length`` takes precedence over the min/max bounds.
        if length is not None:
            quantifier = f"{{{int(length)}}}"
        else:
            upper = "" if maxlength is None else str(int(maxlength))
            quantifier = f"{{{int(minlength)},{upper}}}"

        self.regex = f"[^/]{quantifier}"
-
-
class AnyConverter(BaseConverter):
    """Matches one of the items provided. Items can either be Python
    identifiers or strings::

        Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')

    :param map: the :class:`Map`.
    :param items: this function accepts the possible items as positional
                  arguments.
    """

    def __init__(self, map: "Map", *items: str) -> None:
        super().__init__(map)
        # Escape each alternative so literal regex metacharacters in the
        # item names cannot change the pattern.
        choices = "|".join(re.escape(item) for item in items)
        self.regex = f"(?:{choices})"
-
-
class PathConverter(BaseConverter):
    """Like the default :class:`UnicodeConverter`, but it also matches
    slashes. This is useful for wikis and similar applications::

        Rule('/<path:wikipage>')
        Rule('/<path:wikipage>/edit')

    :param map: the :class:`Map`.
    """

    # Must not start with a slash; matches lazily so trailing static
    # rule parts still get a chance to match.
    regex = "[^/].*?"
    # Higher weight than the default so path rules sort after more
    # specific converters.
    weight = 200
-
-
class NumberConverter(BaseConverter):
    """Baseclass for `IntegerConverter` and `FloatConverter`.

    Handles the shared ``fixed_digits`` / ``min`` / ``max`` / ``signed``
    options; subclasses supply :attr:`regex` and :attr:`num_convert`.

    :internal:
    """

    weight = 50
    num_convert: t.Callable = int

    def __init__(
        self,
        map: "Map",
        fixed_digits: int = 0,
        min: t.Optional[int] = None,
        max: t.Optional[int] = None,
        signed: bool = False,
    ) -> None:
        # Swap in the sign-aware pattern on the instance before anything
        # reads ``self.regex``.
        if signed:
            self.regex = self.signed_regex

        super().__init__(map)
        self.fixed_digits = fixed_digits
        self.min = min
        self.max = max
        self.signed = signed

    def to_python(self, value: str) -> t.Any:
        if self.fixed_digits and len(value) != self.fixed_digits:
            raise ValidationError()

        value = self.num_convert(value)

        out_of_range = (self.min is not None and value < self.min) or (
            self.max is not None and value > self.max
        )
        if out_of_range:
            raise ValidationError()

        return value

    def to_url(self, value: t.Any) -> str:
        text = str(self.num_convert(value))

        if self.fixed_digits:
            text = text.zfill(self.fixed_digits)

        return text

    @property
    def signed_regex(self) -> str:
        # Optional leading minus in front of the subclass pattern.
        return f"-?{self.regex}"
-
-
class IntegerConverter(NumberConverter):
    """This converter only accepts integer values::

        Rule("/page/<int:page>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/page/<int(signed=True):page>")

    :param map: The :class:`Map`.
    :param fixed_digits: The number of fixed digits in the URL. If you
        set this to ``4`` for example, the rule will only match if the
        URL looks like ``/0001/``. The default is variable length.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Unsigned digits only; NumberConverter swaps in signed_regex on the
    # instance when signed=True.
    regex = r"\d+"
-
-
class FloatConverter(NumberConverter):
    """This converter only accepts floating point values::

        Rule("/probability/<float:probability>")

    By default it only accepts unsigned, positive values. The ``signed``
    parameter will enable signed, negative values. ::

        Rule("/offset/<float(signed=True):offset>")

    :param map: The :class:`Map`.
    :param min: The minimal value.
    :param max: The maximal value.
    :param signed: Allow signed (negative) values.

    .. versionadded:: 0.15
        The ``signed`` parameter.
    """

    # Requires an explicit decimal point, so "1" does not match; use the
    # ``int`` converter for whole numbers.
    regex = r"\d+\.\d+"
    num_convert = float

    def __init__(
        self,
        map: "Map",
        min: t.Optional[float] = None,
        max: t.Optional[float] = None,
        signed: bool = False,
    ) -> None:
        # ``fixed_digits`` is deliberately not exposed for floats.
        super().__init__(map, min=min, max=max, signed=signed)  # type: ignore
-
-
class UUIDConverter(BaseConverter):
    """This converter only accepts UUID strings::

        Rule('/object/<uuid:identifier>')

    .. versionadded:: 0.10

    :param map: the :class:`Map`.
    """

    # Canonical 8-4-4-4-12 hex form, either case.
    regex = (
        r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
        r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
    )

    def to_python(self, value: str) -> uuid.UUID:
        return uuid.UUID(value)

    def to_url(self, value: uuid.UUID) -> str:
        return str(value)
-
-
#: the default converter mapping for the map.  Maps can extend or
#: override these via the ``converters`` constructor argument.
DEFAULT_CONVERTERS: t.Mapping[str, t.Type[BaseConverter]] = {
    "default": UnicodeConverter,
    "string": UnicodeConverter,
    "any": AnyConverter,
    "path": PathConverter,
    "int": IntegerConverter,
    "float": FloatConverter,
    "uuid": UUIDConverter,
}
-
-
class Map:
    """The map class stores all the URL rules and some configuration
    parameters. Some of the configuration values are only stored on the
    `Map` instance since those affect all rules, others are just defaults
    and can be overridden for each rule. Note that you have to specify all
    arguments besides the `rules` as keyword arguments!

    :param rules: sequence of url rules for this map.
    :param default_subdomain: The default subdomain for rules without a
                              subdomain defined.
    :param charset: charset of the url. defaults to ``"utf-8"``
    :param strict_slashes: If a rule ends with a slash but the matched
        URL does not, redirect to the URL with a trailing slash.
    :param merge_slashes: Merge consecutive slashes when matching or
        building URLs. Matches will redirect to the normalized URL.
        Slashes in variable parts are not merged.
    :param redirect_defaults: This will redirect to the default rule if it
                              wasn't visited that way. This helps creating
                              unique URLs.
    :param converters: A dict of converters that adds additional converters
                       to the list of converters. If you redefine one
                       converter this will override the original one.
    :param sort_parameters: If set to `True` the url parameters are sorted.
                            See `url_encode` for more details.
    :param sort_key: The sort key function for `url_encode`.
    :param encoding_errors: the error method to use for decoding
    :param host_matching: if set to `True` it enables the host matching
                          feature and disables the subdomain one.  If
                          enabled the `host` parameter to rules is used
                          instead of the `subdomain` one.

    .. versionchanged:: 1.0
        If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
        will match.

    .. versionchanged:: 1.0
        Added ``merge_slashes``.

    .. versionchanged:: 0.7
        Added ``encoding_errors`` and ``host_matching``.

    .. versionchanged:: 0.5
        Added ``sort_parameters`` and ``sort_key``.
    """

    #: A dict of default converters to be used.
    default_converters = ImmutableDict(DEFAULT_CONVERTERS)

    #: The type of lock to use when updating.
    #:
    #: .. versionadded:: 1.0
    lock_class = Lock

    def __init__(
        self,
        rules: t.Optional[t.Iterable[RuleFactory]] = None,
        default_subdomain: str = "",
        charset: str = "utf-8",
        strict_slashes: bool = True,
        merge_slashes: bool = True,
        redirect_defaults: bool = True,
        converters: t.Optional[t.Mapping[str, t.Type[BaseConverter]]] = None,
        sort_parameters: bool = False,
        sort_key: t.Optional[t.Callable[[t.Any], t.Any]] = None,
        encoding_errors: str = "replace",
        host_matching: bool = False,
    ) -> None:
        self._rules: t.List[Rule] = []
        self._rules_by_endpoint: t.Dict[str, t.List[Rule]] = {}
        # _remap marks the rule lists as dirty; update() re-sorts them
        # lazily under _remap_lock before the next match/build.
        self._remap = True
        self._remap_lock = self.lock_class()

        self.default_subdomain = default_subdomain
        self.charset = charset
        self.encoding_errors = encoding_errors
        self.strict_slashes = strict_slashes
        self.merge_slashes = merge_slashes
        self.redirect_defaults = redirect_defaults
        self.host_matching = host_matching

        # .copy() must yield a mutable mapping here, since user-supplied
        # converters may be merged in below.
        self.converters = self.default_converters.copy()
        if converters:
            self.converters.update(converters)

        self.sort_parameters = sort_parameters
        self.sort_key = sort_key

        for rulefactory in rules or ():
            self.add(rulefactory)

    def is_endpoint_expecting(self, endpoint: str, *arguments: str) -> bool:
        """Iterate over all rules and check if the endpoint expects
        the arguments provided. This is for example useful if you have
        some URLs that expect a language code and others that do not and
        you want to wrap the builder a bit so that the current language
        code is automatically added if not provided but endpoints expect
        it.

        :param endpoint: the endpoint to check.
        :param arguments: this function accepts one or more arguments
                          as positional arguments.  Each one of them is
                          checked.
        """
        self.update()
        arguments = set(arguments)
        for rule in self._rules_by_endpoint[endpoint]:
            if arguments.issubset(rule.arguments):
                return True
        return False

    def iter_rules(self, endpoint: t.Optional[str] = None) -> t.Iterator[Rule]:
        """Iterate over all rules or the rules of an endpoint.

        :param endpoint: if provided only the rules for that endpoint
                         are returned.
        :return: an iterator
        """
        self.update()
        if endpoint is not None:
            return iter(self._rules_by_endpoint[endpoint])
        return iter(self._rules)

    def add(self, rulefactory: RuleFactory) -> None:
        """Add a new rule or factory to the map and bind it. Requires that the
        rule is not bound to another map.

        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
        """
        for rule in rulefactory.get_rules(self):
            rule.bind(self)
            self._rules.append(rule)
            self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
        # Rule lists are now out of order; re-sorted lazily by update().
        self._remap = True

    def bind(
        self,
        server_name: str,
        script_name: t.Optional[str] = None,
        subdomain: t.Optional[str] = None,
        url_scheme: str = "http",
        default_method: str = "GET",
        path_info: t.Optional[str] = None,
        query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
    ) -> "MapAdapter":
        """Return a new :class:`MapAdapter` with the details specified to the
        call. Note that `script_name` will default to ``'/'`` if not further
        specified or `None`. The `server_name` at least is a requirement
        because the HTTP RFC requires absolute URLs for redirects and so all
        redirect exceptions raised by Werkzeug will contain the full canonical
        URL.

        If no path_info is passed to :meth:`match` it will use the default path
        info passed to bind. While this doesn't really make sense for
        manual bind calls, it's useful if you bind a map to a WSGI
        environment which already contains the path info.

        `subdomain` will default to the `default_subdomain` for this map if
        no defined. If there is no `default_subdomain` you cannot use the
        subdomain feature.

        .. versionchanged:: 1.0
            If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
            will match.

        .. versionchanged:: 0.15
            ``path_info`` defaults to ``'/'`` if ``None``.

        .. versionchanged:: 0.8
            ``query_args`` can be a string.

        .. versionchanged:: 0.7
            Added ``query_args``.
        """
        server_name = server_name.lower()
        if self.host_matching:
            if subdomain is not None:
                raise RuntimeError("host matching enabled and a subdomain was provided")
        elif subdomain is None:
            subdomain = self.default_subdomain
        if script_name is None:
            script_name = "/"
        if path_info is None:
            path_info = "/"

        # Non-ASCII host names are IDNA-encoded; failure means the host
        # header was malformed.
        try:
            server_name = _encode_idna(server_name)  # type: ignore
        except UnicodeError as e:
            raise BadHost() from e

        return MapAdapter(
            self,
            server_name,
            script_name,
            subdomain,
            url_scheme,
            path_info,
            default_method,
            query_args,
        )

    def bind_to_environ(
        self,
        environ: t.Union["WSGIEnvironment", "Request"],
        server_name: t.Optional[str] = None,
        subdomain: t.Optional[str] = None,
    ) -> "MapAdapter":
        """Like :meth:`bind` but you can pass it an WSGI environment and it
        will fetch the information from that dictionary. Note that because of
        limitations in the protocol there is no way to get the current
        subdomain and real `server_name` from the environment. If you don't
        provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
        `HTTP_HOST` if provided) as used `server_name` with disabled subdomain
        feature.

        If `subdomain` is `None` but an environment and a server name is
        provided it will calculate the current subdomain automatically.
        Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
        in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
        subdomain will be ``'staging.dev'``.

        If the object passed as environ has an environ attribute, the value of
        this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` added as a default of the
        :class:`MapAdapter` so that you don't have to pass the path info to
        the match method.

        .. versionchanged:: 1.0.0
            If the passed server name specifies port 443, it will match
            if the incoming scheme is ``https`` without a port.

        .. versionchanged:: 1.0.0
            A warning is shown when the passed server name does not
            match the incoming WSGI server name.

        .. versionchanged:: 0.8
           This will no longer raise a ValueError when an unexpected server
           name was passed.

        .. versionchanged:: 0.5
            previously this method accepted a bogus `calculate_subdomain`
            parameter that did not have any effect.  It was removed because
            of that.

        :param environ: a WSGI environment.
        :param server_name: an optional server name hint (see above).
        :param subdomain: optionally the current subdomain (see above).
        """
        env = _get_environ(environ)
        wsgi_server_name = get_host(env).lower()
        scheme = env["wsgi.url_scheme"]
        # A "Connection: upgrade" + "Upgrade: websocket" pair switches
        # the effective scheme to ws/wss so WebSocket rules match.
        upgrade = any(
            v.strip() == "upgrade"
            for v in env.get("HTTP_CONNECTION", "").lower().split(",")
        )

        if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
            scheme = "wss" if scheme == "https" else "ws"

        if server_name is None:
            server_name = wsgi_server_name
        else:
            server_name = server_name.lower()

            # strip standard port to match get_host()
            if scheme in {"http", "ws"} and server_name.endswith(":80"):
                server_name = server_name[:-3]
            elif scheme in {"https", "wss"} and server_name.endswith(":443"):
                server_name = server_name[:-4]

        if subdomain is None and not self.host_matching:
            cur_server_name = wsgi_server_name.split(".")
            real_server_name = server_name.split(".")
            offset = -len(real_server_name)

            if cur_server_name[offset:] != real_server_name:
                # This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
                # Instead of raising an exception like in Werkzeug 0.7 or
                # earlier we go by an invalid subdomain which will result
                # in a 404 error on matching.
                warnings.warn(
                    f"Current server name {wsgi_server_name!r} doesn't match configured"
                    f" server name {server_name!r}",
                    stacklevel=2,
                )
                subdomain = "<invalid>"
            else:
                subdomain = ".".join(filter(None, cur_server_name[:offset]))

        def _get_wsgi_string(name: str) -> t.Optional[str]:
            # WSGI strings arrive latin-1 decoded; re-decode them with
            # the map's charset.
            val = env.get(name)
            if val is not None:
                return _wsgi_decoding_dance(val, self.charset)
            return None

        script_name = _get_wsgi_string("SCRIPT_NAME")
        path_info = _get_wsgi_string("PATH_INFO")
        query_args = _get_wsgi_string("QUERY_STRING")
        # Call the base implementation explicitly with the extracted
        # environ values.
        return Map.bind(
            self,
            server_name,
            script_name,
            subdomain,
            scheme,
            env["REQUEST_METHOD"],
            path_info,
            query_args=query_args,
        )

    def update(self) -> None:
        """Called before matching and building to keep the compiled rules
        in the correct order after things changed.
        """
        # Cheap unlocked check first; re-checked under the lock so
        # concurrent callers only sort once.
        if not self._remap:
            return

        with self._remap_lock:
            if not self._remap:
                return

            self._rules.sort(key=lambda x: x.match_compare_key())
            for rules in self._rules_by_endpoint.values():
                rules.sort(key=lambda x: x.build_compare_key())
            self._remap = False

    def __repr__(self) -> str:
        rules = self.iter_rules()
        return f"{type(self).__name__}({pformat(list(rules))})"
-
-
-class MapAdapter:
-
- """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
- the URL matching and building based on runtime information.
- """
-
    def __init__(
        self,
        map: Map,
        server_name: str,
        script_name: str,
        subdomain: t.Optional[str],
        url_scheme: str,
        path_info: str,
        default_method: str,
        query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
    ):
        self.map = map
        self.server_name = _to_str(server_name)
        script_name = _to_str(script_name)
        # Normalise the script root to end with "/" so URL joining is
        # predictable.
        if not script_name.endswith("/"):
            script_name += "/"
        self.script_name = script_name
        self.subdomain = _to_str(subdomain)
        self.url_scheme = _to_str(url_scheme)
        self.path_info = _to_str(path_info)
        self.default_method = _to_str(default_method)
        self.query_args = query_args
        # ws/wss adapters only match rules declared with websocket=True.
        self.websocket = self.url_scheme in {"ws", "wss"}
-
    def dispatch(
        self,
        view_func: t.Callable[[str, t.Mapping[str, t.Any]], "WSGIApplication"],
        path_info: t.Optional[str] = None,
        method: t.Optional[str] = None,
        catch_http_exceptions: bool = False,
    ) -> "WSGIApplication":
        """Does the complete dispatching process.  `view_func` is called with
        the endpoint and a dict with the values for the view.  It should
        look up the view function, call it, and return a response object
        or WSGI application.  http exceptions are not caught by default
        so that applications can display nicer error messages by just
        catching them by hand.  If you want to stick with the default
        error messages you can pass it ``catch_http_exceptions=True`` and
        it will catch the http exceptions.

        Here a small example for the dispatch usage::

            from werkzeug.wrappers import Request, Response
            from werkzeug.wsgi import responder
            from werkzeug.routing import Map, Rule

            def on_index(request):
                return Response('Hello from the index')

            url_map = Map([Rule('/', endpoint='index')])
            views = {'index': on_index}

            @responder
            def application(environ, start_response):
                request = Request(environ)
                urls = url_map.bind_to_environ(environ)
                return urls.dispatch(lambda e, v: views[e](request, **v),
                                     catch_http_exceptions=True)

        Keep in mind that this method might return exception objects, too, so
        use :class:`Response.force_type` to get a response object.

        :param view_func: a function that is called with the endpoint as
                          first argument and the value dict as second.  Has
                          to dispatch to the actual view function with this
                          information.  (see above)
        :param path_info: the path info to use for matching.  Overrides the
                          path info specified on binding.
        :param method: the HTTP method used for matching.  Overrides the
                       method specified on binding.
        :param catch_http_exceptions: set to `True` to catch any of the
                                      werkzeug :class:`HTTPException`\\s.
        """
        try:
            try:
                endpoint, args = self.match(path_info, method)
            except RequestRedirect as e:
                # Redirects are HTTPExceptions that double as responses;
                # return them directly so the client gets the 3xx.
                return e
            return view_func(endpoint, args)
        except HTTPException as e:
            # Only swallow HTTP errors when the caller opted in.
            if catch_http_exceptions:
                return e
            raise
-
- @typing.overload
- def match( # type: ignore
- self,
- path_info: t.Optional[str] = None,
- method: t.Optional[str] = None,
- return_rule: "te.Literal[False]" = False,
- query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
- websocket: t.Optional[bool] = None,
- ) -> t.Tuple[str, t.Mapping[str, t.Any]]:
- ...
-
- @typing.overload
- def match(
- self,
- path_info: t.Optional[str] = None,
- method: t.Optional[str] = None,
- return_rule: "te.Literal[True]" = True,
- query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
- websocket: t.Optional[bool] = None,
- ) -> t.Tuple[Rule, t.Mapping[str, t.Any]]:
- ...
-
- def match(
- self,
- path_info: t.Optional[str] = None,
- method: t.Optional[str] = None,
- return_rule: bool = False,
- query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
- websocket: t.Optional[bool] = None,
- ) -> t.Tuple[t.Union[str, Rule], t.Mapping[str, t.Any]]:
- """The usage is simple: you just pass the match method the current
- path info as well as the method (which defaults to `GET`). The
- following things can then happen:
-
- - you receive a `NotFound` exception that indicates that no URL is
- matching. A `NotFound` exception is also a WSGI application you
- can call to get a default page not found page (happens to be the
- same object as `werkzeug.exceptions.NotFound`)
-
- - you receive a `MethodNotAllowed` exception that indicates that there
- is a match for this URL but not for the current request method.
- This is useful for RESTful applications.
-
- - you receive a `RequestRedirect` exception with a `new_url`
- attribute. This exception is used to notify you about a request
- Werkzeug requests from your WSGI application. This is for example the
- case if you request ``/foo`` although the correct URL is ``/foo/``
- You can use the `RequestRedirect` instance as response-like object
- similar to all other subclasses of `HTTPException`.
-
- - you receive a ``WebsocketMismatch`` exception if the only
- match is a WebSocket rule but the bind is an HTTP request, or
- if the match is an HTTP rule but the bind is a WebSocket
- request.
-
- - you get a tuple in the form ``(endpoint, arguments)`` if there is
- a match (unless `return_rule` is True, in which case you get a tuple
- in the form ``(rule, arguments)``)
-
- If the path info is not passed to the match method the default path
- info of the map is used (defaults to the root URL if not defined
- explicitly).
-
- All of the exceptions raised are subclasses of `HTTPException` so they
- can be used as WSGI responses. They will all render generic error or
- redirect pages.
-
- Here is a small example for matching:
-
- >>> m = Map([
- ... Rule('/', endpoint='index'),
- ... Rule('/downloads/', endpoint='downloads/index'),
- ... Rule('/downloads/<int:id>', endpoint='downloads/show')
- ... ])
- >>> urls = m.bind("example.com", "/")
- >>> urls.match("/", "GET")
- ('index', {})
- >>> urls.match("/downloads/42")
- ('downloads/show', {'id': 42})
-
- And here is what happens on redirect and missing URLs:
-
- >>> urls.match("/downloads")
- Traceback (most recent call last):
- ...
- RequestRedirect: http://example.com/downloads/
- >>> urls.match("/missing")
- Traceback (most recent call last):
- ...
- NotFound: 404 Not Found
-
- :param path_info: the path info to use for matching. Overrides the
- path info specified on binding.
- :param method: the HTTP method used for matching. Overrides the
- method specified on binding.
- :param return_rule: return the rule that matched instead of just the
- endpoint (defaults to `False`).
- :param query_args: optional query arguments that are used for
- automatic redirects as string or dictionary. It's
- currently not possible to use the query arguments
- for URL matching.
- :param websocket: Match WebSocket instead of HTTP requests. A
- websocket request has a ``ws`` or ``wss``
- :attr:`url_scheme`. This overrides that detection.
-
- .. versionadded:: 1.0
- Added ``websocket``.
-
- .. versionchanged:: 0.8
- ``query_args`` can be a string.
-
- .. versionadded:: 0.7
- Added ``query_args``.
-
- .. versionadded:: 0.6
- Added ``return_rule``.
- """
- self.map.update()
- if path_info is None:
- path_info = self.path_info
- else:
- path_info = _to_str(path_info, self.map.charset)
- if query_args is None:
- query_args = self.query_args or {}
- method = (method or self.default_method).upper()
-
- if websocket is None:
- websocket = self.websocket
-
- require_redirect = False
-
- domain_part = self.server_name if self.map.host_matching else self.subdomain
- path_part = f"/{path_info.lstrip('/')}" if path_info else ""
- path = f"{domain_part}|{path_part}"
-
- have_match_for = set()
- websocket_mismatch = False
-
- for rule in self.map._rules:
- try:
- rv = rule.match(path, method)
- except RequestPath as e:
- raise RequestRedirect(
- self.make_redirect_url(
- url_quote(e.path_info, self.map.charset, safe="/:|+"),
- query_args,
- )
- ) from None
- except RequestAliasRedirect as e:
- raise RequestRedirect(
- self.make_alias_redirect_url(
- path, rule.endpoint, e.matched_values, method, query_args
- )
- ) from None
- if rv is None:
- continue
- if rule.methods is not None and method not in rule.methods:
- have_match_for.update(rule.methods)
- continue
-
- if rule.websocket != websocket:
- websocket_mismatch = True
- continue
-
- if self.map.redirect_defaults:
- redirect_url = self.get_default_redirect(rule, method, rv, query_args)
- if redirect_url is not None:
- raise RequestRedirect(redirect_url)
-
- if rule.redirect_to is not None:
- if isinstance(rule.redirect_to, str):
-
- def _handle_match(match: t.Match[str]) -> str:
- value = rv[match.group(1)] # type: ignore
- return rule._converters[match.group(1)].to_url(value)
-
- redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
- else:
- redirect_url = rule.redirect_to(self, **rv)
-
- if self.subdomain:
- netloc = f"{self.subdomain}.{self.server_name}"
- else:
- netloc = self.server_name
-
- raise RequestRedirect(
- url_join(
- f"{self.url_scheme or 'http'}://{netloc}{self.script_name}",
- redirect_url,
- )
- )
-
- if require_redirect:
- raise RequestRedirect(
- self.make_redirect_url(
- url_quote(path_info, self.map.charset, safe="/:|+"), query_args
- )
- )
-
- if return_rule:
- return rule, rv
- else:
- return rule.endpoint, rv
-
- if have_match_for:
- raise MethodNotAllowed(valid_methods=list(have_match_for))
-
- if websocket_mismatch:
- raise WebsocketMismatch()
-
- raise NotFound()
-
- def test(
- self, path_info: t.Optional[str] = None, method: t.Optional[str] = None
- ) -> bool:
- """Test if a rule would match. Works like `match` but returns `True`
- if the URL matches, or `False` if it does not exist.
-
- :param path_info: the path info to use for matching. Overrides the
- path info specified on binding.
- :param method: the HTTP method used for matching. Overrides the
- method specified on binding.
- """
- try:
- self.match(path_info, method)
- except RequestRedirect:
- pass
- except HTTPException:
- return False
- return True
-
- def allowed_methods(self, path_info: t.Optional[str] = None) -> t.Iterable[str]:
- """Returns the valid methods that match for a given path.
-
- .. versionadded:: 0.7
- """
- try:
- self.match(path_info, method="--")
- except MethodNotAllowed as e:
- return e.valid_methods # type: ignore
- except HTTPException:
- pass
- return []
-
- def get_host(self, domain_part: t.Optional[str]) -> str:
- """Figures out the full host name for the given domain part. The
- domain part is a subdomain in case host matching is disabled or
- a full host name.
- """
- if self.map.host_matching:
- if domain_part is None:
- return self.server_name
- return _to_str(domain_part, "ascii")
- subdomain = domain_part
- if subdomain is None:
- subdomain = self.subdomain
- else:
- subdomain = _to_str(subdomain, "ascii")
-
- if subdomain:
- return f"{subdomain}.{self.server_name}"
- else:
- return self.server_name
-
- def get_default_redirect(
- self,
- rule: Rule,
- method: str,
- values: t.MutableMapping[str, t.Any],
- query_args: t.Union[t.Mapping[str, t.Any], str],
- ) -> t.Optional[str]:
- """A helper that returns the URL to redirect to if it finds one.
- This is used for default redirecting only.
-
- :internal:
- """
- assert self.map.redirect_defaults
- for r in self.map._rules_by_endpoint[rule.endpoint]:
- # every rule that comes after this one, including ourself
- # has a lower priority for the defaults. We order the ones
- # with the highest priority up for building.
- if r is rule:
- break
- if r.provides_defaults_for(rule) and r.suitable_for(values, method):
- values.update(r.defaults) # type: ignore
- domain_part, path = r.build(values) # type: ignore
- return self.make_redirect_url(path, query_args, domain_part=domain_part)
- return None
-
- def encode_query_args(self, query_args: t.Union[t.Mapping[str, t.Any], str]) -> str:
- if not isinstance(query_args, str):
- return url_encode(query_args, self.map.charset)
- return query_args
-
- def make_redirect_url(
- self,
- path_info: str,
- query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
- domain_part: t.Optional[str] = None,
- ) -> str:
- """Creates a redirect URL.
-
- :internal:
- """
- if query_args:
- suffix = f"?{self.encode_query_args(query_args)}"
- else:
- suffix = ""
-
- scheme = self.url_scheme or "http"
- host = self.get_host(domain_part)
- path = posixpath.join(self.script_name.strip("/"), path_info.lstrip("/"))
- return f"{scheme}://{host}/{path}{suffix}"
-
- def make_alias_redirect_url(
- self,
- path: str,
- endpoint: str,
- values: t.Mapping[str, t.Any],
- method: str,
- query_args: t.Union[t.Mapping[str, t.Any], str],
- ) -> str:
- """Internally called to make an alias redirect URL."""
- url = self.build(
- endpoint, values, method, append_unknown=False, force_external=True
- )
- if query_args:
- url += f"?{self.encode_query_args(query_args)}"
- assert url != path, "detected invalid alias setting. No canonical URL found"
- return url
-
- def _partial_build(
- self,
- endpoint: str,
- values: t.Mapping[str, t.Any],
- method: t.Optional[str],
- append_unknown: bool,
- ) -> t.Optional[t.Tuple[str, str, bool]]:
- """Helper for :meth:`build`. Returns subdomain and path for the
- rule that accepts this endpoint, values and method.
-
- :internal:
- """
- # in case the method is none, try with the default method first
- if method is None:
- rv = self._partial_build(
- endpoint, values, self.default_method, append_unknown
- )
- if rv is not None:
- return rv
-
- # Default method did not match or a specific method is passed.
- # Check all for first match with matching host. If no matching
- # host is found, go with first result.
- first_match = None
-
- for rule in self.map._rules_by_endpoint.get(endpoint, ()):
- if rule.suitable_for(values, method):
- build_rv = rule.build(values, append_unknown)
-
- if build_rv is not None:
- rv = (build_rv[0], build_rv[1], rule.websocket)
- if self.map.host_matching:
- if rv[0] == self.server_name:
- return rv
- elif first_match is None:
- first_match = rv
- else:
- return rv
-
- return first_match
-
- def build(
- self,
- endpoint: str,
- values: t.Optional[t.Mapping[str, t.Any]] = None,
- method: t.Optional[str] = None,
- force_external: bool = False,
- append_unknown: bool = True,
- url_scheme: t.Optional[str] = None,
- ) -> str:
- """Building URLs works pretty much the other way round. Instead of
- `match` you call `build` and pass it the endpoint and a dict of
- arguments for the placeholders.
-
- The `build` function also accepts an argument called `force_external`
- which, if you set it to `True` will force external URLs. Per default
- external URLs (include the server name) will only be used if the
- target URL is on a different subdomain.
-
- >>> m = Map([
- ... Rule('/', endpoint='index'),
- ... Rule('/downloads/', endpoint='downloads/index'),
- ... Rule('/downloads/<int:id>', endpoint='downloads/show')
- ... ])
- >>> urls = m.bind("example.com", "/")
- >>> urls.build("index", {})
- '/'
- >>> urls.build("downloads/show", {'id': 42})
- '/downloads/42'
- >>> urls.build("downloads/show", {'id': 42}, force_external=True)
- 'http://example.com/downloads/42'
-
- Because URLs cannot contain non ASCII data you will always get
- bytes back. Non ASCII characters are urlencoded with the
- charset defined on the map instance.
-
- Additional values are converted to strings and appended to the URL as
- URL querystring parameters:
-
- >>> urls.build("index", {'q': 'My Searchstring'})
- '/?q=My+Searchstring'
-
- When processing those additional values, lists are furthermore
- interpreted as multiple values (as per
- :py:class:`werkzeug.datastructures.MultiDict`):
-
- >>> urls.build("index", {'q': ['a', 'b', 'c']})
- '/?q=a&q=b&q=c'
-
- Passing a ``MultiDict`` will also add multiple values:
-
- >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
- '/?p=z&q=a&q=b'
-
- If a rule does not exist when building a `BuildError` exception is
- raised.
-
- The build method accepts an argument called `method` which allows you
- to specify the method you want to have an URL built for if you have
- different methods for the same endpoint specified.
-
- :param endpoint: the endpoint of the URL to build.
- :param values: the values for the URL to build. Unhandled values are
- appended to the URL as query parameters.
- :param method: the HTTP method for the rule if there are different
- URLs for different methods on the same endpoint.
- :param force_external: enforce full canonical external URLs. If the URL
- scheme is not provided, this will generate
- a protocol-relative URL.
- :param append_unknown: unknown parameters are appended to the generated
- URL as query string argument. Disable this
- if you want the builder to ignore those.
- :param url_scheme: Scheme to use in place of the bound
- :attr:`url_scheme`.
-
- .. versionchanged:: 2.0
- Added the ``url_scheme`` parameter.
-
- .. versionadded:: 0.6
- Added the ``append_unknown`` parameter.
- """
- self.map.update()
-
- if values:
- temp_values: t.Dict[str, t.Union[t.List[t.Any], t.Any]] = {}
- always_list = isinstance(values, MultiDict)
- key: str
- value: t.Optional[t.Union[t.List[t.Any], t.Any]]
-
- # For MultiDict, dict.items(values) is like values.lists()
- # without the call or list coercion overhead.
- for key, value in dict.items(values): # type: ignore
- if value is None:
- continue
-
- if always_list or isinstance(value, (list, tuple)):
- value = [v for v in value if v is not None]
-
- if not value:
- continue
-
- if len(value) == 1:
- value = value[0]
-
- temp_values[key] = value
-
- values = temp_values
- else:
- values = {}
-
- rv = self._partial_build(endpoint, values, method, append_unknown)
- if rv is None:
- raise BuildError(endpoint, values, method, self)
-
- domain_part, path, websocket = rv
- host = self.get_host(domain_part)
-
- if url_scheme is None:
- url_scheme = self.url_scheme
-
- # Always build WebSocket routes with the scheme (browsers
- # require full URLs). If bound to a WebSocket, ensure that HTTP
- # routes are built with an HTTP scheme.
- secure = url_scheme in {"https", "wss"}
-
- if websocket:
- force_external = True
- url_scheme = "wss" if secure else "ws"
- elif url_scheme:
- url_scheme = "https" if secure else "http"
-
- # shortcut this.
- if not force_external and (
- (self.map.host_matching and host == self.server_name)
- or (not self.map.host_matching and domain_part == self.subdomain)
- ):
- return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}"
-
- scheme = f"{url_scheme}:" if url_scheme else ""
- return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/__init__.py b/contrib/python/Werkzeug/py3/werkzeug/routing/__init__.py
new file mode 100644
index 0000000000..84b043fdf4
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/__init__.py
@@ -0,0 +1,133 @@
+"""When it comes to combining multiple controller or view functions
+(however you want to call them) you need a dispatcher. A simple way
+would be applying regular expression tests on the ``PATH_INFO`` and
+calling registered callback functions that return the value then.
+
+This module implements a much more powerful system than simple regular
+expression matching because it can also convert values in the URLs and
+build URLs.
+
+Here a simple example that creates a URL map for an application with
+two subdomains (www and kb) and some URL rules:
+
+.. code-block:: python
+
+ m = Map([
+ # Static URLs
+ Rule('/', endpoint='static/index'),
+ Rule('/about', endpoint='static/about'),
+ Rule('/help', endpoint='static/help'),
+ # Knowledge Base
+ Subdomain('kb', [
+ Rule('/', endpoint='kb/index'),
+ Rule('/browse/', endpoint='kb/browse'),
+ Rule('/browse/<int:id>/', endpoint='kb/browse'),
+ Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
+ ])
+ ], default_subdomain='www')
+
+If the application doesn't use subdomains it's perfectly fine to not set
+the default subdomain and not use the `Subdomain` rule factory. The
+endpoint in the rules can be anything, for example import paths or
+unique identifiers. The WSGI application can use those endpoints to get the
+handler for that URL. It doesn't have to be a string at all but it's
+recommended.
+
+Now it's possible to create a URL adapter for one of the subdomains and
+build URLs:
+
+.. code-block:: python
+
+ c = m.bind('example.com')
+
+ c.build("kb/browse", dict(id=42))
+ 'http://kb.example.com/browse/42/'
+
+ c.build("kb/browse", dict())
+ 'http://kb.example.com/browse/'
+
+ c.build("kb/browse", dict(id=42, page=3))
+ 'http://kb.example.com/browse/42/3'
+
+ c.build("static/about")
+ '/about'
+
+ c.build("static/index", force_external=True)
+ 'http://www.example.com/'
+
+ c = m.bind('example.com', subdomain='kb')
+
+ c.build("static/about")
+ 'http://www.example.com/about'
+
+The first argument to bind is the server name *without* the subdomain.
+Per default it will assume that the script is mounted on the root, but
+often that's not the case so you can provide the real mount point as
+second argument:
+
+.. code-block:: python
+
+ c = m.bind('example.com', '/applications/example')
+
+The third argument can be the subdomain, if not given the default
+subdomain is used. For more details about binding have a look at the
+documentation of the `MapAdapter`.
+
+And here is how you can match URLs:
+
+.. code-block:: python
+
+ c = m.bind('example.com')
+
+ c.match("/")
+ ('static/index', {})
+
+ c.match("/about")
+ ('static/about', {})
+
+ c = m.bind('example.com', '/', 'kb')
+
+ c.match("/")
+ ('kb/index', {})
+
+ c.match("/browse/42/23")
+ ('kb/browse', {'id': 42, 'page': 23})
+
+If matching fails you get a ``NotFound`` exception, if the rule thinks
+it's a good idea to redirect (for example because the URL was defined
+to have a slash at the end but the request was missing that slash) it
+will raise a ``RequestRedirect`` exception. Both are subclasses of
+``HTTPException`` so you can use those errors as responses in the
+application.
+
+If matching succeeded but the URL rule was incompatible to the given
+method (for example there were only rules for ``GET`` and ``HEAD`` but
+routing tried to match a ``POST`` request) a ``MethodNotAllowed``
+exception is raised.
+"""
+from .converters import AnyConverter as AnyConverter
+from .converters import BaseConverter as BaseConverter
+from .converters import FloatConverter as FloatConverter
+from .converters import IntegerConverter as IntegerConverter
+from .converters import PathConverter as PathConverter
+from .converters import UnicodeConverter as UnicodeConverter
+from .converters import UUIDConverter as UUIDConverter
+from .converters import ValidationError as ValidationError
+from .exceptions import BuildError as BuildError
+from .exceptions import NoMatch as NoMatch
+from .exceptions import RequestAliasRedirect as RequestAliasRedirect
+from .exceptions import RequestPath as RequestPath
+from .exceptions import RequestRedirect as RequestRedirect
+from .exceptions import RoutingException as RoutingException
+from .exceptions import WebsocketMismatch as WebsocketMismatch
+from .map import Map as Map
+from .map import MapAdapter as MapAdapter
+from .matcher import StateMachineMatcher as StateMachineMatcher
+from .rules import EndpointPrefix as EndpointPrefix
+from .rules import parse_converter_args as parse_converter_args
+from .rules import Rule as Rule
+from .rules import RuleFactory as RuleFactory
+from .rules import RuleTemplate as RuleTemplate
+from .rules import RuleTemplateFactory as RuleTemplateFactory
+from .rules import Subdomain as Subdomain
+from .rules import Submount as Submount
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/converters.py b/contrib/python/Werkzeug/py3/werkzeug/routing/converters.py
new file mode 100644
index 0000000000..bbad29d7ad
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/converters.py
@@ -0,0 +1,257 @@
+import re
+import typing as t
+import uuid
+
+from ..urls import _fast_url_quote
+
+if t.TYPE_CHECKING:
+ from .map import Map
+
+
+class ValidationError(ValueError):
+ """Validation error. If a rule converter raises this exception the rule
+ does not match the current URL and the next URL is tried.
+ """
+
+
+class BaseConverter:
+ """Base class for all converters."""
+
+ regex = "[^/]+"
+ weight = 100
+ part_isolating = True
+
+ def __init__(self, map: "Map", *args: t.Any, **kwargs: t.Any) -> None:
+ self.map = map
+
+ def to_python(self, value: str) -> t.Any:
+ return value
+
+ def to_url(self, value: t.Any) -> str:
+ if isinstance(value, (bytes, bytearray)):
+ return _fast_url_quote(value)
+ return _fast_url_quote(str(value).encode(self.map.charset))
+
+
+class UnicodeConverter(BaseConverter):
+ """This converter is the default converter and accepts any string but
+ only one path segment. Thus the string can not include a slash.
+
+ This is the default validator.
+
+ Example::
+
+ Rule('/pages/<page>'),
+ Rule('/<string(length=2):lang_code>')
+
+ :param map: the :class:`Map`.
+ :param minlength: the minimum length of the string. Must be greater
+ or equal 1.
+ :param maxlength: the maximum length of the string.
+ :param length: the exact length of the string.
+ """
+
+ part_isolating = True
+
+ def __init__(
+ self,
+ map: "Map",
+ minlength: int = 1,
+ maxlength: t.Optional[int] = None,
+ length: t.Optional[int] = None,
+ ) -> None:
+ super().__init__(map)
+ if length is not None:
+ length_regex = f"{{{int(length)}}}"
+ else:
+ if maxlength is None:
+ maxlength_value = ""
+ else:
+ maxlength_value = str(int(maxlength))
+ length_regex = f"{{{int(minlength)},{maxlength_value}}}"
+ self.regex = f"[^/]{length_regex}"
+
+
+class AnyConverter(BaseConverter):
+ """Matches one of the items provided. Items can either be Python
+ identifiers or strings::
+
+ Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
+
+ :param map: the :class:`Map`.
+ :param items: this function accepts the possible items as positional
+ arguments.
+
+ .. versionchanged:: 2.2
+ Value is validated when building a URL.
+ """
+
+ part_isolating = True
+
+ def __init__(self, map: "Map", *items: str) -> None:
+ super().__init__(map)
+ self.items = set(items)
+ self.regex = f"(?:{'|'.join([re.escape(x) for x in items])})"
+
+ def to_url(self, value: t.Any) -> str:
+ if value in self.items:
+ return str(value)
+
+ valid_values = ", ".join(f"'{item}'" for item in sorted(self.items))
+ raise ValueError(f"'{value}' is not one of {valid_values}")
+
+
+class PathConverter(BaseConverter):
+ """Like the default :class:`UnicodeConverter`, but it also matches
+ slashes. This is useful for wikis and similar applications::
+
+ Rule('/<path:wikipage>')
+ Rule('/<path:wikipage>/edit')
+
+ :param map: the :class:`Map`.
+ """
+
+ regex = "[^/].*?"
+ weight = 200
+ part_isolating = False
+
+
+class NumberConverter(BaseConverter):
+ """Baseclass for `IntegerConverter` and `FloatConverter`.
+
+ :internal:
+ """
+
+ weight = 50
+ num_convert: t.Callable = int
+ part_isolating = True
+
+ def __init__(
+ self,
+ map: "Map",
+ fixed_digits: int = 0,
+ min: t.Optional[int] = None,
+ max: t.Optional[int] = None,
+ signed: bool = False,
+ ) -> None:
+ if signed:
+ self.regex = self.signed_regex
+ super().__init__(map)
+ self.fixed_digits = fixed_digits
+ self.min = min
+ self.max = max
+ self.signed = signed
+
+ def to_python(self, value: str) -> t.Any:
+ if self.fixed_digits and len(value) != self.fixed_digits:
+ raise ValidationError()
+ value = self.num_convert(value)
+ if (self.min is not None and value < self.min) or (
+ self.max is not None and value > self.max
+ ):
+ raise ValidationError()
+ return value
+
+ def to_url(self, value: t.Any) -> str:
+ value = str(self.num_convert(value))
+ if self.fixed_digits:
+ value = value.zfill(self.fixed_digits)
+ return value
+
+ @property
+ def signed_regex(self) -> str:
+ return f"-?{self.regex}"
+
+
+class IntegerConverter(NumberConverter):
+ """This converter only accepts integer values::
+
+ Rule("/page/<int:page>")
+
+ By default it only accepts unsigned, positive values. The ``signed``
+ parameter will enable signed, negative values. ::
+
+ Rule("/page/<int(signed=True):page>")
+
+ :param map: The :class:`Map`.
+ :param fixed_digits: The number of fixed digits in the URL. If you
+ set this to ``4`` for example, the rule will only match if the
+ URL looks like ``/0001/``. The default is variable length.
+ :param min: The minimal value.
+ :param max: The maximal value.
+ :param signed: Allow signed (negative) values.
+
+ .. versionadded:: 0.15
+ The ``signed`` parameter.
+ """
+
+ regex = r"\d+"
+ part_isolating = True
+
+
+class FloatConverter(NumberConverter):
+ """This converter only accepts floating point values::
+
+ Rule("/probability/<float:probability>")
+
+ By default it only accepts unsigned, positive values. The ``signed``
+ parameter will enable signed, negative values. ::
+
+ Rule("/offset/<float(signed=True):offset>")
+
+ :param map: The :class:`Map`.
+ :param min: The minimal value.
+ :param max: The maximal value.
+ :param signed: Allow signed (negative) values.
+
+ .. versionadded:: 0.15
+ The ``signed`` parameter.
+ """
+
+ regex = r"\d+\.\d+"
+ num_convert = float
+ part_isolating = True
+
+ def __init__(
+ self,
+ map: "Map",
+ min: t.Optional[float] = None,
+ max: t.Optional[float] = None,
+ signed: bool = False,
+ ) -> None:
+ super().__init__(map, min=min, max=max, signed=signed) # type: ignore
+
+
+class UUIDConverter(BaseConverter):
+ """This converter only accepts UUID strings::
+
+ Rule('/object/<uuid:identifier>')
+
+ .. versionadded:: 0.10
+
+ :param map: the :class:`Map`.
+ """
+
+ regex = (
+ r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
+ r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
+ )
+ part_isolating = True
+
+ def to_python(self, value: str) -> uuid.UUID:
+ return uuid.UUID(value)
+
+ def to_url(self, value: uuid.UUID) -> str:
+ return str(value)
+
+
+#: the default converter mapping for the map.
+DEFAULT_CONVERTERS: t.Mapping[str, t.Type[BaseConverter]] = {
+ "default": UnicodeConverter,
+ "string": UnicodeConverter,
+ "any": AnyConverter,
+ "path": PathConverter,
+ "int": IntegerConverter,
+ "float": FloatConverter,
+ "uuid": UUIDConverter,
+}
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/exceptions.py b/contrib/python/Werkzeug/py3/werkzeug/routing/exceptions.py
new file mode 100644
index 0000000000..7cbe6e9131
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/exceptions.py
@@ -0,0 +1,146 @@
+import difflib
+import typing as t
+
+from ..exceptions import BadRequest
+from ..exceptions import HTTPException
+from ..utils import cached_property
+from ..utils import redirect
+
+if t.TYPE_CHECKING:
+ from _typeshed.wsgi import WSGIEnvironment
+ from .map import MapAdapter
+ from .rules import Rule # noqa: F401
+ from ..wrappers.request import Request
+ from ..wrappers.response import Response
+
+
+class RoutingException(Exception):
+ """Special exceptions that require the application to redirect, notifying
+ about missing urls, etc.
+
+ :internal:
+ """
+
+
+class RequestRedirect(HTTPException, RoutingException):
+ """Raise if the map requests a redirect. This is for example the case if
+ `strict_slashes` are activated and an url that requires a trailing slash.
+
+ The attribute `new_url` contains the absolute destination url.
+ """
+
+ code = 308
+
+ def __init__(self, new_url: str) -> None:
+ super().__init__(new_url)
+ self.new_url = new_url
+
+ def get_response(
+ self,
+ environ: t.Optional[t.Union["WSGIEnvironment", "Request"]] = None,
+ scope: t.Optional[dict] = None,
+ ) -> "Response":
+ return redirect(self.new_url, self.code)
+
+
+class RequestPath(RoutingException):
+ """Internal exception."""
+
+ __slots__ = ("path_info",)
+
+ def __init__(self, path_info: str) -> None:
+ super().__init__()
+ self.path_info = path_info
+
+
+class RequestAliasRedirect(RoutingException): # noqa: B903
+ """This rule is an alias and wants to redirect to the canonical URL."""
+
+ def __init__(self, matched_values: t.Mapping[str, t.Any], endpoint: str) -> None:
+ super().__init__()
+ self.matched_values = matched_values
+ self.endpoint = endpoint
+
+
+class BuildError(RoutingException, LookupError):
+ """Raised if the build system cannot find a URL for an endpoint with the
+ values provided.
+ """
+
+ def __init__(
+ self,
+ endpoint: str,
+ values: t.Mapping[str, t.Any],
+ method: t.Optional[str],
+ adapter: t.Optional["MapAdapter"] = None,
+ ) -> None:
+ super().__init__(endpoint, values, method)
+ self.endpoint = endpoint
+ self.values = values
+ self.method = method
+ self.adapter = adapter
+
+ @cached_property
+ def suggested(self) -> t.Optional["Rule"]:
+ return self.closest_rule(self.adapter)
+
+ def closest_rule(self, adapter: t.Optional["MapAdapter"]) -> t.Optional["Rule"]:
+ def _score_rule(rule: "Rule") -> float:
+ return sum(
+ [
+ 0.98
+ * difflib.SequenceMatcher(
+ None, rule.endpoint, self.endpoint
+ ).ratio(),
+ 0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
+ 0.01 * bool(rule.methods and self.method in rule.methods),
+ ]
+ )
+
+ if adapter and adapter.map._rules:
+ return max(adapter.map._rules, key=_score_rule)
+
+ return None
+
+ def __str__(self) -> str:
+ message = [f"Could not build url for endpoint {self.endpoint!r}"]
+ if self.method:
+ message.append(f" ({self.method!r})")
+ if self.values:
+ message.append(f" with values {sorted(self.values)!r}")
+ message.append(".")
+ if self.suggested:
+ if self.endpoint == self.suggested.endpoint:
+ if (
+ self.method
+ and self.suggested.methods is not None
+ and self.method not in self.suggested.methods
+ ):
+ message.append(
+ " Did you mean to use methods"
+ f" {sorted(self.suggested.methods)!r}?"
+ )
+ missing_values = self.suggested.arguments.union(
+ set(self.suggested.defaults or ())
+ ) - set(self.values.keys())
+ if missing_values:
+ message.append(
+ f" Did you forget to specify values {sorted(missing_values)!r}?"
+ )
+ else:
+ message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
+ return "".join(message)
+
+
+class WebsocketMismatch(BadRequest):
+ """The only matched rule is either a WebSocket and the request is
+ HTTP, or the rule is HTTP and the request is a WebSocket.
+ """
+
+
+class NoMatch(Exception):
+ __slots__ = ("have_match_for", "websocket_mismatch")
+
+ def __init__(self, have_match_for: t.Set[str], websocket_mismatch: bool) -> None:
+ self.have_match_for = have_match_for
+ self.websocket_mismatch = websocket_mismatch
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/map.py b/contrib/python/Werkzeug/py3/werkzeug/routing/map.py
new file mode 100644
index 0000000000..daf94b6a1c
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/map.py
@@ -0,0 +1,944 @@
+import posixpath
+import typing as t
+import warnings
+from pprint import pformat
+from threading import Lock
+
+from .._internal import _encode_idna
+from .._internal import _get_environ
+from .._internal import _to_str
+from .._internal import _wsgi_decoding_dance
+from ..datastructures import ImmutableDict
+from ..datastructures import MultiDict
+from ..exceptions import BadHost
+from ..exceptions import HTTPException
+from ..exceptions import MethodNotAllowed
+from ..exceptions import NotFound
+from ..urls import url_encode
+from ..urls import url_join
+from ..urls import url_quote
+from ..wsgi import get_host
+from .converters import DEFAULT_CONVERTERS
+from .exceptions import BuildError
+from .exceptions import NoMatch
+from .exceptions import RequestAliasRedirect
+from .exceptions import RequestPath
+from .exceptions import RequestRedirect
+from .exceptions import WebsocketMismatch
+from .matcher import StateMachineMatcher
+from .rules import _simple_rule_re
+from .rules import Rule
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+ from .converters import BaseConverter
+ from .rules import RuleFactory
+ from ..wrappers.request import Request
+
+
+class Map:
+ """The map class stores all the URL rules and some configuration
+ parameters. Some of the configuration values are only stored on the
+ `Map` instance since those affect all rules, others are just defaults
+ and can be overridden for each rule. Note that you have to specify all
+ arguments besides the `rules` as keyword arguments!
+
+ :param rules: sequence of url rules for this map.
+ :param default_subdomain: The default subdomain for rules without a
+ subdomain defined.
+ :param charset: charset of the url. defaults to ``"utf-8"``
+ :param strict_slashes: If a rule ends with a slash but the matched
+ URL does not, redirect to the URL with a trailing slash.
+ :param merge_slashes: Merge consecutive slashes when matching or
+ building URLs. Matches will redirect to the normalized URL.
+ Slashes in variable parts are not merged.
+ :param redirect_defaults: This will redirect to the default rule if it
+ wasn't visited that way. This helps create
+ unique URLs.
+ :param converters: A dict of converters that adds additional converters
+ to the list of converters. If you redefine one
+ converter this will override the original one.
+ :param sort_parameters: If set to `True` the url parameters are sorted.
+ See `url_encode` for more details.
+ :param sort_key: The sort key function for `url_encode`.
+ :param encoding_errors: the error method to use for decoding
+ :param host_matching: if set to `True` it enables the host matching
+ feature and disables the subdomain one. If
+ enabled the `host` parameter to rules is used
+ instead of the `subdomain` one.
+
+ .. versionchanged:: 1.0
+ If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
+ will match.
+
+ .. versionchanged:: 1.0
+ Added ``merge_slashes``.
+
+ .. versionchanged:: 0.7
+ Added ``encoding_errors`` and ``host_matching``.
+
+ .. versionchanged:: 0.5
+ Added ``sort_parameters`` and ``sort_key``.
+ """
+
+ #: A dict of default converters to be used.
+ default_converters = ImmutableDict(DEFAULT_CONVERTERS)
+
+ #: The type of lock to use when updating.
+ #:
+ #: .. versionadded:: 1.0
+ lock_class = Lock
+
+ def __init__(
+ self,
+ rules: t.Optional[t.Iterable["RuleFactory"]] = None,
+ default_subdomain: str = "",
+ charset: str = "utf-8",
+ strict_slashes: bool = True,
+ merge_slashes: bool = True,
+ redirect_defaults: bool = True,
+ converters: t.Optional[t.Mapping[str, t.Type["BaseConverter"]]] = None,
+ sort_parameters: bool = False,
+ sort_key: t.Optional[t.Callable[[t.Any], t.Any]] = None,
+ encoding_errors: str = "replace",
+ host_matching: bool = False,
+ ) -> None:
+ self._matcher = StateMachineMatcher(merge_slashes)
+ self._rules_by_endpoint: t.Dict[str, t.List[Rule]] = {}
+ self._remap = True
+ self._remap_lock = self.lock_class()
+
+ self.default_subdomain = default_subdomain
+ self.charset = charset
+ self.encoding_errors = encoding_errors
+ self.strict_slashes = strict_slashes
+ self.merge_slashes = merge_slashes
+ self.redirect_defaults = redirect_defaults
+ self.host_matching = host_matching
+
+ self.converters = self.default_converters.copy()
+ if converters:
+ self.converters.update(converters)
+
+ self.sort_parameters = sort_parameters
+ self.sort_key = sort_key
+
+ for rulefactory in rules or ():
+ self.add(rulefactory)
+
+ def is_endpoint_expecting(self, endpoint: str, *arguments: str) -> bool:
+ """Iterate over all rules and check if the endpoint expects
+ the arguments provided. This is for example useful if you have
+ some URLs that expect a language code and others that do not and
+ you want to wrap the builder a bit so that the current language
+ code is automatically added if not provided but endpoints expect
+ it.
+
+ :param endpoint: the endpoint to check.
+ :param arguments: this function accepts one or more arguments
+ as positional arguments. Each one of them is
+ checked.
+ """
+ self.update()
+ arguments = set(arguments)
+ for rule in self._rules_by_endpoint[endpoint]:
+ if arguments.issubset(rule.arguments):
+ return True
+ return False
+
+ @property
+ def _rules(self) -> t.List[Rule]:
+ return [rule for rules in self._rules_by_endpoint.values() for rule in rules]
+
+ def iter_rules(self, endpoint: t.Optional[str] = None) -> t.Iterator[Rule]:
+ """Iterate over all rules or the rules of an endpoint.
+
+ :param endpoint: if provided only the rules for that endpoint
+ are returned.
+ :return: an iterator
+ """
+ self.update()
+ if endpoint is not None:
+ return iter(self._rules_by_endpoint[endpoint])
+ return iter(self._rules)
+
+ def add(self, rulefactory: "RuleFactory") -> None:
+ """Add a new rule or factory to the map and bind it. Requires that the
+ rule is not bound to another map.
+
+ :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
+ """
+ for rule in rulefactory.get_rules(self):
+ rule.bind(self)
+ if not rule.build_only:
+ self._matcher.add(rule)
+ self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
+ self._remap = True
+
+ def bind(
+ self,
+ server_name: str,
+ script_name: t.Optional[str] = None,
+ subdomain: t.Optional[str] = None,
+ url_scheme: str = "http",
+ default_method: str = "GET",
+ path_info: t.Optional[str] = None,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ ) -> "MapAdapter":
+ """Return a new :class:`MapAdapter` with the details specified to the
+ call. Note that `script_name` will default to ``'/'`` if not further
+ specified or `None`. The `server_name` at least is a requirement
+ because the HTTP RFC requires absolute URLs for redirects and so all
+ redirect exceptions raised by Werkzeug will contain the full canonical
+ URL.
+
+ If no path_info is passed to :meth:`match` it will use the default path
+ info passed to bind. While this doesn't really make sense for
+ manual bind calls, it's useful if you bind a map to a WSGI
+ environment which already contains the path info.
+
+ `subdomain` will default to the `default_subdomain` for this map if
+ not defined. If there is no `default_subdomain` you cannot use the
+ subdomain feature.
+
+ .. versionchanged:: 1.0
+ If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
+ will match.
+
+ .. versionchanged:: 0.15
+ ``path_info`` defaults to ``'/'`` if ``None``.
+
+ .. versionchanged:: 0.8
+ ``query_args`` can be a string.
+
+ .. versionchanged:: 0.7
+ Added ``query_args``.
+ """
+ server_name = server_name.lower()
+ if self.host_matching:
+ if subdomain is not None:
+ raise RuntimeError("host matching enabled and a subdomain was provided")
+ elif subdomain is None:
+ subdomain = self.default_subdomain
+ if script_name is None:
+ script_name = "/"
+ if path_info is None:
+ path_info = "/"
+
+ try:
+ server_name = _encode_idna(server_name) # type: ignore
+ except UnicodeError as e:
+ raise BadHost() from e
+
+ return MapAdapter(
+ self,
+ server_name,
+ script_name,
+ subdomain,
+ url_scheme,
+ path_info,
+ default_method,
+ query_args,
+ )
+
+ def bind_to_environ(
+ self,
+ environ: t.Union["WSGIEnvironment", "Request"],
+ server_name: t.Optional[str] = None,
+ subdomain: t.Optional[str] = None,
+ ) -> "MapAdapter":
+ """Like :meth:`bind` but you can pass it an WSGI environment and it
+ will fetch the information from that dictionary. Note that because of
+ limitations in the protocol there is no way to get the current
+ subdomain and real `server_name` from the environment. If you don't
+ provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
+ `HTTP_HOST` if provided) as the `server_name` with the subdomain
+ feature disabled.
+
+ If `subdomain` is `None` but an environment and a server name is
+ provided it will calculate the current subdomain automatically.
+ Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
+ in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
+ subdomain will be ``'staging.dev'``.
+
+ If the object passed as environ has an environ attribute, the value of
+ this attribute is used instead. This allows you to pass request
+ objects. Additionally `PATH_INFO` is added as a default to the
+ :class:`MapAdapter` so that you don't have to pass the path info to
+ the match method.
+
+ .. versionchanged:: 1.0.0
+ If the passed server name specifies port 443, it will match
+ if the incoming scheme is ``https`` without a port.
+
+ .. versionchanged:: 1.0.0
+ A warning is shown when the passed server name does not
+ match the incoming WSGI server name.
+
+ .. versionchanged:: 0.8
+ This will no longer raise a ValueError when an unexpected server
+ name was passed.
+
+ .. versionchanged:: 0.5
+ previously this method accepted a bogus `calculate_subdomain`
+ parameter that did not have any effect. It was removed because
+ of that.
+
+ :param environ: a WSGI environment.
+ :param server_name: an optional server name hint (see above).
+ :param subdomain: optionally the current subdomain (see above).
+ """
+ env = _get_environ(environ)
+ wsgi_server_name = get_host(env).lower()
+ scheme = env["wsgi.url_scheme"]
+ upgrade = any(
+ v.strip() == "upgrade"
+ for v in env.get("HTTP_CONNECTION", "").lower().split(",")
+ )
+
+ if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
+ scheme = "wss" if scheme == "https" else "ws"
+
+ if server_name is None:
+ server_name = wsgi_server_name
+ else:
+ server_name = server_name.lower()
+
+ # strip standard port to match get_host()
+ if scheme in {"http", "ws"} and server_name.endswith(":80"):
+ server_name = server_name[:-3]
+ elif scheme in {"https", "wss"} and server_name.endswith(":443"):
+ server_name = server_name[:-4]
+
+ if subdomain is None and not self.host_matching:
+ cur_server_name = wsgi_server_name.split(".")
+ real_server_name = server_name.split(".")
+ offset = -len(real_server_name)
+
+ if cur_server_name[offset:] != real_server_name:
+ # This can happen even with valid configs if the server was
+ # accessed directly by IP address under some situations.
+ # Instead of raising an exception like in Werkzeug 0.7 or
+ # earlier we go by an invalid subdomain which will result
+ # in a 404 error on matching.
+ warnings.warn(
+ f"Current server name {wsgi_server_name!r} doesn't match configured"
+ f" server name {server_name!r}",
+ stacklevel=2,
+ )
+ subdomain = "<invalid>"
+ else:
+ subdomain = ".".join(filter(None, cur_server_name[:offset]))
+
+ def _get_wsgi_string(name: str) -> t.Optional[str]:
+ val = env.get(name)
+ if val is not None:
+ return _wsgi_decoding_dance(val, self.charset)
+ return None
+
+ script_name = _get_wsgi_string("SCRIPT_NAME")
+ path_info = _get_wsgi_string("PATH_INFO")
+ query_args = _get_wsgi_string("QUERY_STRING")
+ return Map.bind(
+ self,
+ server_name,
+ script_name,
+ subdomain,
+ scheme,
+ env["REQUEST_METHOD"],
+ path_info,
+ query_args=query_args,
+ )
+
+ def update(self) -> None:
+ """Called before matching and building to keep the compiled rules
+ in the correct order after things changed.
+ """
+ if not self._remap:
+ return
+
+ with self._remap_lock:
+ if not self._remap:
+ return
+
+ self._matcher.update()
+ for rules in self._rules_by_endpoint.values():
+ rules.sort(key=lambda x: x.build_compare_key())
+ self._remap = False
+
+ def __repr__(self) -> str:
+ rules = self.iter_rules()
+ return f"{type(self).__name__}({pformat(list(rules))})"
+
+
+class MapAdapter:
+
+ """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
+ the URL matching and building based on runtime information.
+ """
+
+ def __init__(
+ self,
+ map: Map,
+ server_name: str,
+ script_name: str,
+ subdomain: t.Optional[str],
+ url_scheme: str,
+ path_info: str,
+ default_method: str,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ ):
+ self.map = map
+ self.server_name = _to_str(server_name)
+ script_name = _to_str(script_name)
+ if not script_name.endswith("/"):
+ script_name += "/"
+ self.script_name = script_name
+ self.subdomain = _to_str(subdomain)
+ self.url_scheme = _to_str(url_scheme)
+ self.path_info = _to_str(path_info)
+ self.default_method = _to_str(default_method)
+ self.query_args = query_args
+ self.websocket = self.url_scheme in {"ws", "wss"}
+
+ def dispatch(
+ self,
+ view_func: t.Callable[[str, t.Mapping[str, t.Any]], "WSGIApplication"],
+ path_info: t.Optional[str] = None,
+ method: t.Optional[str] = None,
+ catch_http_exceptions: bool = False,
+ ) -> "WSGIApplication":
+ """Does the complete dispatching process. `view_func` is called with
+ the endpoint and a dict with the values for the view. It should
+ look up the view function, call it, and return a response object
+ or WSGI application. http exceptions are not caught by default
+ so that applications can display nicer error messages by just
+ catching them by hand. If you want to stick with the default
+ error messages you can pass it ``catch_http_exceptions=True`` and
+ it will catch the http exceptions.
+
+ Here a small example for the dispatch usage::
+
+ from werkzeug.wrappers import Request, Response
+ from werkzeug.wsgi import responder
+ from werkzeug.routing import Map, Rule
+
+ def on_index(request):
+ return Response('Hello from the index')
+
+ url_map = Map([Rule('/', endpoint='index')])
+ views = {'index': on_index}
+
+ @responder
+ def application(environ, start_response):
+ request = Request(environ)
+ urls = url_map.bind_to_environ(environ)
+ return urls.dispatch(lambda e, v: views[e](request, **v),
+ catch_http_exceptions=True)
+
+ Keep in mind that this method might return exception objects, too, so
+ use :class:`Response.force_type` to get a response object.
+
+ :param view_func: a function that is called with the endpoint as
+ first argument and the value dict as second. Has
+ to dispatch to the actual view function with this
+ information. (see above)
+ :param path_info: the path info to use for matching. Overrides the
+ path info specified on binding.
+ :param method: the HTTP method used for matching. Overrides the
+ method specified on binding.
+ :param catch_http_exceptions: set to `True` to catch any of the
+ werkzeug :class:`HTTPException`\\s.
+ """
+ try:
+ try:
+ endpoint, args = self.match(path_info, method)
+ except RequestRedirect as e:
+ return e
+ return view_func(endpoint, args)
+ except HTTPException as e:
+ if catch_http_exceptions:
+ return e
+ raise
+
+ @t.overload
+ def match( # type: ignore
+ self,
+ path_info: t.Optional[str] = None,
+ method: t.Optional[str] = None,
+ return_rule: "te.Literal[False]" = False,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ websocket: t.Optional[bool] = None,
+ ) -> t.Tuple[str, t.Mapping[str, t.Any]]:
+ ...
+
+ @t.overload
+ def match(
+ self,
+ path_info: t.Optional[str] = None,
+ method: t.Optional[str] = None,
+ return_rule: "te.Literal[True]" = True,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ websocket: t.Optional[bool] = None,
+ ) -> t.Tuple[Rule, t.Mapping[str, t.Any]]:
+ ...
+
+ def match(
+ self,
+ path_info: t.Optional[str] = None,
+ method: t.Optional[str] = None,
+ return_rule: bool = False,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ websocket: t.Optional[bool] = None,
+ ) -> t.Tuple[t.Union[str, Rule], t.Mapping[str, t.Any]]:
+ """The usage is simple: you just pass the match method the current
+ path info as well as the method (which defaults to `GET`). The
+ following things can then happen:
+
+ - you receive a `NotFound` exception that indicates that no URL is
+ matching. A `NotFound` exception is also a WSGI application you
+ can call to get a default page not found page (happens to be the
+ same object as `werkzeug.exceptions.NotFound`)
+
+ - you receive a `MethodNotAllowed` exception that indicates that there
+ is a match for this URL but not for the current request method.
+ This is useful for RESTful applications.
+
+ - you receive a `RequestRedirect` exception with a `new_url`
+ attribute. This exception is used to notify you about a redirect
+ that Werkzeug requests from your WSGI application. This is for example the
+ case if you request ``/foo`` although the correct URL is ``/foo/``.
+ You can use the `RequestRedirect` instance as response-like object
+ similar to all other subclasses of `HTTPException`.
+
+ - you receive a ``WebsocketMismatch`` exception if the only
+ match is a WebSocket rule but the bind is an HTTP request, or
+ if the match is an HTTP rule but the bind is a WebSocket
+ request.
+
+ - you get a tuple in the form ``(endpoint, arguments)`` if there is
+ a match (unless `return_rule` is True, in which case you get a tuple
+ in the form ``(rule, arguments)``)
+
+ If the path info is not passed to the match method the default path
+ info of the map is used (defaults to the root URL if not defined
+ explicitly).
+
+ All of the exceptions raised are subclasses of `HTTPException` so they
+ can be used as WSGI responses. They will all render generic error or
+ redirect pages.
+
+ Here is a small example for matching:
+
+ >>> m = Map([
+ ... Rule('/', endpoint='index'),
+ ... Rule('/downloads/', endpoint='downloads/index'),
+ ... Rule('/downloads/<int:id>', endpoint='downloads/show')
+ ... ])
+ >>> urls = m.bind("example.com", "/")
+ >>> urls.match("/", "GET")
+ ('index', {})
+ >>> urls.match("/downloads/42")
+ ('downloads/show', {'id': 42})
+
+ And here is what happens on redirect and missing URLs:
+
+ >>> urls.match("/downloads")
+ Traceback (most recent call last):
+ ...
+ RequestRedirect: http://example.com/downloads/
+ >>> urls.match("/missing")
+ Traceback (most recent call last):
+ ...
+ NotFound: 404 Not Found
+
+ :param path_info: the path info to use for matching. Overrides the
+ path info specified on binding.
+ :param method: the HTTP method used for matching. Overrides the
+ method specified on binding.
+ :param return_rule: return the rule that matched instead of just the
+ endpoint (defaults to `False`).
+ :param query_args: optional query arguments that are used for
+ automatic redirects as string or dictionary. It's
+ currently not possible to use the query arguments
+ for URL matching.
+ :param websocket: Match WebSocket instead of HTTP requests. A
+ websocket request has a ``ws`` or ``wss``
+ :attr:`url_scheme`. This overrides that detection.
+
+ .. versionadded:: 1.0
+ Added ``websocket``.
+
+ .. versionchanged:: 0.8
+ ``query_args`` can be a string.
+
+ .. versionadded:: 0.7
+ Added ``query_args``.
+
+ .. versionadded:: 0.6
+ Added ``return_rule``.
+ """
+ self.map.update()
+ if path_info is None:
+ path_info = self.path_info
+ else:
+ path_info = _to_str(path_info, self.map.charset)
+ if query_args is None:
+ query_args = self.query_args or {}
+ method = (method or self.default_method).upper()
+
+ if websocket is None:
+ websocket = self.websocket
+
+ domain_part = self.server_name if self.map.host_matching else self.subdomain
+ path_part = f"/{path_info.lstrip('/')}" if path_info else ""
+
+ try:
+ result = self.map._matcher.match(domain_part, path_part, method, websocket)
+ except RequestPath as e:
+ raise RequestRedirect(
+ self.make_redirect_url(
+ url_quote(e.path_info, self.map.charset, safe="/:|+"),
+ query_args,
+ )
+ ) from None
+ except RequestAliasRedirect as e:
+ raise RequestRedirect(
+ self.make_alias_redirect_url(
+ f"{domain_part}|{path_part}",
+ e.endpoint,
+ e.matched_values,
+ method,
+ query_args,
+ )
+ ) from None
+ except NoMatch as e:
+ if e.have_match_for:
+ raise MethodNotAllowed(valid_methods=list(e.have_match_for)) from None
+
+ if e.websocket_mismatch:
+ raise WebsocketMismatch() from None
+
+ raise NotFound() from None
+ else:
+ rule, rv = result
+
+ if self.map.redirect_defaults:
+ redirect_url = self.get_default_redirect(rule, method, rv, query_args)
+ if redirect_url is not None:
+ raise RequestRedirect(redirect_url)
+
+ if rule.redirect_to is not None:
+ if isinstance(rule.redirect_to, str):
+
+ def _handle_match(match: t.Match[str]) -> str:
+ value = rv[match.group(1)]
+ return rule._converters[match.group(1)].to_url(value)
+
+ redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
+ else:
+ redirect_url = rule.redirect_to(self, **rv)
+
+ if self.subdomain:
+ netloc = f"{self.subdomain}.{self.server_name}"
+ else:
+ netloc = self.server_name
+
+ raise RequestRedirect(
+ url_join(
+ f"{self.url_scheme or 'http'}://{netloc}{self.script_name}",
+ redirect_url,
+ )
+ )
+
+ if return_rule:
+ return rule, rv
+ else:
+ return rule.endpoint, rv
+
+ def test(
+ self, path_info: t.Optional[str] = None, method: t.Optional[str] = None
+ ) -> bool:
+ """Test if a rule would match. Works like `match` but returns `True`
+ if the URL matches, or `False` if it does not exist.
+
+ :param path_info: the path info to use for matching. Overrides the
+ path info specified on binding.
+ :param method: the HTTP method used for matching. Overrides the
+ method specified on binding.
+ """
+ try:
+ self.match(path_info, method)
+ except RequestRedirect:
+ pass
+ except HTTPException:
+ return False
+ return True
+
+ def allowed_methods(self, path_info: t.Optional[str] = None) -> t.Iterable[str]:
+ """Returns the valid methods that match for a given path.
+
+ .. versionadded:: 0.7
+ """
+ try:
+ self.match(path_info, method="--")
+ except MethodNotAllowed as e:
+ return e.valid_methods # type: ignore
+ except HTTPException:
+ pass
+ return []
+
+ def get_host(self, domain_part: t.Optional[str]) -> str:
+ """Figures out the full host name for the given domain part. The
+ domain part is a subdomain in case host matching is disabled or
+ a full host name.
+ """
+ if self.map.host_matching:
+ if domain_part is None:
+ return self.server_name
+ return _to_str(domain_part, "ascii")
+ subdomain = domain_part
+ if subdomain is None:
+ subdomain = self.subdomain
+ else:
+ subdomain = _to_str(subdomain, "ascii")
+
+ if subdomain:
+ return f"{subdomain}.{self.server_name}"
+ else:
+ return self.server_name
+
+ def get_default_redirect(
+ self,
+ rule: Rule,
+ method: str,
+ values: t.MutableMapping[str, t.Any],
+ query_args: t.Union[t.Mapping[str, t.Any], str],
+ ) -> t.Optional[str]:
+ """A helper that returns the URL to redirect to if it finds one.
+ This is used for default redirecting only.
+
+ :internal:
+ """
+ assert self.map.redirect_defaults
+ for r in self.map._rules_by_endpoint[rule.endpoint]:
+ # every rule that comes after this one, including ourself
+ # has a lower priority for the defaults. We order the ones
+ # with the highest priority up for building.
+ if r is rule:
+ break
+ if r.provides_defaults_for(rule) and r.suitable_for(values, method):
+ values.update(r.defaults) # type: ignore
+ domain_part, path = r.build(values) # type: ignore
+ return self.make_redirect_url(path, query_args, domain_part=domain_part)
+ return None
+
+ def encode_query_args(self, query_args: t.Union[t.Mapping[str, t.Any], str]) -> str:
+ if not isinstance(query_args, str):
+ return url_encode(query_args, self.map.charset)
+ return query_args
+
+ def make_redirect_url(
+ self,
+ path_info: str,
+ query_args: t.Optional[t.Union[t.Mapping[str, t.Any], str]] = None,
+ domain_part: t.Optional[str] = None,
+ ) -> str:
+ """Creates a redirect URL.
+
+ :internal:
+ """
+ if query_args:
+ suffix = f"?{self.encode_query_args(query_args)}"
+ else:
+ suffix = ""
+
+ scheme = self.url_scheme or "http"
+ host = self.get_host(domain_part)
+ path = posixpath.join(self.script_name.strip("/"), path_info.lstrip("/"))
+ return f"{scheme}://{host}/{path}{suffix}"
+
+ def make_alias_redirect_url(
+ self,
+ path: str,
+ endpoint: str,
+ values: t.Mapping[str, t.Any],
+ method: str,
+ query_args: t.Union[t.Mapping[str, t.Any], str],
+ ) -> str:
+ """Internally called to make an alias redirect URL."""
+ url = self.build(
+ endpoint, values, method, append_unknown=False, force_external=True
+ )
+ if query_args:
+ url += f"?{self.encode_query_args(query_args)}"
+ assert url != path, "detected invalid alias setting. No canonical URL found"
+ return url
+
+ def _partial_build(
+ self,
+ endpoint: str,
+ values: t.Mapping[str, t.Any],
+ method: t.Optional[str],
+ append_unknown: bool,
+ ) -> t.Optional[t.Tuple[str, str, bool]]:
+ """Helper for :meth:`build`. Returns subdomain and path for the
+ rule that accepts this endpoint, values and method.
+
+ :internal:
+ """
+ # in case the method is none, try with the default method first
+ if method is None:
+ rv = self._partial_build(
+ endpoint, values, self.default_method, append_unknown
+ )
+ if rv is not None:
+ return rv
+
+ # Default method did not match or a specific method is passed.
+ # Check all for first match with matching host. If no matching
+ # host is found, go with first result.
+ first_match = None
+
+ for rule in self.map._rules_by_endpoint.get(endpoint, ()):
+ if rule.suitable_for(values, method):
+ build_rv = rule.build(values, append_unknown)
+
+ if build_rv is not None:
+ rv = (build_rv[0], build_rv[1], rule.websocket)
+ if self.map.host_matching:
+ if rv[0] == self.server_name:
+ return rv
+ elif first_match is None:
+ first_match = rv
+ else:
+ return rv
+
+ return first_match
+
+ def build(
+ self,
+ endpoint: str,
+ values: t.Optional[t.Mapping[str, t.Any]] = None,
+ method: t.Optional[str] = None,
+ force_external: bool = False,
+ append_unknown: bool = True,
+ url_scheme: t.Optional[str] = None,
+ ) -> str:
+ """Building URLs works pretty much the other way round. Instead of
+ `match` you call `build` and pass it the endpoint and a dict of
+ arguments for the placeholders.
+
+ The `build` function also accepts an argument called `force_external`
+ which, if you set it to `True`, will force external URLs. By default
+ external URLs (include the server name) will only be used if the
+ target URL is on a different subdomain.
+
+ >>> m = Map([
+ ... Rule('/', endpoint='index'),
+ ... Rule('/downloads/', endpoint='downloads/index'),
+ ... Rule('/downloads/<int:id>', endpoint='downloads/show')
+ ... ])
+ >>> urls = m.bind("example.com", "/")
+ >>> urls.build("index", {})
+ '/'
+ >>> urls.build("downloads/show", {'id': 42})
+ '/downloads/42'
+ >>> urls.build("downloads/show", {'id': 42}, force_external=True)
+ 'http://example.com/downloads/42'
+
+ Because URLs cannot contain non ASCII data you will always get
+ bytes back. Non ASCII characters are urlencoded with the
+ charset defined on the map instance.
+
+ Additional values are converted to strings and appended to the URL as
+ URL querystring parameters:
+
+ >>> urls.build("index", {'q': 'My Searchstring'})
+ '/?q=My+Searchstring'
+
+ When processing those additional values, lists are furthermore
+ interpreted as multiple values (as per
+ :py:class:`werkzeug.datastructures.MultiDict`):
+
+ >>> urls.build("index", {'q': ['a', 'b', 'c']})
+ '/?q=a&q=b&q=c'
+
+ Passing a ``MultiDict`` will also add multiple values:
+
+ >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
+ '/?p=z&q=a&q=b'
+
+ If a rule does not exist when building a `BuildError` exception is
+ raised.
+
+ The build method accepts an argument called `method` which allows you
+ to specify the method you want to have a URL built for if you have
+ different methods for the same endpoint specified.
+
+ :param endpoint: the endpoint of the URL to build.
+ :param values: the values for the URL to build. Unhandled values are
+ appended to the URL as query parameters.
+ :param method: the HTTP method for the rule if there are different
+ URLs for different methods on the same endpoint.
+ :param force_external: enforce full canonical external URLs. If the URL
+ scheme is not provided, this will generate
+ a protocol-relative URL.
+ :param append_unknown: unknown parameters are appended to the generated
+ URL as query string argument. Disable this
+ if you want the builder to ignore those.
+ :param url_scheme: Scheme to use in place of the bound
+ :attr:`url_scheme`.
+
+ .. versionchanged:: 2.0
+ Added the ``url_scheme`` parameter.
+
+ .. versionadded:: 0.6
+ Added the ``append_unknown`` parameter.
+ """
+ self.map.update()
+
+ if values:
+ if isinstance(values, MultiDict):
+ values = {
+ k: (v[0] if len(v) == 1 else v)
+ for k, v in dict.items(values)
+ if len(v) != 0
+ }
+ else: # plain dict
+ values = {k: v for k, v in values.items() if v is not None}
+ else:
+ values = {}
+
+ rv = self._partial_build(endpoint, values, method, append_unknown)
+ if rv is None:
+ raise BuildError(endpoint, values, method, self)
+
+ domain_part, path, websocket = rv
+ host = self.get_host(domain_part)
+
+ if url_scheme is None:
+ url_scheme = self.url_scheme
+
+ # Always build WebSocket routes with the scheme (browsers
+ # require full URLs). If bound to a WebSocket, ensure that HTTP
+ # routes are built with an HTTP scheme.
+ secure = url_scheme in {"https", "wss"}
+
+ if websocket:
+ force_external = True
+ url_scheme = "wss" if secure else "ws"
+ elif url_scheme:
+ url_scheme = "https" if secure else "http"
+
+ # shortcut this.
+ if not force_external and (
+ (self.map.host_matching and host == self.server_name)
+ or (not self.map.host_matching and domain_part == self.subdomain)
+ ):
+ return f"{self.script_name.rstrip('/')}/{path.lstrip('/')}"
+
+ scheme = f"{url_scheme}:" if url_scheme else ""
+ return f"{scheme}//{host}{self.script_name[:-1]}/{path.lstrip('/')}"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/matcher.py b/contrib/python/Werkzeug/py3/werkzeug/routing/matcher.py
new file mode 100644
index 0000000000..d22b05a5c9
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/matcher.py
@@ -0,0 +1,185 @@
+import re
+import typing as t
+from dataclasses import dataclass
+from dataclasses import field
+
+from .converters import ValidationError
+from .exceptions import NoMatch
+from .exceptions import RequestAliasRedirect
+from .exceptions import RequestPath
+from .rules import Rule
+from .rules import RulePart
+
+
+class SlashRequired(Exception):
+ pass
+
+
+@dataclass
+class State:
+ """A representation of a rule state.
+
+ This includes the *rules* that correspond to the state and the
+ possible *static* and *dynamic* transitions to the next state.
+ """
+
+ dynamic: t.List[t.Tuple[RulePart, "State"]] = field(default_factory=list)
+ rules: t.List[Rule] = field(default_factory=list)
+ static: t.Dict[str, "State"] = field(default_factory=dict)
+
+
+class StateMachineMatcher:
+ def __init__(self, merge_slashes: bool) -> None:
+ self._root = State()
+ self.merge_slashes = merge_slashes
+
+ def add(self, rule: Rule) -> None:
+ state = self._root
+ for part in rule._parts:
+ if part.static:
+ state.static.setdefault(part.content, State())
+ state = state.static[part.content]
+ else:
+ for test_part, new_state in state.dynamic:
+ if test_part == part:
+ state = new_state
+ break
+ else:
+ new_state = State()
+ state.dynamic.append((part, new_state))
+ state = new_state
+ state.rules.append(rule)
+
+ def update(self) -> None:
+ # For every state the dynamic transitions should be sorted by
+ # the weight of the transition
+ state = self._root
+
+ def _update_state(state: State) -> None:
+ state.dynamic.sort(key=lambda entry: entry[0].weight)
+ for new_state in state.static.values():
+ _update_state(new_state)
+ for _, new_state in state.dynamic:
+ _update_state(new_state)
+
+ _update_state(state)
+
+ def match(
+ self, domain: str, path: str, method: str, websocket: bool
+ ) -> t.Tuple[Rule, t.MutableMapping[str, t.Any]]:
+ # To match to a rule we need to start at the root state and
+ # try to follow the transitions until we find a match, or find
+ # there is no transition to follow.
+
+ have_match_for = set()
+ websocket_mismatch = False
+
+ def _match(
+ state: State, parts: t.List[str], values: t.List[str]
+ ) -> t.Optional[t.Tuple[Rule, t.List[str]]]:
+ # This function is meant to be called recursively, and will attempt
+ # to match the head part to the state's transitions.
+ nonlocal have_match_for, websocket_mismatch
+
+ # The base case is when all parts have been matched via
+ # transitions. Hence if there is a rule with methods &
+ # websocket that work return it and the dynamic values
+ # extracted.
+ if parts == []:
+ for rule in state.rules:
+ if rule.methods is not None and method not in rule.methods:
+ have_match_for.update(rule.methods)
+ elif rule.websocket != websocket:
+ websocket_mismatch = True
+ else:
+ return rule, values
+
+ # Test if there is a match with this path with a
+ # trailing slash, if so raise an exception to report
+ # that matching is possible with an additional slash
+ if "" in state.static:
+ for rule in state.static[""].rules:
+ if websocket == rule.websocket and (
+ rule.methods is None or method in rule.methods
+ ):
+ if rule.strict_slashes:
+ raise SlashRequired()
+ else:
+ return rule, values
+ return None
+
+ part = parts[0]
+ # To match this part try the static transitions first
+ if part in state.static:
+ rv = _match(state.static[part], parts[1:], values)
+ if rv is not None:
+ return rv
+ # No match via the static transitions, so try the dynamic
+ # ones.
+ for test_part, new_state in state.dynamic:
+ target = part
+ remaining = parts[1:]
+ # A final part indicates a transition that always
+ # consumes the remaining parts i.e. transitions to a
+ # final state.
+ if test_part.final:
+ target = "/".join(parts)
+ remaining = []
+ match = re.compile(test_part.content).match(target)
+ if match is not None:
+ rv = _match(new_state, remaining, values + list(match.groups()))
+ if rv is not None:
+ return rv
+
+ # If there is no match and the only part left is a
+ # trailing slash ("") consider rules that aren't
+ # strict-slashes as these should match if there is a final
+ # slash part.
+ if parts == [""]:
+ for rule in state.rules:
+ if rule.strict_slashes:
+ continue
+ if rule.methods is not None and method not in rule.methods:
+ have_match_for.update(rule.methods)
+ elif rule.websocket != websocket:
+ websocket_mismatch = True
+ else:
+ return rule, values
+
+ return None
+
+ try:
+ rv = _match(self._root, [domain, *path.split("/")], [])
+ except SlashRequired:
+ raise RequestPath(f"{path}/") from None
+
+ if self.merge_slashes and rv is None:
+ # Try to match again, but with slashes merged
+ path = re.sub("/{2,}?", "/", path)
+ try:
+ rv = _match(self._root, [domain, *path.split("/")], [])
+ except SlashRequired:
+ raise RequestPath(f"{path}/") from None
+ if rv is None:
+ raise NoMatch(have_match_for, websocket_mismatch)
+ else:
+ raise RequestPath(f"{path}")
+ elif rv is not None:
+ rule, values = rv
+
+ result = {}
+ for name, value in zip(rule._converters.keys(), values):
+ try:
+ value = rule._converters[name].to_python(value)
+ except ValidationError:
+ raise NoMatch(have_match_for, websocket_mismatch) from None
+ result[str(name)] = value
+ if rule.defaults:
+ result.update(rule.defaults)
+
+ if rule.alias and rule.map.redirect_defaults:
+ raise RequestAliasRedirect(result, rule.endpoint)
+
+ return rule, result
+
+ raise NoMatch(have_match_for, websocket_mismatch)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/routing/rules.py b/contrib/python/Werkzeug/py3/werkzeug/routing/rules.py
new file mode 100644
index 0000000000..a61717ade8
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/routing/rules.py
@@ -0,0 +1,879 @@
+import ast
+import re
+import typing as t
+from dataclasses import dataclass
+from string import Template
+from types import CodeType
+
+from .._internal import _to_bytes
+from ..urls import url_encode
+from ..urls import url_quote
+from .converters import ValidationError
+
+if t.TYPE_CHECKING:
+ from .converters import BaseConverter
+ from .map import Map
+
+
+class Weighting(t.NamedTuple):
+ number_static_weights: int
+ static_weights: t.List[t.Tuple[int, int]]
+ number_argument_weights: int
+ argument_weights: t.List[int]
+
+
+@dataclass
+class RulePart:
+ """A part of a rule.
+
+ Rules can be represented by parts as delimited by `/` with
+ instances of this class representing those parts. The *content* is
+ either the raw content if *static* or a regex string to match
+ against. The *weight* can be used to order parts when matching.
+
+ """
+
+ content: str
+ final: bool
+ static: bool
+ weight: Weighting
+
+
+_part_re = re.compile(
+ r"""
+ (?:
+ (?P<slash>\/) # a slash
+ |
+ (?P<static>[^<\/]+) # static rule data
+ |
+ (?:
+ <
+ (?:
+ (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
+ (?:\((?P<arguments>.*?)\))? # converter arguments
+ \: # variable delimiter
+ )?
+ (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
+ >
+ )
+ )
+ """,
+ re.VERBOSE,
+)
+
+_simple_rule_re = re.compile(r"<([^>]+)>")
+_converter_args_re = re.compile(
+ r"""
+ ((?P<name>\w+)\s*=\s*)?
+ (?P<value>
+ True|False|
+ \d+.\d+|
+ \d+.|
+ \d+|
+ [\w\d_.]+|
+ [urUR]?(?P<stringval>"[^"]*?"|'[^']*')
+ )\s*,
+ """,
+ re.VERBOSE,
+)
+
+
+_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
+
+
+def _find(value: str, target: str, pos: int) -> int:
+ """Find the *target* in *value* after *pos*.
+
+ Returns the *value* length if *target* isn't found.
+ """
+ try:
+ return value.index(target, pos)
+ except ValueError:
+ return len(value)
+
+
+def _pythonize(value: str) -> t.Union[None, bool, int, float, str]:
+ if value in _PYTHON_CONSTANTS:
+ return _PYTHON_CONSTANTS[value]
+ for convert in int, float:
+ try:
+ return convert(value) # type: ignore
+ except ValueError:
+ pass
+ if value[:1] == value[-1:] and value[0] in "\"'":
+ value = value[1:-1]
+ return str(value)
+
+
+def parse_converter_args(argstr: str) -> t.Tuple[t.Tuple, t.Dict[str, t.Any]]:
+ argstr += ","
+ args = []
+ kwargs = {}
+
+ for item in _converter_args_re.finditer(argstr):
+ value = item.group("stringval")
+ if value is None:
+ value = item.group("value")
+ value = _pythonize(value)
+ if not item.group("name"):
+ args.append(value)
+ else:
+ name = item.group("name")
+ kwargs[name] = value
+
+ return tuple(args), kwargs
+
+
+class RuleFactory:
+ """As soon as you have more complex URL setups it's a good idea to use rule
+ factories to avoid repetitive tasks. Some of them are builtin, others can
+ be added by subclassing `RuleFactory` and overriding `get_rules`.
+ """
+
+ def get_rules(self, map: "Map") -> t.Iterable["Rule"]:
+ """Subclasses of `RuleFactory` have to override this method and return
+ an iterable of rules."""
+ raise NotImplementedError()
+
+
+class Subdomain(RuleFactory):
+ """All URLs provided by this factory have the subdomain set to a
+ specific domain. For example if you want to use the subdomain for
+ the current language this can be a good setup::
+
+ url_map = Map([
+ Rule('/', endpoint='#select_language'),
+ Subdomain('<string(length=2):lang_code>', [
+ Rule('/', endpoint='index'),
+ Rule('/about', endpoint='about'),
+ Rule('/help', endpoint='help')
+ ])
+ ])
+
+ All the rules except for the ``'#select_language'`` endpoint will now
+ listen on a two letter long subdomain that holds the language code
+ for the current request.
+ """
+
+ def __init__(self, subdomain: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.subdomain = subdomain
+ self.rules = rules
+
+ def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.subdomain = self.subdomain
+ yield rule
+
+
+class Submount(RuleFactory):
+ """Like `Subdomain` but prefixes the URL rule with a given string::
+
+ url_map = Map([
+ Rule('/', endpoint='index'),
+ Submount('/blog', [
+ Rule('/', endpoint='blog/index'),
+ Rule('/entry/<entry_slug>', endpoint='blog/show')
+ ])
+ ])
+
+ Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
+ """
+
+ def __init__(self, path: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.path = path.rstrip("/")
+ self.rules = rules
+
+ def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.rule = self.path + rule.rule
+ yield rule
+
+
+class EndpointPrefix(RuleFactory):
+ """Prefixes all endpoints (which must be strings for this factory) with
+ another string. This can be useful for sub applications::
+
+ url_map = Map([
+ Rule('/', endpoint='index'),
+ EndpointPrefix('blog/', [Submount('/blog', [
+ Rule('/', endpoint='index'),
+ Rule('/entry/<entry_slug>', endpoint='show')
+ ])])
+ ])
+ """
+
+ def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
+ self.prefix = prefix
+ self.rules = rules
+
+ def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ rule = rule.empty()
+ rule.endpoint = self.prefix + rule.endpoint
+ yield rule
+
+
+class RuleTemplate:
+ """Returns copies of the rules wrapped and expands string templates in
+ the endpoint, rule, defaults or subdomain sections.
+
+ Here a small example for such a rule template::
+
+ from werkzeug.routing import Map, Rule, RuleTemplate
+
+ resource = RuleTemplate([
+ Rule('/$name/', endpoint='$name.list'),
+ Rule('/$name/<int:id>', endpoint='$name.show')
+ ])
+
+ url_map = Map([resource(name='user'), resource(name='page')])
+
+ When a rule template is called the keyword arguments are used to
+ replace the placeholders in all the string parameters.
+ """
+
+ def __init__(self, rules: t.Iterable["Rule"]) -> None:
+ self.rules = list(rules)
+
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> "RuleTemplateFactory":
+ return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
+
+
+class RuleTemplateFactory(RuleFactory):
+ """A factory that fills in template variables into rules. Used by
+ `RuleTemplate` internally.
+
+ :internal:
+ """
+
+ def __init__(
+ self, rules: t.Iterable[RuleFactory], context: t.Dict[str, t.Any]
+ ) -> None:
+ self.rules = rules
+ self.context = context
+
+ def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
+ for rulefactory in self.rules:
+ for rule in rulefactory.get_rules(map):
+ new_defaults = subdomain = None
+ if rule.defaults:
+ new_defaults = {}
+ for key, value in rule.defaults.items():
+ if isinstance(value, str):
+ value = Template(value).substitute(self.context)
+ new_defaults[key] = value
+ if rule.subdomain is not None:
+ subdomain = Template(rule.subdomain).substitute(self.context)
+ new_endpoint = rule.endpoint
+ if isinstance(new_endpoint, str):
+ new_endpoint = Template(new_endpoint).substitute(self.context)
+ yield Rule(
+ Template(rule.rule).substitute(self.context),
+ new_defaults,
+ subdomain,
+ rule.methods,
+ rule.build_only,
+ new_endpoint,
+ rule.strict_slashes,
+ )
+
+
+def _prefix_names(src: str) -> ast.stmt:
+ """ast parse and prefix names with `.` to avoid collision with user vars"""
+ tree = ast.parse(src).body[0]
+ if isinstance(tree, ast.Expr):
+ tree = tree.value # type: ignore
+ for node in ast.walk(tree):
+ if isinstance(node, ast.Name):
+ node.id = f".{node.id}"
+ return tree
+
+
+_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
+_IF_KWARGS_URL_ENCODE_CODE = """\
+if kwargs:
+ params = self._encode_query_vars(kwargs)
+ q = "?" if params else ""
+else:
+ q = params = ""
+"""
+_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
+_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
+
+
+class Rule(RuleFactory):
+ """A Rule represents one URL pattern. There are some options for `Rule`
+ that change the way it behaves and are passed to the `Rule` constructor.
+ Note that besides the rule-string all arguments *must* be keyword arguments
+ in order to not break the application on Werkzeug upgrades.
+
+ `string`
+ Rule strings basically are just normal URL paths with placeholders in
+ the format ``<converter(arguments):name>`` where the converter and the
+ arguments are optional. If no converter is defined the `default`
+ converter is used which means `string` in the normal configuration.
+
+ URL rules that end with a slash are branch URLs, others are leaves.
+ If you have `strict_slashes` enabled (which is the default), all
+ branch URLs that are matched without a trailing slash will trigger a
+ redirect to the same URL with the missing slash appended.
+
+ The converters are defined on the `Map`.
+
+ `endpoint`
+ The endpoint for this rule. This can be anything. A reference to a
+ function, a string, a number etc. The preferred way is using a string
+ because the endpoint is used for URL generation.
+
+ `defaults`
+ An optional dict with defaults for other rules with the same endpoint.
+ This is a bit tricky but useful if you want to have unique URLs::
+
+ url_map = Map([
+ Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
+ Rule('/all/page/<int:page>', endpoint='all_entries')
+ ])
+
+ If a user now visits ``http://example.com/all/page/1`` they will be
+ redirected to ``http://example.com/all/``. If `redirect_defaults` is
+ disabled on the `Map` instance this will only affect the URL
+ generation.
+
+ `subdomain`
+ The subdomain rule string for this rule. If not specified the rule
+ only matches for the `default_subdomain` of the map. If the map is
+ not bound to a subdomain this feature is disabled.
+
+ Can be useful if you want to have user profiles on different subdomains
+ and all subdomains are forwarded to your application::
+
+ url_map = Map([
+ Rule('/', subdomain='<username>', endpoint='user/homepage'),
+ Rule('/stats', subdomain='<username>', endpoint='user/stats')
+ ])
+
+ `methods`
+ A sequence of http methods this rule applies to. If not specified, all
+ methods are allowed. For example this can be useful if you want different
+ endpoints for `POST` and `GET`. If methods are defined and the path
+ matches but the method matched against is not in this list or in the
+ list of another rule for that path the error raised is of the type
+ `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
+ list of methods and `HEAD` is not, `HEAD` is added automatically.
+
+ `strict_slashes`
+ Override the `Map` setting for `strict_slashes` only for this rule. If
+ not specified the `Map` setting is used.
+
+ `merge_slashes`
+ Override :attr:`Map.merge_slashes` for this rule.
+
+ `build_only`
+ Set this to True and the rule will never match but will create a URL
+        that can be built. This is useful if you have resources on a subdomain
+ or folder that are not handled by the WSGI application (like static data)
+
+ `redirect_to`
+ If given this must be either a string or callable. In case of a
+ callable it's called with the url adapter that triggered the match and
+ the values of the URL as keyword arguments and has to return the target
+ for the redirect, otherwise it has to be a string with placeholders in
+ rule syntax::
+
+ def foo_with_slug(adapter, id):
+ # ask the database for the slug for the old id. this of
+ # course has nothing to do with werkzeug.
+ return f'foo/{Foo.get_slug_for_id(id)}'
+
+ url_map = Map([
+ Rule('/foo/<slug>', endpoint='foo'),
+ Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
+ Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
+ ])
+
+ When the rule is matched the routing system will raise a
+ `RequestRedirect` exception with the target for the redirect.
+
+ Keep in mind that the URL will be joined against the URL root of the
+ script so don't use a leading slash on the target URL unless you
+ really mean root of that domain.
+
+ `alias`
+ If enabled this rule serves as an alias for another rule with the same
+ endpoint and arguments.
+
+ `host`
+ If provided and the URL map has host matching enabled this can be
+ used to provide a match rule for the whole host. This also means
+ that the subdomain feature is disabled.
+
+ `websocket`
+        If ``True``, this rule only matches for WebSocket (``ws://``,
+ ``wss://``) requests. By default, rules will only match for HTTP
+ requests.
+
+ .. versionchanged:: 2.1
+ Percent-encoded newlines (``%0a``), which are decoded by WSGI
+ servers, are considered when routing instead of terminating the
+ match early.
+
+ .. versionadded:: 1.0
+ Added ``websocket``.
+
+ .. versionadded:: 1.0
+ Added ``merge_slashes``.
+
+ .. versionadded:: 0.7
+ Added ``alias`` and ``host``.
+
+ .. versionchanged:: 0.6.1
+ ``HEAD`` is added to ``methods`` if ``GET`` is present.
+ """
+
+ def __init__(
+ self,
+ string: str,
+ defaults: t.Optional[t.Mapping[str, t.Any]] = None,
+ subdomain: t.Optional[str] = None,
+ methods: t.Optional[t.Iterable[str]] = None,
+ build_only: bool = False,
+ endpoint: t.Optional[str] = None,
+ strict_slashes: t.Optional[bool] = None,
+ merge_slashes: t.Optional[bool] = None,
+ redirect_to: t.Optional[t.Union[str, t.Callable[..., str]]] = None,
+ alias: bool = False,
+ host: t.Optional[str] = None,
+ websocket: bool = False,
+ ) -> None:
+ if not string.startswith("/"):
+ raise ValueError("urls must start with a leading slash")
+ self.rule = string
+ self.is_leaf = not string.endswith("/")
+ self.is_branch = string.endswith("/")
+
+ self.map: "Map" = None # type: ignore
+ self.strict_slashes = strict_slashes
+ self.merge_slashes = merge_slashes
+ self.subdomain = subdomain
+ self.host = host
+ self.defaults = defaults
+ self.build_only = build_only
+ self.alias = alias
+ self.websocket = websocket
+
+ if methods is not None:
+ if isinstance(methods, str):
+ raise TypeError("'methods' should be a list of strings.")
+
+ methods = {x.upper() for x in methods}
+
+ if "HEAD" not in methods and "GET" in methods:
+ methods.add("HEAD")
+
+ if websocket and methods - {"GET", "HEAD", "OPTIONS"}:
+ raise ValueError(
+ "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods."
+ )
+
+ self.methods = methods
+ self.endpoint: str = endpoint # type: ignore
+ self.redirect_to = redirect_to
+
+ if defaults:
+ self.arguments = set(map(str, defaults))
+ else:
+ self.arguments = set()
+
+ self._converters: t.Dict[str, "BaseConverter"] = {}
+ self._trace: t.List[t.Tuple[bool, str]] = []
+ self._parts: t.List[RulePart] = []
+
+ def empty(self) -> "Rule":
+ """
+ Return an unbound copy of this rule.
+
+        This can be useful if you want to reuse an already bound URL for another
+ map. See ``get_empty_kwargs`` to override what keyword arguments are
+ provided to the new copy.
+ """
+ return type(self)(self.rule, **self.get_empty_kwargs())
+
+ def get_empty_kwargs(self) -> t.Mapping[str, t.Any]:
+ """
+ Provides kwargs for instantiating empty copy with empty()
+
+ Use this method to provide custom keyword arguments to the subclass of
+ ``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
+ has custom keyword arguments that are needed at instantiation.
+
+ Must return a ``dict`` that will be provided as kwargs to the new
+ instance of ``Rule``, following the initial ``self.rule`` value which
+ is always provided as the first, required positional argument.
+ """
+ defaults = None
+ if self.defaults:
+ defaults = dict(self.defaults)
+ return dict(
+ defaults=defaults,
+ subdomain=self.subdomain,
+ methods=self.methods,
+ build_only=self.build_only,
+ endpoint=self.endpoint,
+ strict_slashes=self.strict_slashes,
+ redirect_to=self.redirect_to,
+ alias=self.alias,
+ host=self.host,
+ )
+
+ def get_rules(self, map: "Map") -> t.Iterator["Rule"]:
+ yield self
+
+ def refresh(self) -> None:
+ """Rebinds and refreshes the URL. Call this if you modified the
+ rule in place.
+
+ :internal:
+ """
+ self.bind(self.map, rebind=True)
+
+ def bind(self, map: "Map", rebind: bool = False) -> None:
+ """Bind the url to a map and create a regular expression based on
+ the information from the rule itself and the defaults from the map.
+
+ :internal:
+ """
+ if self.map is not None and not rebind:
+ raise RuntimeError(f"url rule {self!r} already bound to map {self.map!r}")
+ self.map = map
+ if self.strict_slashes is None:
+ self.strict_slashes = map.strict_slashes
+ if self.merge_slashes is None:
+ self.merge_slashes = map.merge_slashes
+ if self.subdomain is None:
+ self.subdomain = map.default_subdomain
+ self.compile()
+
+ def get_converter(
+ self,
+ variable_name: str,
+ converter_name: str,
+ args: t.Tuple,
+ kwargs: t.Mapping[str, t.Any],
+ ) -> "BaseConverter":
+ """Looks up the converter for the given parameter.
+
+ .. versionadded:: 0.9
+ """
+ if converter_name not in self.map.converters:
+ raise LookupError(f"the converter {converter_name!r} does not exist")
+ return self.map.converters[converter_name](self.map, *args, **kwargs)
+
+ def _encode_query_vars(self, query_vars: t.Mapping[str, t.Any]) -> str:
+ return url_encode(
+ query_vars,
+ charset=self.map.charset,
+ sort=self.map.sort_parameters,
+ key=self.map.sort_key,
+ )
+
+ def _parse_rule(self, rule: str) -> t.Iterable[RulePart]:
+ content = ""
+ static = True
+ argument_weights = []
+ static_weights: t.List[t.Tuple[int, int]] = []
+ final = False
+
+ pos = 0
+ while pos < len(rule):
+ match = _part_re.match(rule, pos)
+ if match is None:
+ raise ValueError(f"malformed url rule: {rule!r}")
+
+ data = match.groupdict()
+ if data["static"] is not None:
+ static_weights.append((len(static_weights), -len(data["static"])))
+ self._trace.append((False, data["static"]))
+ content += data["static"] if static else re.escape(data["static"])
+
+ if data["variable"] is not None:
+ if static:
+ # Switching content to represent regex, hence the need to escape
+ content = re.escape(content)
+ static = False
+ c_args, c_kwargs = parse_converter_args(data["arguments"] or "")
+ convobj = self.get_converter(
+ data["variable"], data["converter"] or "default", c_args, c_kwargs
+ )
+ self._converters[data["variable"]] = convobj
+ self.arguments.add(data["variable"])
+ if not convobj.part_isolating:
+ final = True
+ content += f"({convobj.regex})"
+ argument_weights.append(convobj.weight)
+ self._trace.append((True, data["variable"]))
+
+ if data["slash"] is not None:
+ self._trace.append((False, "/"))
+ if final:
+ content += "/"
+ else:
+ if not static:
+ content += r"\Z"
+ weight = Weighting(
+ -len(static_weights),
+ static_weights,
+ -len(argument_weights),
+ argument_weights,
+ )
+ yield RulePart(
+ content=content, final=final, static=static, weight=weight
+ )
+ content = ""
+ static = True
+ argument_weights = []
+ static_weights = []
+ final = False
+
+ pos = match.end()
+
+ if not static:
+ content += r"\Z"
+ weight = Weighting(
+ -len(static_weights),
+ static_weights,
+ -len(argument_weights),
+ argument_weights,
+ )
+ yield RulePart(content=content, final=final, static=static, weight=weight)
+
+ def compile(self) -> None:
+ """Compiles the regular expression and stores it."""
+ assert self.map is not None, "rule not bound"
+
+ if self.map.host_matching:
+ domain_rule = self.host or ""
+ else:
+ domain_rule = self.subdomain or ""
+ self._parts = []
+ self._trace = []
+ self._converters = {}
+ if domain_rule == "":
+ self._parts = [
+ RulePart(
+ content="", final=False, static=True, weight=Weighting(0, [], 0, [])
+ )
+ ]
+ else:
+ self._parts.extend(self._parse_rule(domain_rule))
+ self._trace.append((False, "|"))
+ rule = self.rule
+ if self.merge_slashes:
+ rule = re.sub("/{2,}?", "/", self.rule)
+ self._parts.extend(self._parse_rule(rule))
+
+ self._build: t.Callable[..., t.Tuple[str, str]]
+ self._build = self._compile_builder(False).__get__(self, None)
+ self._build_unknown: t.Callable[..., t.Tuple[str, str]]
+ self._build_unknown = self._compile_builder(True).__get__(self, None)
+
+ @staticmethod
+ def _get_func_code(code: CodeType, name: str) -> t.Callable[..., t.Tuple[str, str]]:
+ globs: t.Dict[str, t.Any] = {}
+ locs: t.Dict[str, t.Any] = {}
+ exec(code, globs, locs)
+ return locs[name] # type: ignore
+
+ def _compile_builder(
+ self, append_unknown: bool = True
+ ) -> t.Callable[..., t.Tuple[str, str]]:
+ defaults = self.defaults or {}
+ dom_ops: t.List[t.Tuple[bool, str]] = []
+ url_ops: t.List[t.Tuple[bool, str]] = []
+
+ opl = dom_ops
+ for is_dynamic, data in self._trace:
+ if data == "|" and opl is dom_ops:
+ opl = url_ops
+ continue
+ # this seems like a silly case to ever come up but:
+ # if a default is given for a value that appears in the rule,
+ # resolve it to a constant ahead of time
+ if is_dynamic and data in defaults:
+ data = self._converters[data].to_url(defaults[data])
+ opl.append((False, data))
+ elif not is_dynamic:
+ opl.append(
+ (False, url_quote(_to_bytes(data, self.map.charset), safe="/:|+"))
+ )
+ else:
+ opl.append((True, data))
+
+ def _convert(elem: str) -> ast.stmt:
+ ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
+ ret.args = [ast.Name(str(elem), ast.Load())] # type: ignore # str for py2
+ return ret
+
+ def _parts(ops: t.List[t.Tuple[bool, str]]) -> t.List[ast.AST]:
+ parts = [
+ _convert(elem) if is_dynamic else ast.Str(s=elem)
+ for is_dynamic, elem in ops
+ ]
+ parts = parts or [ast.Str("")]
+ # constant fold
+ ret = [parts[0]]
+ for p in parts[1:]:
+ if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str):
+ ret[-1] = ast.Str(ret[-1].s + p.s)
+ else:
+ ret.append(p)
+ return ret
+
+ dom_parts = _parts(dom_ops)
+ url_parts = _parts(url_ops)
+ if not append_unknown:
+ body = []
+ else:
+ body = [_IF_KWARGS_URL_ENCODE_AST]
+ url_parts.extend(_URL_ENCODE_AST_NAMES)
+
+ def _join(parts: t.List[ast.AST]) -> ast.AST:
+ if len(parts) == 1: # shortcut
+ return parts[0]
+ return ast.JoinedStr(parts)
+
+ body.append(
+ ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
+ )
+
+ pargs = [
+ elem
+ for is_dynamic, elem in dom_ops + url_ops
+ if is_dynamic and elem not in defaults
+ ]
+ kargs = [str(k) for k in defaults]
+
+ func_ast: ast.FunctionDef = _prefix_names("def _(): pass") # type: ignore
+ func_ast.name = f"<builder:{self.rule!r}>"
+ func_ast.args.args.append(ast.arg(".self", None))
+ for arg in pargs + kargs:
+ func_ast.args.args.append(ast.arg(arg, None))
+ func_ast.args.kwarg = ast.arg(".kwargs", None)
+ for _ in kargs:
+ func_ast.args.defaults.append(ast.Str(""))
+ func_ast.body = body
+
+ # use `ast.parse` instead of `ast.Module` for better portability
+ # Python 3.8 changes the signature of `ast.Module`
+ module = ast.parse("")
+ module.body = [func_ast]
+
+ # mark everything as on line 1, offset 0
+ # less error-prone than `ast.fix_missing_locations`
+ # bad line numbers cause an assert to fail in debug builds
+ for node in ast.walk(module):
+ if "lineno" in node._attributes:
+ node.lineno = 1
+ if "end_lineno" in node._attributes:
+ node.end_lineno = node.lineno # type: ignore[attr-defined]
+ if "col_offset" in node._attributes:
+ node.col_offset = 0
+ if "end_col_offset" in node._attributes:
+ node.end_col_offset = node.col_offset # type: ignore[attr-defined]
+
+ code = compile(module, "<werkzeug routing>", "exec")
+ return self._get_func_code(code, func_ast.name)
+
+ def build(
+ self, values: t.Mapping[str, t.Any], append_unknown: bool = True
+ ) -> t.Optional[t.Tuple[str, str]]:
+ """Assembles the relative url for that rule and the subdomain.
+ If building doesn't work for some reasons `None` is returned.
+
+ :internal:
+ """
+ try:
+ if append_unknown:
+ return self._build_unknown(**values)
+ else:
+ return self._build(**values)
+ except ValidationError:
+ return None
+
+ def provides_defaults_for(self, rule: "Rule") -> bool:
+ """Check if this rule has defaults for a given rule.
+
+ :internal:
+ """
+ return bool(
+ not self.build_only
+ and self.defaults
+ and self.endpoint == rule.endpoint
+ and self != rule
+ and self.arguments == rule.arguments
+ )
+
+ def suitable_for(
+ self, values: t.Mapping[str, t.Any], method: t.Optional[str] = None
+ ) -> bool:
+ """Check if the dict of values has enough data for url generation.
+
+ :internal:
+ """
+ # if a method was given explicitly and that method is not supported
+ # by this rule, this rule is not suitable.
+ if (
+ method is not None
+ and self.methods is not None
+ and method not in self.methods
+ ):
+ return False
+
+ defaults = self.defaults or ()
+
+ # all arguments required must be either in the defaults dict or
+ # the value dictionary otherwise it's not suitable
+ for key in self.arguments:
+ if key not in defaults and key not in values:
+ return False
+
+ # in case defaults are given we ensure that either the value was
+ # skipped or the value is the same as the default value.
+ if defaults:
+ for key, value in defaults.items():
+ if key in values and value != values[key]:
+ return False
+
+ return True
+
+ def build_compare_key(self) -> t.Tuple[int, int, int]:
+ """The build compare key for sorting.
+
+ :internal:
+ """
+ return (1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ()))
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, type(self)) and self._trace == other._trace
+
+ __hash__ = None # type: ignore
+
+ def __str__(self) -> str:
+ return self.rule
+
+ def __repr__(self) -> str:
+ if self.map is None:
+ return f"<{type(self).__name__} (unbound)>"
+ parts = []
+ for is_dynamic, data in self._trace:
+ if is_dynamic:
+ parts.append(f"<{data}>")
+ else:
+ parts.append(data)
+ parts = "".join(parts).lstrip("|")
+ methods = f" ({', '.join(self.methods)})" if self.methods is not None else ""
+ return f"<{type(self).__name__} {parts!r}{methods} -> {self.endpoint}>"
diff --git a/contrib/python/Werkzeug/py3/werkzeug/sansio/http.py b/contrib/python/Werkzeug/py3/werkzeug/sansio/http.py
new file mode 100644
index 0000000000..8288882b72
--- /dev/null
+++ b/contrib/python/Werkzeug/py3/werkzeug/sansio/http.py
@@ -0,0 +1,140 @@
+import re
+import typing as t
+from datetime import datetime
+
+from .._internal import _cookie_parse_impl
+from .._internal import _dt_as_utc
+from .._internal import _to_str
+from ..http import generate_etag
+from ..http import parse_date
+from ..http import parse_etags
+from ..http import parse_if_range_header
+from ..http import unquote_etag
+
+_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
+
+
+def is_resource_modified(
+ http_range: t.Optional[str] = None,
+ http_if_range: t.Optional[str] = None,
+ http_if_modified_since: t.Optional[str] = None,
+ http_if_none_match: t.Optional[str] = None,
+ http_if_match: t.Optional[str] = None,
+ etag: t.Optional[str] = None,
+ data: t.Optional[bytes] = None,
+ last_modified: t.Optional[t.Union[datetime, str]] = None,
+ ignore_if_range: bool = True,
+) -> bool:
+ """Convenience method for conditional requests.
+ :param http_range: Range HTTP header
+ :param http_if_range: If-Range HTTP header
+ :param http_if_modified_since: If-Modified-Since HTTP header
+ :param http_if_none_match: If-None-Match HTTP header
+ :param http_if_match: If-Match HTTP header
+ :param etag: the etag for the response for comparison.
+ :param data: or alternatively the data of the response to automatically
+ generate an etag using :func:`generate_etag`.
+ :param last_modified: an optional date of the last modification.
+ :param ignore_if_range: If `False`, `If-Range` header will be taken into
+ account.
+ :return: `True` if the resource was modified, otherwise `False`.
+
+ .. versionadded:: 2.2
+ """
+ if etag is None and data is not None:
+ etag = generate_etag(data)
+ elif data is not None:
+ raise TypeError("both data and etag given")
+
+ unmodified = False
+ if isinstance(last_modified, str):
+ last_modified = parse_date(last_modified)
+
+ # HTTP doesn't use microsecond, remove it to avoid false positive
+ # comparisons. Mark naive datetimes as UTC.
+ if last_modified is not None:
+ last_modified = _dt_as_utc(last_modified.replace(microsecond=0))
+
+ if_range = None
+ if not ignore_if_range and http_range is not None:
+ # https://tools.ietf.org/html/rfc7233#section-3.2
+ # A server MUST ignore an If-Range header field received in a request
+ # that does not contain a Range header field.
+ if_range = parse_if_range_header(http_if_range)
+
+ if if_range is not None and if_range.date is not None:
+ modified_since: t.Optional[datetime] = if_range.date
+ else:
+ modified_since = parse_date(http_if_modified_since)
+
+ if modified_since and last_modified and last_modified <= modified_since:
+ unmodified = True
+
+ if etag:
+ etag, _ = unquote_etag(etag)
+ etag = t.cast(str, etag)
+
+ if if_range is not None and if_range.etag is not None:
+ unmodified = parse_etags(if_range.etag).contains(etag)
+ else:
+ if_none_match = parse_etags(http_if_none_match)
+ if if_none_match:
+ # https://tools.ietf.org/html/rfc7232#section-3.2
+ # "A recipient MUST use the weak comparison function when comparing
+ # entity-tags for If-None-Match"
+ unmodified = if_none_match.contains_weak(etag)
+
+ # https://tools.ietf.org/html/rfc7232#section-3.1
+ # "Origin server MUST use the strong comparison function when
+ # comparing entity-tags for If-Match"
+ if_match = parse_etags(http_if_match)
+ if if_match:
+ unmodified = not if_match.is_strong(etag)
+
+ return not unmodified
+
+
+def parse_cookie(
+ cookie: t.Union[bytes, str, None] = "",
+ charset: str = "utf-8",
+ errors: str = "replace",
+ cls: t.Optional[t.Type["ds.MultiDict"]] = None,
+) -> "ds.MultiDict[str, str]":
+ """Parse a cookie from a string.
+
+ The same key can be provided multiple times, the values are stored
+ in-order. The default :class:`MultiDict` will have the first value
+ first, and all values can be retrieved with
+ :meth:`MultiDict.getlist`.
+
+ :param cookie: The cookie header as a string.
+ :param charset: The charset for the cookie values.
+ :param errors: The error behavior for the charset decoding.
+ :param cls: A dict-like class to store the parsed cookies in.
+ Defaults to :class:`MultiDict`.
+
+ .. versionadded:: 2.2
+ """
+ # PEP 3333 sends headers through the environ as latin1 decoded
+ # strings. Encode strings back to bytes for parsing.
+ if isinstance(cookie, str):
+ cookie = cookie.encode("latin1", "replace")
+
+ if cls is None:
+ cls = ds.MultiDict
+
+ def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:
+ for key, val in _cookie_parse_impl(cookie): # type: ignore
+ key_str = _to_str(key, charset, errors, allow_none_charset=True)
+
+ if not key_str:
+ continue
+
+ val_str = _to_str(val, charset, errors, allow_none_charset=True)
+ yield key_str, val_str
+
+ return cls(_parse_pairs())
+
+
+# circular dependencies
+from .. import datastructures as ds
diff --git a/contrib/python/Werkzeug/py3/werkzeug/sansio/multipart.py b/contrib/python/Werkzeug/py3/werkzeug/sansio/multipart.py
index 2d544224cb..d8abeb3543 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/sansio/multipart.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/sansio/multipart.py
@@ -70,6 +70,10 @@ LINE_BREAK_RE = re.compile(LINE_BREAK, re.MULTILINE)
# Header values can be continued via a space or tab after the linebreak, as
# per RFC2231
HEADER_CONTINUATION_RE = re.compile(b"%s[ \t]" % LINE_BREAK, re.MULTILINE)
+# This must be long enough to contain any line breaks plus any
+# additional boundary markers (--) such that they will be found in a
+# subsequent search
+SEARCH_EXTRA_LENGTH = 8
class MultipartDecoder:
@@ -113,6 +117,7 @@ class MultipartDecoder:
% (LINE_BREAK, re.escape(boundary), LINE_BREAK, LINE_BREAK),
re.MULTILINE,
)
+ self._search_position = 0
def last_newline(self) -> int:
try:
@@ -141,7 +146,7 @@ class MultipartDecoder:
event: Event = NEED_DATA
if self.state == State.PREAMBLE:
- match = self.preamble_re.search(self.buffer)
+ match = self.preamble_re.search(self.buffer, self._search_position)
if match is not None:
if match.group(1).startswith(b"--"):
self.state = State.EPILOGUE
@@ -150,9 +155,17 @@ class MultipartDecoder:
data = bytes(self.buffer[: match.start()])
del self.buffer[: match.end()]
event = Preamble(data=data)
+ self._search_position = 0
+ else:
+ # Update the search start position to be equal to the
+ # current buffer length (already searched) minus a
+ # safe buffer for part of the search target.
+ self._search_position = max(
+ 0, len(self.buffer) - len(self.boundary) - SEARCH_EXTRA_LENGTH
+ )
elif self.state == State.PART:
- match = BLANK_LINE_RE.search(self.buffer)
+ match = BLANK_LINE_RE.search(self.buffer, self._search_position)
if match is not None:
headers = self._parse_headers(self.buffer[: match.start()])
del self.buffer[: match.end()]
@@ -177,6 +190,12 @@ class MultipartDecoder:
name=name,
)
self.state = State.DATA
+ self._search_position = 0
+ else:
+ # Update the search start position to be equal to the
+ # current buffer length (already searched) minus a
+ # safe buffer for part of the search target.
+ self._search_position = max(0, len(self.buffer) - SEARCH_EXTRA_LENGTH)
elif self.state == State.DATA:
if self.buffer.find(b"--" + self.boundary) == -1:
diff --git a/contrib/python/Werkzeug/py3/werkzeug/sansio/request.py b/contrib/python/Werkzeug/py3/werkzeug/sansio/request.py
index 040074f3a7..e100a1f27c 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/sansio/request.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/sansio/request.py
@@ -19,7 +19,6 @@ from ..datastructures import RequestCacheControl
from ..http import parse_accept_header
from ..http import parse_authorization_header
from ..http import parse_cache_control_header
-from ..http import parse_cookie
from ..http import parse_date
from ..http import parse_etags
from ..http import parse_if_range_header
@@ -29,9 +28,9 @@ from ..http import parse_range_header
from ..http import parse_set_header
from ..urls import url_decode
from ..user_agent import UserAgent
-from ..useragents import _UserAgent as _DeprecatedUserAgent
from ..utils import cached_property
from ..utils import header_property
+from .http import parse_cookie
from .utils import get_current_url
from .utils import get_host
@@ -95,7 +94,7 @@ class Request:
#: .. versionadded:: 0.6
list_storage_class: t.Type[t.List] = ImmutableList
- user_agent_class: t.Type[UserAgent] = _DeprecatedUserAgent
+ user_agent_class: t.Type[UserAgent] = UserAgent
"""The class used and returned by the :attr:`user_agent` property to
parse the header. Defaults to
:class:`~werkzeug.user_agent.UserAgent`, which does no parsing. An
diff --git a/contrib/python/Werkzeug/py3/werkzeug/sansio/response.py b/contrib/python/Werkzeug/py3/werkzeug/sansio/response.py
index 82817e8c11..de0bec2967 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/sansio/response.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/sansio/response.py
@@ -92,7 +92,7 @@ class Response:
default_status = 200
#: the default mimetype if none is provided.
- default_mimetype = "text/plain"
+ default_mimetype: t.Optional[str] = "text/plain"
#: Warn if a cookie header exceeds this size. The default, 4093, should be
#: safely `supported by most browsers <cookie_>`_. A cookie larger than
@@ -171,27 +171,23 @@ class Response:
if len(split_status) == 0:
raise ValueError("Empty status argument")
- if len(split_status) > 1:
- if split_status[0].isdigit():
- # code and message
- return status, int(split_status[0])
-
- # multi-word message
- return f"0 {status}", 0
-
- if split_status[0].isdigit():
- # code only
+ try:
status_code = int(split_status[0])
+ except ValueError:
+ # only message
+ return f"0 {status}", 0
- try:
- status = f"{status_code} {HTTP_STATUS_CODES[status_code].upper()}"
- except KeyError:
- status = f"{status_code} UNKNOWN"
-
+ if len(split_status) > 1:
+ # code and message
return status, status_code
- # one-word message
- return f"0 {status}", 0
+ # only code, look up message
+ try:
+ status = f"{status_code} {HTTP_STATUS_CODES[status_code].upper()}"
+ except KeyError:
+ status = f"{status_code} UNKNOWN"
+
+ return status, status_code
def set_cookie(
self,
@@ -438,9 +434,13 @@ class Response:
value = self.headers.get("retry-after")
if value is None:
return None
- elif value.isdigit():
- return datetime.now(timezone.utc) + timedelta(seconds=int(value))
- return parse_date(value)
+
+ try:
+ seconds = int(value)
+ except ValueError:
+ return parse_date(value)
+
+ return datetime.now(timezone.utc) + timedelta(seconds=seconds)
@retry_after.setter
def retry_after(self, value: t.Optional[t.Union[datetime, int, str]]) -> None:
diff --git a/contrib/python/Werkzeug/py3/werkzeug/sansio/utils.py b/contrib/python/Werkzeug/py3/werkzeug/sansio/utils.py
index 1b4d8920cb..e639dcb40f 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/sansio/utils.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/sansio/utils.py
@@ -140,3 +140,26 @@ def get_current_url(
url.append(url_quote(query_string, safe=":&%=+$!*'(),"))
return uri_to_iri("".join(url))
+
+
+def get_content_length(
+ http_content_length: t.Union[str, None] = None,
+ http_transfer_encoding: t.Union[str, None] = "",
+) -> t.Optional[int]:
+ """Returns the content length as an integer or ``None`` if
+ unavailable or chunked transfer encoding is used.
+
+ :param http_content_length: The Content-Length HTTP header.
+ :param http_transfer_encoding: The Transfer-Encoding HTTP header.
+
+ .. versionadded:: 2.2
+ """
+ if http_transfer_encoding == "chunked":
+ return None
+
+ if http_content_length is not None:
+ try:
+ return max(0, int(http_content_length))
+ except (ValueError, TypeError):
+ pass
+ return None
diff --git a/contrib/python/Werkzeug/py3/werkzeug/security.py b/contrib/python/Werkzeug/py3/werkzeug/security.py
index e23040af9b..18d0919f83 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/security.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/security.py
@@ -4,7 +4,6 @@ import os
import posixpath
import secrets
import typing as t
-import warnings
if t.TYPE_CHECKING:
pass
@@ -17,117 +16,6 @@ _os_alt_seps: t.List[str] = list(
)
-def pbkdf2_hex(
- data: t.Union[str, bytes],
- salt: t.Union[str, bytes],
- iterations: int = DEFAULT_PBKDF2_ITERATIONS,
- keylen: t.Optional[int] = None,
- hashfunc: t.Optional[t.Union[str, t.Callable]] = None,
-) -> str:
- """Like :func:`pbkdf2_bin`, but returns a hex-encoded string.
-
- :param data: the data to derive.
- :param salt: the salt for the derivation.
- :param iterations: the number of iterations.
- :param keylen: the length of the resulting key. If not provided,
- the digest size will be used.
- :param hashfunc: the hash function to use. This can either be the
- string name of a known hash function, or a function
- from the hashlib module. Defaults to sha256.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :func:`hashlib.pbkdf2_hmac`
- instead.
-
- .. versionadded:: 0.9
- """
- warnings.warn(
- "'pbkdf2_hex' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'hashlib.pbkdf2_hmac().hex()' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return pbkdf2_bin(data, salt, iterations, keylen, hashfunc).hex()
-
-
-def pbkdf2_bin(
- data: t.Union[str, bytes],
- salt: t.Union[str, bytes],
- iterations: int = DEFAULT_PBKDF2_ITERATIONS,
- keylen: t.Optional[int] = None,
- hashfunc: t.Optional[t.Union[str, t.Callable]] = None,
-) -> bytes:
- """Returns a binary digest for the PBKDF2 hash algorithm of `data`
- with the given `salt`. It iterates `iterations` times and produces a
- key of `keylen` bytes. By default, SHA-256 is used as hash function;
- a different hashlib `hashfunc` can be provided.
-
- :param data: the data to derive.
- :param salt: the salt for the derivation.
- :param iterations: the number of iterations.
- :param keylen: the length of the resulting key. If not provided
- the digest size will be used.
- :param hashfunc: the hash function to use. This can either be the
- string name of a known hash function or a function
- from the hashlib module. Defaults to sha256.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :func:`hashlib.pbkdf2_hmac`
- instead.
-
- .. versionadded:: 0.9
- """
- warnings.warn(
- "'pbkdf2_bin' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'hashlib.pbkdf2_hmac()' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- if isinstance(data, str):
- data = data.encode("utf8")
-
- if isinstance(salt, str):
- salt = salt.encode("utf8")
-
- if not hashfunc:
- hash_name = "sha256"
- elif callable(hashfunc):
- hash_name = hashfunc().name
- else:
- hash_name = hashfunc
-
- return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen)
-
-
-def safe_str_cmp(a: str, b: str) -> bool:
- """This function compares strings in somewhat constant time. This
- requires that the length of at least one string is known in advance.
-
- Returns `True` if the two strings are equal, or `False` if they are not.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use
- :func:`hmac.compare_digest` instead.
-
- .. versionadded:: 0.7
- """
- warnings.warn(
- "'safe_str_cmp' is deprecated and will be removed in Werkzeug"
- " 2.1. Use 'hmac.compare_digest' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- if isinstance(a, str):
- a = a.encode("utf-8") # type: ignore
-
- if isinstance(b, str):
- b = b.encode("utf-8") # type: ignore
-
- return hmac.compare_digest(a, b)
-
-
def gen_salt(length: int) -> str:
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
@@ -228,6 +116,11 @@ def safe_join(directory: str, *pathnames: str) -> t.Optional[str]:
base directory.
:return: A safe path, otherwise ``None``.
"""
+ if not directory:
+ # Ensure we end up with ./path if directory="" is given,
+ # otherwise the first untrusted part could become trusted.
+ directory = "."
+
parts = [directory]
for filename in pathnames:
diff --git a/contrib/python/Werkzeug/py3/werkzeug/serving.py b/contrib/python/Werkzeug/py3/werkzeug/serving.py
index 80e4192ead..c482469863 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/serving.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/serving.py
@@ -11,15 +11,13 @@ It provides features like interactive debugging and code reloading. Use
from myapp import create_app
from werkzeug import run_simple
"""
+import errno
import io
import os
-import platform
-import signal
import socket
import socketserver
import sys
import typing as t
-import warnings
from datetime import datetime as dt
from datetime import timedelta
from datetime import timezone
@@ -70,7 +68,6 @@ except AttributeError:
af_unix = None # type: ignore
LISTEN_QUEUE = 128
-can_open_by_fd = not platform.system() == "Windows" and hasattr(socket, "fromfd")
_TSSLContextArg = t.Optional[
t.Union["ssl.SSLContext", t.Tuple[str, t.Optional[str]], "te.Literal['adhoc']"]
@@ -161,15 +158,6 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
def make_environ(self) -> "WSGIEnvironment":
request_url = url_parse(self.path)
-
- def shutdown_server() -> None:
- warnings.warn(
- "The 'environ['werkzeug.server.shutdown']' function is"
- " deprecated and will be removed in Werkzeug 2.1.",
- stacklevel=2,
- )
- self.server.shutdown_signal = True
-
url_scheme = "http" if self.server.ssl_context is None else "https"
if not self.client_address:
@@ -195,7 +183,6 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
"wsgi.multithread": self.server.multithread,
"wsgi.multiprocess": self.server.multiprocess,
"wsgi.run_once": False,
- "werkzeug.server.shutdown": shutdown_server,
"werkzeug.socket": self.connection,
"SERVER_SOFTWARE": self.server_version,
"REQUEST_METHOD": self.command,
@@ -258,9 +245,10 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
headers_set: t.Optional[t.List[t.Tuple[str, str]]] = None
status_sent: t.Optional[str] = None
headers_sent: t.Optional[t.List[t.Tuple[str, str]]] = None
+ chunk_response: bool = False
def write(data: bytes) -> None:
- nonlocal status_sent, headers_sent
+ nonlocal status_sent, headers_sent, chunk_response
assert status_set is not None, "write() before start_response"
assert headers_set is not None, "write() before start_response"
if status_sent is None:
@@ -275,24 +263,45 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
header_keys = set()
for key, value in headers_sent:
self.send_header(key, value)
- key = key.lower()
- header_keys.add(key)
- if not (
- "content-length" in header_keys
- or environ["REQUEST_METHOD"] == "HEAD"
- or code < 200
- or code in (204, 304)
+ header_keys.add(key.lower())
+
+ # Use chunked transfer encoding if there is no content
+ # length. Do not use for 1xx and 204 responses. 304
+ # responses and HEAD requests are also excluded, which
+ # is the more conservative behavior and matches other
+ # parts of the code.
+ # https://httpwg.org/specs/rfc7230.html#rfc.section.3.3.1
+ if (
+ not (
+ "content-length" in header_keys
+ or environ["REQUEST_METHOD"] == "HEAD"
+ or (100 <= code < 200)
+ or code in {204, 304}
+ )
+ and self.protocol_version >= "HTTP/1.1"
):
- self.close_connection = True
- self.send_header("Connection", "close")
- if "server" not in header_keys:
- self.send_header("Server", self.version_string())
- if "date" not in header_keys:
- self.send_header("Date", self.date_time_string())
+ chunk_response = True
+ self.send_header("Transfer-Encoding", "chunked")
+
+ # Always close the connection. This disables HTTP/1.1
+ # keep-alive connections. They aren't handled well by
+ # Python's http.server because it doesn't know how to
+ # drain the stream before the next request line.
+ self.send_header("Connection", "close")
self.end_headers()
assert isinstance(data, bytes), "applications must write bytes"
- self.wfile.write(data)
+
+ if data:
+ if chunk_response:
+ self.wfile.write(hex(len(data))[2:].encode())
+ self.wfile.write(b"\r\n")
+
+ self.wfile.write(data)
+
+ if chunk_response:
+ self.wfile.write(b"\r\n")
+
self.wfile.flush()
def start_response(status, headers, exc_info=None): # type: ignore
@@ -316,6 +325,8 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
write(data)
if not headers_sent:
write(b"")
+ if chunk_response:
+ self.wfile.write(b"0\r\n\r\n")
finally:
if hasattr(application_iter, "close"):
application_iter.close() # type: ignore
@@ -324,12 +335,13 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
execute(self.server.app)
except (ConnectionError, socket.timeout) as e:
self.connection_dropped(e, environ)
- except Exception:
+ except Exception as e:
if self.server.passthrough_errors:
raise
- from .debug.tbtools import get_current_traceback
- traceback = get_current_traceback(ignore_system_exceptions=True)
+ if status_sent is not None and chunk_response:
+ self.close_connection = True
+
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
@@ -339,12 +351,16 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
execute(InternalServerError())
except Exception:
pass
- self.server.log("error", "Error on request:\n%s", traceback.plaintext)
+
+ from .debug.tbtools import DebugTraceback
+
+ msg = DebugTraceback(e).render_traceback_text()
+ self.server.log("error", f"Error on request:\n{msg}")
def handle(self) -> None:
"""Handles a request ignoring dropped connections."""
try:
- BaseHTTPRequestHandler.handle(self)
+ super().handle()
except (ConnectionError, socket.timeout) as e:
self.connection_dropped(e)
except Exception as e:
@@ -352,16 +368,6 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
self.log_error("SSL error occurred: %s", e)
else:
raise
- if self.server.shutdown_signal:
- self.initiate_shutdown()
-
- def initiate_shutdown(self) -> None:
- if is_running_from_reloader():
- # Windows does not provide SIGKILL, go with SIGTERM then.
- sig = getattr(signal, "SIGKILL", signal.SIGTERM)
- os.kill(os.getpid(), sig)
-
- self.server._BaseServer__shutdown_request = True # type: ignore
def connection_dropped(
self, error: BaseException, environ: t.Optional["WSGIEnvironment"] = None
@@ -370,25 +376,13 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
nothing happens.
"""
- def handle_one_request(self) -> None:
- """Handle a single HTTP request."""
- self.raw_requestline = self.rfile.readline()
- if not self.raw_requestline:
- self.close_connection = True
- elif self.parse_request():
- self.run_wsgi()
-
- def send_response(self, code: int, message: t.Optional[str] = None) -> None:
- """Send the response header and log the response code."""
- self.log_request(code)
- if message is None:
- message = self.responses[code][0] if code in self.responses else ""
- if self.request_version != "HTTP/0.9":
- hdr = f"{self.protocol_version} {code} {message}\r\n"
- self.wfile.write(hdr.encode("ascii"))
-
- def version_string(self) -> str:
- return super().version_string().strip()
+ def __getattr__(self, name: str) -> t.Any:
+ # All HTTP methods are handled by run_wsgi.
+ if name.startswith("do_"):
+ return self.run_wsgi
+
+ # All other attributes are forwarded to the base class.
+ return getattr(super(), name)
def address_string(self) -> str:
if getattr(self, "environ", None):
@@ -414,21 +408,20 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
code = str(code)
- if _log_add_style:
- if code[0] == "1": # 1xx - Informational
- msg = _ansi_style(msg, "bold")
- elif code == "200": # 2xx - Success
- pass
- elif code == "304": # 304 - Resource Not Modified
- msg = _ansi_style(msg, "cyan")
- elif code[0] == "3": # 3xx - Redirection
- msg = _ansi_style(msg, "green")
- elif code == "404": # 404 - Resource Not Found
- msg = _ansi_style(msg, "yellow")
- elif code[0] == "4": # 4xx - Client Error
- msg = _ansi_style(msg, "bold", "red")
- else: # 5xx, or any other response
- msg = _ansi_style(msg, "bold", "magenta")
+ if code[0] == "1": # 1xx - Informational
+ msg = _ansi_style(msg, "bold")
+ elif code == "200": # 2xx - Success
+ pass
+ elif code == "304": # 304 - Resource Not Modified
+ msg = _ansi_style(msg, "cyan")
+ elif code[0] == "3": # 3xx - Redirection
+ msg = _ansi_style(msg, "green")
+ elif code == "404": # 404 - Resource Not Found
+ msg = _ansi_style(msg, "yellow")
+ elif code[0] == "4": # 4xx - Client Error
+ msg = _ansi_style(msg, "bold", "red")
+ else: # 5xx, or any other response
+ msg = _ansi_style(msg, "bold", "magenta")
self.log("info", '"%s" %s %s', msg, code, size)
@@ -447,6 +440,9 @@ class WSGIRequestHandler(BaseHTTPRequestHandler):
def _ansi_style(value: str, *styles: str) -> str:
+ if not _log_add_style:
+ return value
+
codes = {
"bold": 1,
"red": 31,
@@ -655,8 +651,10 @@ def get_interface_ip(family: socket.AddressFamily) -> str:
class BaseWSGIServer(HTTPServer):
+ """A WSGI server that that handles one request at a time.
- """Simple single-threaded, single-process WSGI server."""
+ Use :func:`make_server` to create a server instance.
+ """
multithread = False
multiprocess = False
@@ -675,39 +673,59 @@ class BaseWSGIServer(HTTPServer):
if handler is None:
handler = WSGIRequestHandler
- self.address_family = select_address_family(host, port)
+ # If the handler doesn't directly set a protocol version and
+ # thread or process workers are used, then allow chunked
+ # responses and keep-alive connections by enabling HTTP/1.1.
+ if "protocol_version" not in vars(handler) and (
+ self.multithread or self.multiprocess
+ ):
+ handler.protocol_version = "HTTP/1.1"
- if fd is not None:
- real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
- port = 0
+ self.host = host
+ self.port = port
+ self.app = app
+ self.passthrough_errors = passthrough_errors
- server_address = get_sockaddr(host, int(port), self.address_family)
+ self.address_family = address_family = select_address_family(host, port)
+ server_address = get_sockaddr(host, int(port), address_family)
- # remove socket file if it already exists
- if self.address_family == af_unix:
+ # Remove a leftover Unix socket file from a previous run. Don't
+ # remove a file that was set up by run_simple.
+ if address_family == af_unix and fd is None:
server_address = t.cast(str, server_address)
if os.path.exists(server_address):
os.unlink(server_address)
- super().__init__(server_address, handler) # type: ignore
-
- self.app = app
- self.passthrough_errors = passthrough_errors
- self.shutdown_signal = False
- self.host = host
- self.port = self.socket.getsockname()[1]
+ # Bind and activate will be handled manually, it should only
+ # happen if we're not using a socket that was already set up.
+ super().__init__(
+ server_address, # type: ignore[arg-type]
+ handler,
+ bind_and_activate=False,
+ )
- # Patch in the original socket.
- if fd is not None:
- self.socket.close()
- self.socket = real_sock
+ if fd is None:
+ # No existing socket descriptor, do bind_and_activate=True.
+ try:
+ self.server_bind()
+ self.server_activate()
+ except BaseException:
+ self.server_close()
+ raise
+ else:
+ # Use the passed in socket directly.
+ self.socket = socket.fromfd(fd, address_family, socket.SOCK_STREAM)
self.server_address = self.socket.getsockname()
+ if address_family != af_unix:
+ # If port was 0, this will record the bound port.
+ self.port = self.server_address[1]
+
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
- if ssl_context == "adhoc":
+ elif ssl_context == "adhoc":
ssl_context = generate_adhoc_ssl_context()
self.socket = ssl_context.wrap_socket(self.socket, server_side=True)
@@ -719,7 +737,6 @@ class BaseWSGIServer(HTTPServer):
_log(type, message, *args)
def serve_forever(self, poll_interval: float = 0.5) -> None:
- self.shutdown_signal = False
try:
super().serve_forever(poll_interval=poll_interval)
except KeyboardInterrupt:
@@ -735,18 +752,58 @@ class BaseWSGIServer(HTTPServer):
return super().handle_error(request, client_address)
+ def log_startup(self) -> None:
+ """Show information about the address when starting the server."""
+ dev_warning = (
+ "WARNING: This is a development server. Do not use it in a production"
+ " deployment. Use a production WSGI server instead."
+ )
+ dev_warning = _ansi_style(dev_warning, "bold", "red")
+ messages = [dev_warning]
+
+ if self.address_family == af_unix:
+ messages.append(f" * Running on {self.host}")
+ else:
+ scheme = "http" if self.ssl_context is None else "https"
+ display_hostname = self.host
+
+ if self.host in {"0.0.0.0", "::"}:
+ messages.append(f" * Running on all addresses ({self.host})")
+
+ if self.host == "0.0.0.0":
+ localhost = "127.0.0.1"
+ display_hostname = get_interface_ip(socket.AF_INET)
+ else:
+ localhost = "[::1]"
+ display_hostname = get_interface_ip(socket.AF_INET6)
+
+ messages.append(f" * Running on {scheme}://{localhost}:{self.port}")
+
+ if ":" in display_hostname:
+ display_hostname = f"[{display_hostname}]"
+
+ messages.append(f" * Running on {scheme}://{display_hostname}:{self.port}")
+
+ _log("info", "\n".join(messages))
+
class ThreadedWSGIServer(socketserver.ThreadingMixIn, BaseWSGIServer):
+ """A WSGI server that handles concurrent requests in separate
+ threads.
- """A WSGI server that does threading."""
+ Use :func:`make_server` to create a server instance.
+ """
multithread = True
daemon_threads = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
+ """A WSGI server that handles concurrent requests in separate forked
+ processes.
- """A WSGI server that does forking."""
+ Use :func:`make_server` to create a server instance.
+ """
multiprocess = True
@@ -763,9 +820,8 @@ class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
) -> None:
if not can_fork:
raise ValueError("Your platform does not support forking.")
- BaseWSGIServer.__init__(
- self, host, port, app, handler, passthrough_errors, ssl_context, fd
- )
+
+ super().__init__(host, port, app, handler, passthrough_errors, ssl_context, fd)
self.max_children = processes
@@ -780,16 +836,24 @@ def make_server(
ssl_context: t.Optional[_TSSLContextArg] = None,
fd: t.Optional[int] = None,
) -> BaseWSGIServer:
- """Create a new server instance that is either threaded, or forks
- or just processes one request after another.
+ """Create an appropriate WSGI server instance based on the value of
+ ``threaded`` and ``processes``.
+
+ This is called from :func:`run_simple`, but can be used separately
+ to have access to the server object, such as to run it in a separate
+ thread.
+
+ See :func:`run_simple` for parameter docs.
"""
if threaded and processes > 1:
- raise ValueError("cannot have a multithreaded and multi process server.")
- elif threaded:
+ raise ValueError("Cannot have a multi-thread and multi-process server.")
+
+ if threaded:
return ThreadedWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
- elif processes > 1:
+
+ if processes > 1:
return ForkingWSGIServer(
host,
port,
@@ -800,21 +864,75 @@ def make_server(
ssl_context,
fd=fd,
)
- else:
- return BaseWSGIServer(
- host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
- )
+
+ return BaseWSGIServer(
+ host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
+ )
def is_running_from_reloader() -> bool:
- """Checks if the application is running from within the Werkzeug
- reloader subprocess.
+ """Check if the server is running as a subprocess within the
+ Werkzeug reloader.
.. versionadded:: 0.10
"""
return os.environ.get("WERKZEUG_RUN_MAIN") == "true"
+def prepare_socket(hostname: str, port: int) -> socket.socket:
+ """Prepare a socket for use by the WSGI server and reloader.
+
+ The socket is marked inheritable so that it can be kept across
+ reloads instead of breaking connections.
+
+ Catch errors during bind and show simpler error messages. For
+ "address already in use", show instructions for resolving the issue,
+ with special instructions for macOS.
+
+ This is called from :func:`run_simple`, but can be used separately
+ to control server creation with :func:`make_server`.
+ """
+ address_family = select_address_family(hostname, port)
+ server_address = get_sockaddr(hostname, port, address_family)
+ s = socket.socket(address_family, socket.SOCK_STREAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.set_inheritable(True)
+
+ # Remove the socket file if it already exists.
+ if address_family == af_unix:
+ server_address = t.cast(str, server_address)
+
+ if os.path.exists(server_address):
+ os.unlink(server_address)
+
+ # Catch connection issues and show them without the traceback. Show
+ # extra instructions for "address already in use", and for macOS.
+ try:
+ s.bind(server_address)
+ except OSError as e:
+ print(e.strerror, file=sys.stderr)
+
+ if e.errno == errno.EADDRINUSE:
+ print(
+ f"Port {port} is in use by another program. Either"
+ " identify and stop that program, or start the"
+ " server with a different port.",
+ file=sys.stderr,
+ )
+
+ if sys.platform == "darwin" and port == 5000:
+ print(
+ "On macOS, try disabling the 'AirPlay Receiver'"
+ " service from System Preferences -> Sharing.",
+ file=sys.stderr,
+ )
+
+ sys.exit(1)
+
+ s.listen(LISTEN_QUEUE)
+ return s
+
+
def run_simple(
hostname: str,
port: int,
@@ -833,256 +951,148 @@ def run_simple(
passthrough_errors: bool = False,
ssl_context: t.Optional[_TSSLContextArg] = None,
) -> None:
- """Start a WSGI application. Optional features include a reloader,
- multithreading and fork support.
+ """Start a development server for a WSGI application. Various
+ optional features can be enabled.
+
+ .. warning::
- This function has a command-line interface too::
+ Do not use the development server when deploying to production.
+ It is intended for use only during local development. It is not
+ designed to be particularly efficient, stable, or secure.
- python -m werkzeug.serving --help
+ :param hostname: The host to bind to, for example ``'localhost'``.
+ Can be a domain, IPv4 or IPv6 address, or file path starting
+ with ``unix://`` for a Unix socket.
+ :param port: The port to bind to, for example ``8080``. Using ``0``
+ tells the OS to pick a random free port.
+ :param application: The WSGI application to run.
+ :param use_reloader: Use a reloader process to restart the server
+ process when files are changed.
+ :param use_debugger: Use Werkzeug's debugger, which will show
+ formatted tracebacks on unhandled exceptions.
+ :param use_evalex: Make the debugger interactive. A Python terminal
+ can be opened for any frame in the traceback. Some protection is
+ provided by requiring a PIN, but this should never be enabled
+ on a publicly visible server.
+ :param extra_files: The reloader will watch these files for changes
+ in addition to Python modules. For example, watch a
+ configuration file.
+ :param exclude_patterns: The reloader will ignore changes to any
+ files matching these :mod:`fnmatch` patterns. For example,
+ ignore cache files.
+ :param reloader_interval: How often the reloader tries to check for
+ changes.
+ :param reloader_type: The reloader to use. The ``'stat'`` reloader
+ is built in, but may require significant CPU to watch files. The
+ ``'watchdog'`` reloader is much more efficient but requires
+ installing the ``watchdog`` package first.
+ :param threaded: Handle concurrent requests using threads. Cannot be
+ used with ``processes``.
+ :param processes: Handle concurrent requests using up to this number
+ of processes. Cannot be used with ``threaded``.
+ :param request_handler: Use a different
+ :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass to
+ handle requests.
+ :param static_files: A dict mapping URL prefixes to directories to
+ serve static files from using
+ :class:`~werkzeug.middleware.SharedDataMiddleware`.
+ :param passthrough_errors: Don't catch unhandled exceptions at the
+ server level, let the server crash instead. If ``use_debugger``
+ is enabled, the debugger will still catch such errors.
+ :param ssl_context: Configure TLS to serve over HTTPS. Can be an
+ :class:`ssl.SSLContext` object, a ``(cert_file, key_file)``
+ tuple to create a typical context, or the string ``'adhoc'`` to
+ generate a temporary self-signed certificate.
+
+ .. versionchanged:: 2.1
+ Instructions are shown for dealing with an "address already in
+ use" error.
+
+ .. versionchanged:: 2.1
+ Running on ``0.0.0.0`` or ``::`` shows the loopback IP in
+ addition to a real IP.
+
+ .. versionchanged:: 2.1
+ The command-line interface was removed.
.. versionchanged:: 2.0
- Added ``exclude_patterns`` parameter.
+ Running on ``0.0.0.0`` or ``::`` shows a real IP address that
+ was bound as well as a warning not to run the development server
+ in production.
- .. versionadded:: 0.5
- `static_files` was added to simplify serving of static files as well
- as `passthrough_errors`.
+ .. versionchanged:: 2.0
+ The ``exclude_patterns`` parameter was added.
- .. versionadded:: 0.6
- support for SSL was added.
+ .. versionchanged:: 0.15
+ Bind to a Unix socket by passing a ``hostname`` that starts with
+ ``unix://``.
- .. versionadded:: 0.8
- Added support for automatically loading a SSL context from certificate
- file and private key.
+ .. versionchanged:: 0.10
+ Improved the reloader and added support for changing the backend
+ through the ``reloader_type`` parameter.
- .. versionadded:: 0.9
- Added command-line interface.
+ .. versionchanged:: 0.9
+ A command-line interface was added.
- .. versionadded:: 0.10
- Improved the reloader and added support for changing the backend
- through the `reloader_type` parameter. See :ref:`reloader`
- for more information.
+ .. versionchanged:: 0.8
+ ``ssl_context`` can be a tuple of paths to the certificate and
+ private key files.
- .. versionchanged:: 0.15
- Bind to a Unix socket by passing a path that starts with
- ``unix://`` as the ``hostname``.
+ .. versionchanged:: 0.6
+ The ``ssl_context`` parameter was added.
- :param hostname: The host to bind to, for example ``'localhost'``.
- If the value is a path that starts with ``unix://`` it will bind
- to a Unix socket instead of a TCP socket..
- :param port: The port for the server. eg: ``8080``
- :param application: the WSGI application to execute
- :param use_reloader: should the server automatically restart the python
- process if modules were changed?
- :param use_debugger: should the werkzeug debugging system be used?
- :param use_evalex: should the exception evaluation feature be enabled?
- :param extra_files: a list of files the reloader should watch
- additionally to the modules. For example configuration
- files.
- :param exclude_patterns: List of :mod:`fnmatch` patterns to ignore
- when running the reloader. For example, ignore cache files that
- shouldn't reload when updated.
- :param reloader_interval: the interval for the reloader in seconds.
- :param reloader_type: the type of reloader to use. The default is
- auto detection. Valid values are ``'stat'`` and
- ``'watchdog'``. See :ref:`reloader` for more
- information.
- :param threaded: should the process handle each request in a separate
- thread?
- :param processes: if greater than 1 then handle each request in a new process
- up to this maximum number of concurrent processes.
- :param request_handler: optional parameter that can be used to replace
- the default one. You can use this to replace it
- with a different
- :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
- subclass.
- :param static_files: a list or dict of paths for static files. This works
- exactly like :class:`SharedDataMiddleware`, it's actually
- just wrapping the application in that middleware before
- serving.
- :param passthrough_errors: set this to `True` to disable the error catching.
- This means that the server will die on errors but
- it can be useful to hook debuggers in (pdb etc.)
- :param ssl_context: an SSL context for the connection. Either an
- :class:`ssl.SSLContext`, a tuple in the form
- ``(cert_file, pkey_file)``, the string ``'adhoc'`` if
- the server should automatically create one, or ``None``
- to disable SSL (which is the default).
+ .. versionchanged:: 0.5
+ The ``static_files`` and ``passthrough_errors`` parameters were
+ added.
"""
if not isinstance(port, int):
raise TypeError("port must be an integer")
- if use_debugger:
- from .debug import DebuggedApplication
- application = DebuggedApplication(application, use_evalex)
if static_files:
from .middleware.shared_data import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
- def log_startup(sock: socket.socket) -> None:
- all_addresses_message = (
- " * Running on all addresses.\n"
- " WARNING: This is a development server. Do not use it in"
- " a production deployment."
- )
-
- if sock.family == af_unix:
- _log("info", " * Running on %s (Press CTRL+C to quit)", hostname)
- else:
- if hostname == "0.0.0.0":
- _log("warning", all_addresses_message)
- display_hostname = get_interface_ip(socket.AF_INET)
- elif hostname == "::":
- _log("warning", all_addresses_message)
- display_hostname = get_interface_ip(socket.AF_INET6)
- else:
- display_hostname = hostname
+ if use_debugger:
+ from .debug import DebuggedApplication
- if ":" in display_hostname:
- display_hostname = f"[{display_hostname}]"
+ application = DebuggedApplication(application, evalex=use_evalex)
- _log(
- "info",
- " * Running on %s://%s:%d/ (Press CTRL+C to quit)",
- "http" if ssl_context is None else "https",
- display_hostname,
- sock.getsockname()[1],
- )
+ if not is_running_from_reloader():
+ s = prepare_socket(hostname, port)
+ fd = s.fileno()
+ # Silence a ResourceWarning about an unclosed socket. This object is no longer
+ # used, the server will create another with fromfd.
+ s.detach()
+ os.environ["WERKZEUG_SERVER_FD"] = str(fd)
+ else:
+ fd = int(os.environ["WERKZEUG_SERVER_FD"])
+
+ srv = make_server(
+ hostname,
+ port,
+ application,
+ threaded,
+ processes,
+ request_handler,
+ passthrough_errors,
+ ssl_context,
+ fd=fd,
+ )
- def inner() -> None:
- try:
- fd: t.Optional[int] = int(os.environ["WERKZEUG_SERVER_FD"])
- except (LookupError, ValueError):
- fd = None
- srv = make_server(
- hostname,
- port,
- application,
- threaded,
- processes,
- request_handler,
- passthrough_errors,
- ssl_context,
- fd=fd,
- )
- if fd is None:
- log_startup(srv.socket)
- srv.serve_forever()
+ if not is_running_from_reloader():
+ srv.log_startup()
+ _log("info", _ansi_style("Press CTRL+C to quit", "yellow"))
if use_reloader:
- # If we're not running already in the subprocess that is the
- # reloader we want to open up a socket early to make sure the
- # port is actually available.
- if not is_running_from_reloader():
- if port == 0 and not can_open_by_fd:
- raise ValueError(
- "Cannot bind to a random port with enabled "
- "reloader if the Python interpreter does "
- "not support socket opening by fd."
- )
-
- # Create and destroy a socket so that any exceptions are
- # raised before we spawn a separate Python interpreter and
- # lose this ability.
- address_family = select_address_family(hostname, port)
- server_address = get_sockaddr(hostname, port, address_family)
- s = socket.socket(address_family, socket.SOCK_STREAM)
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- s.bind(server_address)
- s.set_inheritable(True)
-
- # If we can open the socket by file descriptor, then we can just
- # reuse this one and our socket will survive the restarts.
- if can_open_by_fd:
- os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
- s.listen(LISTEN_QUEUE)
- log_startup(s)
- else:
- s.close()
- if address_family == af_unix:
- server_address = t.cast(str, server_address)
- _log("info", "Unlinking %s", server_address)
- os.unlink(server_address)
-
- from ._reloader import run_with_reloader as _rwr
+ from ._reloader import run_with_reloader
- _rwr(
- inner,
+ run_with_reloader(
+ srv.serve_forever,
extra_files=extra_files,
exclude_patterns=exclude_patterns,
interval=reloader_interval,
reloader_type=reloader_type,
)
else:
- inner()
-
-
-def run_with_reloader(*args: t.Any, **kwargs: t.Any) -> None:
- """Run a process with the reloader. This is not a public API, do
- not use this function.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1.
- """
- from ._reloader import run_with_reloader as _rwr
-
- warnings.warn(
- (
- "'run_with_reloader' is a private API, it will no longer be"
- " accessible in Werkzeug 2.1. Use 'run_simple' instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- _rwr(*args, **kwargs)
-
-
-def main() -> None:
- """A simple command-line interface for :py:func:`run_simple`."""
- import argparse
- from .utils import import_string
-
- _log("warning", "This CLI is deprecated and will be removed in version 2.1.")
-
- parser = argparse.ArgumentParser(
- description="Run the given WSGI application with the development server.",
- allow_abbrev=False,
- )
- parser.add_argument(
- "-b",
- "--bind",
- dest="address",
- help="The hostname:port the app should listen on.",
- )
- parser.add_argument(
- "-d",
- "--debug",
- action="store_true",
- help="Show the interactive debugger for unhandled exceptions.",
- )
- parser.add_argument(
- "-r",
- "--reload",
- action="store_true",
- help="Reload the process if modules change.",
- )
- parser.add_argument(
- "application", help="Application to import and serve, in the form module:app."
- )
- args = parser.parse_args()
- hostname, port = None, None
-
- if args.address:
- hostname, _, port = args.address.partition(":")
-
- run_simple(
- hostname=hostname or "127.0.0.1",
- port=int(port or 5000),
- application=import_string(args.application),
- use_reloader=args.reload,
- use_debugger=args.debug,
- )
-
-
-if __name__ == "__main__":
- main()
+ srv.serve_forever()
diff --git a/contrib/python/Werkzeug/py3/werkzeug/test.py b/contrib/python/Werkzeug/py3/werkzeug/test.py
index a6f6e40fc3..866aa97e53 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/test.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/test.py
@@ -1,7 +1,6 @@
import mimetypes
import sys
import typing as t
-import warnings
from collections import defaultdict
from datetime import datetime
from datetime import timedelta
@@ -39,6 +38,7 @@ from .urls import url_fix
from .urls import url_parse
from .urls import url_unparse
from .urls import url_unquote
+from .utils import cached_property
from .utils import get_content_type
from .wrappers.request import Request
from .wrappers.response import Response
@@ -67,6 +67,7 @@ def stream_encode_multipart(
stream: t.IO[bytes] = BytesIO()
total_length = 0
on_disk = False
+ write_binary: t.Callable[[bytes], int]
if use_tempfile:
@@ -306,6 +307,10 @@ class EnvironBuilder:
``Authorization`` header value. A ``(username, password)`` tuple
is a shortcut for ``Basic`` authorization.
+ .. versionchanged:: 2.1
+ ``CONTENT_TYPE`` and ``CONTENT_LENGTH`` are not duplicated as
+ header keys in the environ.
+
.. versionchanged:: 2.0
``REQUEST_URI`` and ``RAW_URI`` is the full raw URI including
the query string, not only the path.
@@ -322,7 +327,7 @@ class EnvironBuilder:
.. versionadded:: 0.15
The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing
- the path before perecent-decoding. This is not part of the WSGI
+ the path before percent-decoding. This is not part of the WSGI
PEP, but many WSGI servers include it.
.. versionchanged:: 0.6
@@ -693,8 +698,13 @@ class EnvironBuilder:
def server_port(self) -> int:
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(":", 1)
- if len(pieces) == 2 and pieces[1].isdigit():
- return int(pieces[1])
+
+ if len(pieces) == 2:
+ try:
+ return int(pieces[1])
+ except ValueError:
+ pass
+
if self.url_scheme == "https":
return 443
return 80
@@ -788,14 +798,15 @@ class EnvironBuilder:
)
headers = self.headers.copy()
+ # Don't send these as headers, they're part of the environ.
+ headers.remove("Content-Type")
+ headers.remove("Content-Length")
if content_type is not None:
result["CONTENT_TYPE"] = content_type
- headers.set("Content-Type", content_type)
if content_length is not None:
result["CONTENT_LENGTH"] = str(content_length)
- headers.set("Content-Length", content_length)
combined_headers = defaultdict(list)
@@ -839,6 +850,11 @@ class Client:
`allow_subdomain_redirects` to `True` as if not no external redirects
are allowed.
+ .. versionchanged:: 2.1
+ Removed deprecated behavior of treating the response as a
+ tuple. All data is available as properties on the returned
+ response object.
+
.. versionchanged:: 2.0
``response_wrapper`` is always a subclass of
:class:``TestResponse``.
@@ -1015,7 +1031,6 @@ class Client:
def open(
self,
*args: t.Any,
- as_tuple: bool = False,
buffered: bool = False,
follow_redirects: bool = False,
**kwargs: t.Any,
@@ -1034,6 +1049,9 @@ class Client:
:attr:`TestResponse.history` lists the intermediate
responses.
+ .. versionchanged:: 2.1
+ Removed the ``as_tuple`` parameter.
+
.. versionchanged:: 2.0
``as_tuple`` is deprecated and will be removed in Werkzeug
2.1. Use :attr:`TestResponse.request` and
@@ -1079,7 +1097,10 @@ class Client:
redirects = set()
history: t.List["TestResponse"] = []
- while follow_redirects and response.status_code in {
+ if not follow_redirects:
+ return response
+
+ while response.status_code in {
301,
302,
303,
@@ -1106,24 +1127,12 @@ class Client:
history.append(response)
response = self.resolve_redirect(response, buffered=buffered)
else:
- # This is the final request after redirects, or not
- # following redirects.
+ # This is the final request after redirects.
response.history = tuple(history)
# Close the input stream when closing the response, in case
# the input is an open temporary file.
response.call_on_close(request.input_stream.close)
-
- if as_tuple:
- warnings.warn(
- "'as_tuple' is deprecated and will be removed in"
- " Werkzeug 2.1. Access 'response.request.environ'"
- " instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return request.environ, response # type: ignore
-
- return response
+ return response
def get(self, *args: t.Any, **kw: t.Any) -> "TestResponse":
"""Call :meth:`open` with ``method`` set to ``GET``."""
@@ -1275,8 +1284,22 @@ class TestResponse(Response):
If the test request included large files, or if the application is
serving a file, call :meth:`close` to close any open files and
prevent Python showing a ``ResourceWarning``.
+
+ .. versionchanged:: 2.2
+ Set the ``default_mimetype`` to None to prevent a mimetype being
+ assumed if missing.
+
+ .. versionchanged:: 2.1
+ Removed deprecated behavior for treating the response instance
+ as a tuple.
+
+ .. versionadded:: 2.0
+ Test client methods always return instances of this class.
"""
+ default_mimetype = None
+ # Don't assume a mimetype, instead use whatever the response provides
+
request: Request
"""A request object with the environ used to make the request that
resulted in this response.
@@ -1304,28 +1327,11 @@ class TestResponse(Response):
self.history = history
self._compat_tuple = response, status, headers
- def __iter__(self) -> t.Iterator:
- warnings.warn(
- (
- "The test client no longer returns a tuple, it returns"
- " a 'TestResponse'. Tuple unpacking is deprecated and"
- " will be removed in Werkzeug 2.1. Access the"
- " attributes 'data', 'status', and 'headers' instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- return iter(self._compat_tuple)
-
- def __getitem__(self, item: int) -> t.Any:
- warnings.warn(
- (
- "The test client no longer returns a tuple, it returns"
- " a 'TestResponse'. Item indexing is deprecated and"
- " will be removed in Werkzeug 2.1. Access the"
- " attributes 'data', 'status', and 'headers' instead."
- ),
- DeprecationWarning,
- stacklevel=2,
- )
- return self._compat_tuple[item]
+ @cached_property
+ def text(self) -> str:
+ """The response data as text. A shortcut for
+ ``response.get_data(as_text=True)``.
+
+ .. versionadded:: 2.1
+ """
+ return self.get_data(as_text=True)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/testapp.py b/contrib/python/Werkzeug/py3/werkzeug/testapp.py
index 981f8878b1..0d7ffbb187 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/testapp.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/testapp.py
@@ -5,9 +5,10 @@ import base64
import os
import sys
import typing as t
-from html import escape
from textwrap import wrap
+from markupsafe import escape
+
from . import __version__ as _werkzeug_version
from .wrappers.request import Request
from .wrappers.response import Response
@@ -60,8 +61,8 @@ kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
TEMPLATE = """\
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
- "http://www.w3.org/TR/html4/loose.dtd">
+<!doctype html>
+<html lang=en>
<title>WSGI Information</title>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
@@ -181,8 +182,8 @@ def render_testapp(req: Request) -> bytes:
wsgi_env = []
sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
- value = "".join(wrap(escape(repr(value))))
- wsgi_env.append(f"<tr><th>{escape(str(key))}<td><code>{value}</code>")
+ value = "".join(wrap(str(escape(repr(value)))))
+ wsgi_env.append(f"<tr><th>{escape(key)}<td><code>{value}</code>")
sys_path = []
for item, virtual, expanded in iter_sys_path():
diff --git a/contrib/python/Werkzeug/py3/werkzeug/urls.py b/contrib/python/Werkzeug/py3/werkzeug/urls.py
index 9529da0c70..1cb9418d2f 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/urls.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/urls.py
@@ -7,7 +7,6 @@ import codecs
import os
import re
import typing as t
-import warnings
from ._internal import _check_str_tuple
from ._internal import _decode_idna
@@ -819,7 +818,6 @@ def iri_to_uri(
def url_decode(
s: t.AnyStr,
charset: str = "utf-8",
- decode_keys: None = None,
include_empty: bool = True,
errors: str = "replace",
separator: str = "&",
@@ -847,12 +845,6 @@ def url_decode(
.. versionchanged:: 0.5
The ``cls`` parameter was added.
"""
- if decode_keys is not None:
- warnings.warn(
- "'decode_keys' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
if cls is None:
from .datastructures import MultiDict # noqa: F811
@@ -871,13 +863,11 @@ def url_decode(
def url_decode_stream(
stream: t.IO[bytes],
charset: str = "utf-8",
- decode_keys: None = None,
include_empty: bool = True,
errors: str = "replace",
separator: bytes = b"&",
cls: t.Optional[t.Type["ds.MultiDict"]] = None,
limit: t.Optional[int] = None,
- return_iterator: bool = False,
) -> "ds.MultiDict[str, str]":
"""Works like :func:`url_decode` but decodes a stream. The behavior
of stream and limit follows functions like
@@ -905,24 +895,9 @@ def url_decode_stream(
"""
from .wsgi import make_chunk_iter
- if decode_keys is not None:
- warnings.warn(
- "'decode_keys' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
-
pair_iter = make_chunk_iter(stream, separator, limit)
decoder = _url_decode_impl(pair_iter, charset, include_empty, errors)
- if return_iterator:
- warnings.warn(
- "'return_iterator' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return decoder # type: ignore
-
if cls is None:
from .datastructures import MultiDict # noqa: F811
@@ -955,7 +930,6 @@ def _url_decode_impl(
def url_encode(
obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],
charset: str = "utf-8",
- encode_keys: None = None,
sort: bool = False,
key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,
separator: str = "&",
@@ -978,12 +952,6 @@ def url_encode(
.. versionchanged:: 0.5
Added the ``sort``, ``key``, and ``separator`` parameters.
"""
- if encode_keys is not None:
- warnings.warn(
- "'encode_keys' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
separator = _to_str(separator, "ascii")
return separator.join(_url_encode_impl(obj, charset, sort, key))
@@ -992,7 +960,6 @@ def url_encode_stream(
obj: t.Union[t.Mapping[str, str], t.Iterable[t.Tuple[str, str]]],
stream: t.Optional[t.IO[str]] = None,
charset: str = "utf-8",
- encode_keys: None = None,
sort: bool = False,
key: t.Optional[t.Callable[[t.Tuple[str, str]], t.Any]] = None,
separator: str = "&",
@@ -1017,12 +984,6 @@ def url_encode_stream(
.. versionadded:: 0.8
"""
- if encode_keys is not None:
- warnings.warn(
- "'encode_keys' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
separator = _to_str(separator, "ascii")
gen = _url_encode_impl(obj, charset, sort, key)
if stream is None:
@@ -1103,109 +1064,3 @@ def url_join(
path = s("/").join(segments)
return url_unparse((scheme, netloc, path, query, fragment))
-
-
-class Href:
- """Implements a callable that constructs URLs with the given base. The
- function can be called with any number of positional and keyword
- arguments which than are used to assemble the URL. Works with URLs
- and posix paths.
-
- Positional arguments are appended as individual segments to
- the path of the URL:
-
- >>> href = Href('/foo')
- >>> href('bar', 23)
- '/foo/bar/23'
- >>> href('foo', bar=23)
- '/foo/foo?bar=23'
-
- If any of the arguments (positional or keyword) evaluates to `None` it
- will be skipped. If no keyword arguments are given the last argument
- can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
- otherwise the keyword arguments are used for the query parameters, cutting
- off the first trailing underscore of the parameter name:
-
- >>> href(is_=42)
- '/foo?is=42'
- >>> href({'foo': 'bar'})
- '/foo?foo=bar'
-
- Combining of both methods is not allowed:
-
- >>> href({'foo': 'bar'}, bar=42)
- Traceback (most recent call last):
- ...
- TypeError: keyword arguments and query-dicts can't be combined
-
- Accessing attributes on the href object creates a new href object with
- the attribute name as prefix:
-
- >>> bar_href = href.bar
- >>> bar_href("blub")
- '/foo/bar/blub'
-
- If `sort` is set to `True` the items are sorted by `key` or the default
- sorting algorithm:
-
- >>> href = Href("/", sort=True)
- >>> href(a=1, b=2, c=3)
- '/?a=1&b=2&c=3'
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :mod:`werkzeug.routing`
- instead.
-
- .. versionadded:: 0.5
- `sort` and `key` were added.
- """
-
- def __init__( # type: ignore
- self, base="./", charset="utf-8", sort=False, key=None
- ):
- warnings.warn(
- "'Href' is deprecated and will be removed in Werkzeug 2.1."
- " Use 'werkzeug.routing' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- if not base:
- base = "./"
- self.base = base
- self.charset = charset
- self.sort = sort
- self.key = key
-
- def __getattr__(self, name): # type: ignore
- if name[:2] == "__":
- raise AttributeError(name)
- base = self.base
- if base[-1:] != "/":
- base += "/"
- return Href(url_join(base, name), self.charset, self.sort, self.key)
-
- def __call__(self, *path, **query): # type: ignore
- if path and isinstance(path[-1], dict):
- if query:
- raise TypeError("keyword arguments and query-dicts can't be combined")
- query, path = path[-1], path[:-1]
- elif query:
- query = {k[:-1] if k.endswith("_") else k: v for k, v in query.items()}
- path = "/".join(
- [
- _to_str(url_quote(x, self.charset), "ascii")
- for x in path
- if x is not None
- ]
- ).lstrip("/")
- rv = self.base
- if path:
- if not rv.endswith("/"):
- rv += "/"
- rv = url_join(rv, f"./{path}")
- if query:
- rv += "?" + _to_str(
- url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii"
- )
- return rv
diff --git a/contrib/python/Werkzeug/py3/werkzeug/useragents.py b/contrib/python/Werkzeug/py3/werkzeug/useragents.py
deleted file mode 100644
index 4deed8f464..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/useragents.py
+++ /dev/null
@@ -1,215 +0,0 @@
-import re
-import typing as t
-import warnings
-
-from .user_agent import UserAgent as _BaseUserAgent
-
-if t.TYPE_CHECKING:
- from _typeshed.wsgi import WSGIEnvironment
-
-
-class _UserAgentParser:
- platform_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
- (" cros ", "chromeos"),
- ("iphone|ios", "iphone"),
- ("ipad", "ipad"),
- (r"darwin\b|mac\b|os\s*x", "macos"),
- ("win", "windows"),
- (r"android", "android"),
- ("netbsd", "netbsd"),
- ("openbsd", "openbsd"),
- ("freebsd", "freebsd"),
- ("dragonfly", "dragonflybsd"),
- ("(sun|i86)os", "solaris"),
- (r"x11\b|lin(\b|ux)?", "linux"),
- (r"nintendo\s+wii", "wii"),
- ("irix", "irix"),
- ("hp-?ux", "hpux"),
- ("aix", "aix"),
- ("sco|unix_sv", "sco"),
- ("bsd", "bsd"),
- ("amiga", "amiga"),
- ("blackberry|playbook", "blackberry"),
- ("symbian", "symbian"),
- )
- browser_rules: t.ClassVar[t.Iterable[t.Tuple[str, str]]] = (
- ("googlebot", "google"),
- ("msnbot", "msn"),
- ("yahoo", "yahoo"),
- ("ask jeeves", "ask"),
- (r"aol|america\s+online\s+browser", "aol"),
- (r"opera|opr", "opera"),
- ("edge|edg", "edge"),
- ("chrome|crios", "chrome"),
- ("seamonkey", "seamonkey"),
- ("firefox|firebird|phoenix|iceweasel", "firefox"),
- ("galeon", "galeon"),
- ("safari|version", "safari"),
- ("webkit", "webkit"),
- ("camino", "camino"),
- ("konqueror", "konqueror"),
- ("k-meleon", "kmeleon"),
- ("netscape", "netscape"),
- (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"),
- ("lynx", "lynx"),
- ("links", "links"),
- ("Baiduspider", "baidu"),
- ("bingbot", "bing"),
- ("mozilla", "mozilla"),
- )
-
- _browser_version_re = r"(?:{pattern})[/\sa-z(]*(\d+[.\da-z]+)?"
- _language_re = re.compile(
- r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|"
- r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)"
- )
-
- def __init__(self) -> None:
- self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platform_rules]
- self.browsers = [
- (b, re.compile(self._browser_version_re.format(pattern=a), re.I))
- for a, b in self.browser_rules
- ]
-
- def __call__(
- self, user_agent: str
- ) -> t.Tuple[t.Optional[str], t.Optional[str], t.Optional[str], t.Optional[str]]:
- platform: t.Optional[str]
- browser: t.Optional[str]
- version: t.Optional[str]
- language: t.Optional[str]
-
- for platform, regex in self.platforms: # noqa: B007
- match = regex.search(user_agent)
- if match is not None:
- break
- else:
- platform = None
-
- # Except for Trident, all browser key words come after the last ')'
- last_closing_paren = 0
- if (
- not re.compile(r"trident/.+? rv:", re.I).search(user_agent)
- and ")" in user_agent
- and user_agent[-1] != ")"
- ):
- last_closing_paren = user_agent.rindex(")")
-
- for browser, regex in self.browsers: # noqa: B007
- match = regex.search(user_agent[last_closing_paren:])
- if match is not None:
- version = match.group(1)
- break
- else:
- browser = version = None
- match = self._language_re.search(user_agent)
- if match is not None:
- language = match.group(1) or match.group(2)
- else:
- language = None
- return platform, browser, version, language
-
-
-# It wasn't public, but users might have imported it anyway, show a
-# warning if a user created an instance.
-class UserAgentParser(_UserAgentParser):
- """A simple user agent parser. Used by the `UserAgent`.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use a dedicated parser library
- instead.
- """
-
- def __init__(self) -> None:
- warnings.warn(
- "'UserAgentParser' is deprecated and will be removed in"
- " Werkzeug 2.1. Use a dedicated parser library instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__()
-
-
-class _deprecated_property(property):
- def __init__(self, fget: t.Callable[["_UserAgent"], t.Any]) -> None:
- super().__init__(fget)
- self.message = (
- "The built-in user agent parser is deprecated and will be"
- f" removed in Werkzeug 2.1. The {fget.__name__!r} property"
- " will be 'None'. Subclass 'werkzeug.user_agent.UserAgent'"
- " and set 'Request.user_agent_class' to use a different"
- " parser."
- )
-
- def __get__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
- warnings.warn(self.message, DeprecationWarning, stacklevel=3)
- return super().__get__(*args, **kwargs)
-
-
-# This is what Request.user_agent returns for now, only show warnings on
-# attribute access, not creation.
-class _UserAgent(_BaseUserAgent):
- _parser = _UserAgentParser()
-
- def __init__(self, string: str) -> None:
- super().__init__(string)
- info = self._parser(string)
- self._platform, self._browser, self._version, self._language = info
-
- @_deprecated_property
- def platform(self) -> t.Optional[str]: # type: ignore
- return self._platform
-
- @_deprecated_property
- def browser(self) -> t.Optional[str]: # type: ignore
- return self._browser
-
- @_deprecated_property
- def version(self) -> t.Optional[str]: # type: ignore
- return self._version
-
- @_deprecated_property
- def language(self) -> t.Optional[str]: # type: ignore
- return self._language
-
-
-# This is what users might be importing, show warnings on create.
-class UserAgent(_UserAgent):
- """Represents a parsed user agent header value.
-
- This uses a basic parser to try to extract some information from the
- header.
-
- :param environ_or_string: The header value to parse, or a WSGI
- environ containing the header.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Subclass
- :class:`werkzeug.user_agent.UserAgent` (note the new module
- name) to use a dedicated parser instead.
-
- .. versionchanged:: 2.0
- Passing a WSGI environ is deprecated and will be removed in 2.1.
- """
-
- def __init__(self, environ_or_string: "t.Union[str, WSGIEnvironment]") -> None:
- if isinstance(environ_or_string, dict):
- warnings.warn(
- "Passing an environ to 'UserAgent' is deprecated and"
- " will be removed in Werkzeug 2.1. Pass the header"
- " value string instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- string = environ_or_string.get("HTTP_USER_AGENT", "")
- else:
- string = environ_or_string
-
- warnings.warn(
- "The 'werkzeug.useragents' module is deprecated and will be"
- " removed in Werkzeug 2.1. The new base API is"
- " 'werkzeug.user_agent.UserAgent'.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(string)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/utils.py b/contrib/python/Werkzeug/py3/werkzeug/utils.py
index 900723149a..672e6e5ade 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/utils.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/utils.py
@@ -1,4 +1,3 @@
-import codecs
import io
import mimetypes
import os
@@ -7,15 +6,14 @@ import re
import sys
import typing as t
import unicodedata
-import warnings
from datetime import datetime
-from html.entities import name2codepoint
from time import time
from zlib import adler32
+from markupsafe import escape
+
from ._internal import _DictAccessorProperty
from ._internal import _missing
-from ._internal import _parse_signature
from ._internal import _TAccessorValue
from .datastructures import Headers
from .exceptions import NotFound
@@ -68,7 +66,12 @@ class cached_property(property, t.Generic[_T]):
e.value = 16 # sets cache
del e.value # clears cache
- The class must have a ``__dict__`` for this to work.
+ If the class defines ``__slots__``, it must add ``_cache_{name}`` as
+ a slot. Alternatively, it can add ``__dict__``, but that's usually
+ not desirable.
+
+ .. versionchanged:: 2.1
+ Works with ``__slots__``.
.. versionchanged:: 2.0
``del obj.name`` clears the cached value.
@@ -82,59 +85,41 @@ class cached_property(property, t.Generic[_T]):
) -> None:
super().__init__(fget, doc=doc)
self.__name__ = name or fget.__name__
+ self.slot_name = f"_cache_{self.__name__}"
self.__module__ = fget.__module__
def __set__(self, obj: object, value: _T) -> None:
- obj.__dict__[self.__name__] = value
+ if hasattr(obj, "__dict__"):
+ obj.__dict__[self.__name__] = value
+ else:
+ setattr(obj, self.slot_name, value)
def __get__(self, obj: object, type: type = None) -> _T: # type: ignore
if obj is None:
return self # type: ignore
- value: _T = obj.__dict__.get(self.__name__, _missing)
+ obj_dict = getattr(obj, "__dict__", None)
+
+ if obj_dict is not None:
+ value: _T = obj_dict.get(self.__name__, _missing)
+ else:
+ value = getattr(obj, self.slot_name, _missing) # type: ignore[arg-type]
if value is _missing:
value = self.fget(obj) # type: ignore
- obj.__dict__[self.__name__] = value
+
+ if obj_dict is not None:
+ obj.__dict__[self.__name__] = value
+ else:
+ setattr(obj, self.slot_name, value)
return value
def __delete__(self, obj: object) -> None:
- del obj.__dict__[self.__name__]
-
-
-def invalidate_cached_property(obj: object, name: str) -> None:
- """Invalidates the cache for a :class:`cached_property`:
-
- >>> class Test(object):
- ... @cached_property
- ... def magic_number(self):
- ... print("recalculating...")
- ... return 42
- ...
- >>> var = Test()
- >>> var.magic_number
- recalculating...
- 42
- >>> var.magic_number
- 42
- >>> invalidate_cached_property(var, "magic_number")
- >>> var.magic_number
- recalculating...
- 42
-
- You must pass the name of the cached property as the second argument.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use ``del obj.name`` instead.
- """
- warnings.warn(
- "'invalidate_cached_property' is deprecated and will be removed"
- " in Werkzeug 2.1. Use 'del obj.name' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- delattr(obj, name)
+ if hasattr(obj, "__dict__"):
+ del obj.__dict__[self.__name__]
+ else:
+ setattr(obj, self.slot_name, _missing)
class environ_property(_DictAccessorProperty[_TAccessorValue]):
@@ -171,143 +156,6 @@ class header_property(_DictAccessorProperty[_TAccessorValue]):
return obj.headers
-class HTMLBuilder:
- """Helper object for HTML generation.
-
- Per default there are two instances of that class. The `html` one, and
- the `xhtml` one for those two dialects. The class uses keyword parameters
- and positional parameters to generate small snippets of HTML.
-
- Keyword parameters are converted to XML/SGML attributes, positional
- arguments are used as children. Because Python accepts positional
- arguments before keyword arguments it's a good idea to use a list with the
- star-syntax for some children:
-
- >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
- ... html.a('bar', href='bar.html')])
- '<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
-
- This class works around some browser limitations and can not be used for
- arbitrary SGML/XML generation. For that purpose lxml and similar
- libraries exist.
-
- Calling the builder escapes the string passed:
-
- >>> html.p(html("<foo>"))
- '<p>&lt;foo&gt;</p>'
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1.
- """
-
- _entity_re = re.compile(r"&([^;]+);")
- _entities = name2codepoint.copy()
- _entities["apos"] = 39
- _empty_elements = {
- "area",
- "base",
- "basefont",
- "br",
- "col",
- "command",
- "embed",
- "frame",
- "hr",
- "img",
- "input",
- "keygen",
- "isindex",
- "link",
- "meta",
- "param",
- "source",
- "wbr",
- }
- _boolean_attributes = {
- "selected",
- "checked",
- "compact",
- "declare",
- "defer",
- "disabled",
- "ismap",
- "multiple",
- "nohref",
- "noresize",
- "noshade",
- "nowrap",
- }
- _plaintext_elements = {"textarea"}
- _c_like_cdata = {"script", "style"}
-
- def __init__(self, dialect): # type: ignore
- self._dialect = dialect
-
- def __call__(self, s): # type: ignore
- import html
-
- warnings.warn(
- "'utils.HTMLBuilder' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- return html.escape(s)
-
- def __getattr__(self, tag): # type: ignore
- import html
-
- warnings.warn(
- "'utils.HTMLBuilder' is deprecated and will be removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
- if tag[:2] == "__":
- raise AttributeError(tag)
-
- def proxy(*children, **arguments): # type: ignore
- buffer = f"<{tag}"
- for key, value in arguments.items():
- if value is None:
- continue
- if key[-1] == "_":
- key = key[:-1]
- if key in self._boolean_attributes:
- if not value:
- continue
- if self._dialect == "xhtml":
- value = f'="{key}"'
- else:
- value = ""
- else:
- value = f'="{html.escape(value)}"'
- buffer += f" {key}{value}"
- if not children and tag in self._empty_elements:
- if self._dialect == "xhtml":
- buffer += " />"
- else:
- buffer += ">"
- return buffer
- buffer += ">"
-
- children_as_string = "".join([str(x) for x in children if x is not None])
-
- if children_as_string:
- if tag in self._plaintext_elements:
- children_as_string = html.escape(children_as_string)
- elif tag in self._c_like_cdata and self._dialect == "xhtml":
- children_as_string = f"/*<![CDATA[*/{children_as_string}/*]]>*/"
- buffer += children_as_string + f"</{tag}>"
- return buffer
-
- return proxy
-
- def __repr__(self) -> str:
- return f"<{type(self).__name__} for {self._dialect!r}>"
-
-
-html = HTMLBuilder("html")
-xhtml = HTMLBuilder("xhtml")
-
# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in
# https://www.iana.org/assignments/media-types/media-types.xhtml
# Types listed in the XDG mime info that have a charset in the IANA registration.
@@ -346,89 +194,6 @@ def get_content_type(mimetype: str, charset: str) -> str:
return mimetype
-def detect_utf_encoding(data: bytes) -> str:
- """Detect which UTF encoding was used to encode the given bytes.
-
- The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is
- accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big
- or little endian. Some editors or libraries may prepend a BOM.
-
- :internal:
-
- :param data: Bytes in unknown UTF encoding.
- :return: UTF encoding name
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. This is built in to
- :func:`json.loads`.
-
- .. versionadded:: 0.15
- """
- warnings.warn(
- "'detect_utf_encoding' is deprecated and will be removed in"
- " Werkzeug 2.1. This is built in to 'json.loads'.",
- DeprecationWarning,
- stacklevel=2,
- )
- head = data[:4]
-
- if head[:3] == codecs.BOM_UTF8:
- return "utf-8-sig"
-
- if b"\x00" not in head:
- return "utf-8"
-
- if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE):
- return "utf-32"
-
- if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
- return "utf-16"
-
- if len(head) == 4:
- if head[:3] == b"\x00\x00\x00":
- return "utf-32-be"
-
- if head[::2] == b"\x00\x00":
- return "utf-16-be"
-
- if head[1:] == b"\x00\x00\x00":
- return "utf-32-le"
-
- if head[1::2] == b"\x00\x00":
- return "utf-16-le"
-
- if len(head) == 2:
- return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le"
-
- return "utf-8"
-
-
-def format_string(string: str, context: t.Mapping[str, t.Any]) -> str:
- """String-template format a string:
-
- >>> format_string('$foo and ${foo}s', dict(foo=42))
- '42 and 42s'
-
- This does not do any attribute lookup.
-
- :param string: the format string.
- :param context: a dict with the variables to insert.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :class:`string.Template`
- instead.
- """
- from string import Template
-
- warnings.warn(
- "'utils.format_string' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'string.Template' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return Template(string).substitute(context)
-
-
def secure_filename(filename: str) -> str:
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
@@ -476,54 +241,6 @@ def secure_filename(filename: str) -> str:
return filename
-def escape(s: t.Any) -> str:
- """Replace ``&``, ``<``, ``>``, ``"``, and ``'`` with HTML-safe
- sequences.
-
- ``None`` is escaped to an empty string.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use MarkupSafe instead.
- """
- import html
-
- warnings.warn(
- "'utils.escape' is deprecated and will be removed in Werkzeug"
- " 2.1. Use MarkupSafe instead.",
- DeprecationWarning,
- stacklevel=2,
- )
-
- if s is None:
- return ""
-
- if hasattr(s, "__html__"):
- return s.__html__() # type: ignore
-
- if not isinstance(s, str):
- s = str(s)
-
- return html.escape(s, quote=True) # type: ignore
-
-
-def unescape(s: str) -> str:
- """The reverse of :func:`escape`. This unescapes all the HTML
- entities, not only those inserted by ``escape``.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use MarkupSafe instead.
- """
- import html
-
- warnings.warn(
- "'utils.unescape' is deprecated and will be removed in Werkzueg"
- " 2.1. Use MarkupSafe instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return html.unescape(s)
-
-
def redirect(
location: str, code: int = 302, Response: t.Optional[t.Type["Response"]] = None
) -> "Response":
@@ -546,25 +263,25 @@ def redirect(
response. The default is :class:`werkzeug.wrappers.Response` if
unspecified.
"""
- import html
-
if Response is None:
from .wrappers import Response # type: ignore
- display_location = html.escape(location)
+ display_location = escape(location)
if isinstance(location, str):
# Safe conversion is necessary here as we might redirect
# to a broken URI scheme (for instance itms-services).
from .urls import iri_to_uri
location = iri_to_uri(location, safe_conversion=True)
+
response = Response( # type: ignore
- '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
+ "<!doctype html>\n"
+ "<html lang=en>\n"
"<title>Redirecting...</title>\n"
"<h1>Redirecting...</h1>\n"
- "<p>You should be redirected automatically to target URL: "
- f'<a href="{html.escape(location)}">{display_location}</a>. If'
- " not click the link.",
+ "<p>You should be redirected automatically to the target URL: "
+ f'<a href="{escape(location)}">{display_location}</a>. If'
+ " not, click the link.\n",
code,
mimetype="text/html",
)
@@ -572,18 +289,40 @@ def redirect(
return response
-def append_slash_redirect(environ: "WSGIEnvironment", code: int = 301) -> "Response":
- """Redirects to the same URL but with a slash appended. The behavior
- of this function is undefined if the path ends with a slash already.
+def append_slash_redirect(environ: "WSGIEnvironment", code: int = 308) -> "Response":
+ """Redirect to the current URL with a slash appended.
+
+ If the current URL is ``/user/42``, the redirect URL will be
+ ``42/``. When joined to the current URL during response
+ processing or by the browser, this will produce ``/user/42/``.
- :param environ: the WSGI environment for the request that triggers
- the redirect.
+ The behavior is undefined if the path ends with a slash already. If
+ called unconditionally on a URL, it may produce a redirect loop.
+
+ :param environ: Use the path and query from this WSGI environment
+ to produce the redirect URL.
:param code: the status code for the redirect.
+
+ .. versionchanged:: 2.1
+ Produce a relative URL that only modifies the last segment.
+ Relevant when the current path has multiple segments.
+
+ .. versionchanged:: 2.1
+ The default status code is 308 instead of 301. This preserves
+ the request method and body.
"""
- new_path = environ["PATH_INFO"].strip("/") + "/"
+ tail = environ["PATH_INFO"].rpartition("/")[2]
+
+ if not tail:
+ new_path = "./"
+ else:
+ new_path = f"{tail}/"
+
query_string = environ.get("QUERY_STRING")
+
if query_string:
- new_path += f"?{query_string}"
+ new_path = f"{new_path}?{query_string}"
+
return redirect(new_path, code)
@@ -924,139 +663,6 @@ def find_modules(
yield modname
-def validate_arguments(func, args, kwargs, drop_extra=True): # type: ignore
- """Checks if the function accepts the arguments and keyword arguments.
- Returns a new ``(args, kwargs)`` tuple that can safely be passed to
- the function without causing a `TypeError` because the function signature
- is incompatible. If `drop_extra` is set to `True` (which is the default)
- any extra positional or keyword arguments are dropped automatically.
-
- The exception raised provides three attributes:
-
- `missing`
- A set of argument names that the function expected but where
- missing.
-
- `extra`
- A dict of keyword arguments that the function can not handle but
- where provided.
-
- `extra_positional`
- A list of values that where given by positional argument but the
- function cannot accept.
-
- This can be useful for decorators that forward user submitted data to
- a view function::
-
- from werkzeug.utils import ArgumentValidationError, validate_arguments
-
- def sanitize(f):
- def proxy(request):
- data = request.values.to_dict()
- try:
- args, kwargs = validate_arguments(f, (request,), data)
- except ArgumentValidationError:
- raise BadRequest('The browser failed to transmit all '
- 'the data expected.')
- return f(*args, **kwargs)
- return proxy
-
- :param func: the function the validation is performed against.
- :param args: a tuple of positional arguments.
- :param kwargs: a dict of keyword arguments.
- :param drop_extra: set to `False` if you don't want extra arguments
- to be silently dropped.
- :return: tuple in the form ``(args, kwargs)``.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :func:`inspect.signature`
- instead.
- """
- warnings.warn(
- "'utils.validate_arguments' is deprecated and will be removed"
- " in Werkzeug 2.1. Use 'inspect.signature' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- parser = _parse_signature(func)
- args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
- if missing:
- raise ArgumentValidationError(tuple(missing))
- elif (extra or extra_positional) and not drop_extra:
- raise ArgumentValidationError(None, extra, extra_positional)
- return tuple(args), kwargs
-
-
-def bind_arguments(func, args, kwargs): # type: ignore
- """Bind the arguments provided into a dict. When passed a function,
- a tuple of arguments and a dict of keyword arguments `bind_arguments`
- returns a dict of names as the function would see it. This can be useful
- to implement a cache decorator that uses the function arguments to build
- the cache key based on the values of the arguments.
-
- :param func: the function the arguments should be bound for.
- :param args: tuple of positional arguments.
- :param kwargs: a dict of keyword arguments.
- :return: a :class:`dict` of bound keyword arguments.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Use :meth:`Signature.bind`
- instead.
- """
- warnings.warn(
- "'utils.bind_arguments' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'Signature.bind' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- (
- args,
- kwargs,
- missing,
- extra,
- extra_positional,
- arg_spec,
- vararg_var,
- kwarg_var,
- ) = _parse_signature(func)(args, kwargs)
- values = {}
- for (name, _has_default, _default), value in zip(arg_spec, args):
- values[name] = value
- if vararg_var is not None:
- values[vararg_var] = tuple(extra_positional)
- elif extra_positional:
- raise TypeError("too many positional arguments")
- if kwarg_var is not None:
- multikw = set(extra) & {x[0] for x in arg_spec}
- if multikw:
- raise TypeError(
- f"got multiple values for keyword argument {next(iter(multikw))!r}"
- )
- values[kwarg_var] = extra
- elif extra:
- raise TypeError(f"got unexpected keyword argument {next(iter(extra))!r}")
- return values
-
-
-class ArgumentValidationError(ValueError):
- """Raised if :func:`validate_arguments` fails to validate
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1 along with ``utils.bind`` and
- ``validate_arguments``.
- """
-
- def __init__(self, missing=None, extra=None, extra_positional=None): # type: ignore
- self.missing = set(missing or ())
- self.extra = extra or {}
- self.extra_positional = extra_positional or []
- super().__init__(
- "function arguments invalid."
- f" ({len(self.missing)} missing,"
- f" {len(self.extra) + len(self.extra_positional)} additional)"
- )
-
-
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/__init__.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/__init__.py
index eb69a99493..b8c45d71cf 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/__init__.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/wrappers/__init__.py
@@ -1,16 +1,3 @@
-from .accept import AcceptMixin
-from .auth import AuthorizationMixin
-from .auth import WWWAuthenticateMixin
-from .base_request import BaseRequest
-from .base_response import BaseResponse
-from .common_descriptors import CommonRequestDescriptorsMixin
-from .common_descriptors import CommonResponseDescriptorsMixin
-from .etag import ETagRequestMixin
-from .etag import ETagResponseMixin
-from .request import PlainRequest
from .request import Request as Request
-from .request import StreamOnlyMixin
from .response import Response as Response
from .response import ResponseStream
-from .response import ResponseStreamMixin
-from .user_agent import UserAgentMixin
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/accept.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/accept.py
deleted file mode 100644
index da24dbad63..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/accept.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import typing as t
-import warnings
-
-
-class AcceptMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'AcceptMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/auth.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/auth.py
deleted file mode 100644
index f7f58157a4..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/auth.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import typing as t
-import warnings
-
-
-class AuthorizationMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'AuthorizationMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
-
-
-class WWWAuthenticateMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'WWWAuthenticateMixin' is deprecated and will be removed"
- " in Werkzeug 2.1. 'Response' now includes the"
- " functionality directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_request.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_request.py
deleted file mode 100644
index 451989fd7f..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_request.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import typing as t
-import warnings
-
-from .request import Request
-
-
-class _FakeSubclassCheck(type):
- def __subclasscheck__(cls, subclass: t.Type) -> bool:
- warnings.warn(
- "'BaseRequest' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'issubclass(cls, Request)' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return issubclass(subclass, Request)
-
- def __instancecheck__(cls, instance: t.Any) -> bool:
- warnings.warn(
- "'BaseRequest' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'isinstance(obj, Request)' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return isinstance(instance, Request)
-
-
-class BaseRequest(Request, metaclass=_FakeSubclassCheck):
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'BaseRequest' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_response.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_response.py
deleted file mode 100644
index 3e0dc67664..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/base_response.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import typing as t
-import warnings
-
-from .response import Response
-
-
-class _FakeSubclassCheck(type):
- def __subclasscheck__(cls, subclass: t.Type) -> bool:
- warnings.warn(
- "'BaseResponse' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'issubclass(cls, Response)' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return issubclass(subclass, Response)
-
- def __instancecheck__(cls, instance: t.Any) -> bool:
- warnings.warn(
- "'BaseResponse' is deprecated and will be removed in"
- " Werkzeug 2.1. Use 'isinstance(obj, Response)' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- return isinstance(instance, Response)
-
-
-class BaseResponse(Response, metaclass=_FakeSubclassCheck):
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'BaseResponse' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Response' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/common_descriptors.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/common_descriptors.py
deleted file mode 100644
index 6436b4c1f8..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/common_descriptors.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import typing as t
-import warnings
-
-
-class CommonRequestDescriptorsMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'CommonRequestDescriptorsMixin' is deprecated and will be"
- " removed in Werkzeug 2.1. 'Request' now includes the"
- " functionality directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
-
-
-class CommonResponseDescriptorsMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'CommonResponseDescriptorsMixin' is deprecated and will be"
- " removed in Werkzeug 2.1. 'Response' now includes the"
- " functionality directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/cors.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/cors.py
deleted file mode 100644
index efd8537b61..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/cors.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import typing as t
-import warnings
-
-
-class CORSRequestMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'CORSRequestMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
-
-
-class CORSResponseMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'CORSResponseMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Response' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/etag.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/etag.py
deleted file mode 100644
index 9131b93007..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/etag.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import typing as t
-import warnings
-
-
-class ETagRequestMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'ETagRequestMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
-
-
-class ETagResponseMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'ETagResponseMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Response' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/json.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/json.py
deleted file mode 100644
index a4dd7c27d2..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/json.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import typing as t
-import warnings
-
-
-class JSONMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'JSONMixin' is deprecated and will be removed in Werkzeug"
- " 2.1. 'Request' now includes the functionality directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/request.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/request.py
index f68dd5a1db..57b739cc5f 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/request.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/wrappers/request.py
@@ -2,7 +2,6 @@ import functools
import json
import typing
import typing as t
-import warnings
from io import BytesIO
from .._internal import _wsgi_decoding_dance
@@ -50,6 +49,9 @@ class Request(_SansIORequest):
prevent consuming the form data in middleware, which would make
it unavailable to the final application.
+ .. versionchanged:: 2.1
+ Remove the ``disable_data_descriptor`` attribute.
+
.. versionchanged:: 2.0
Combine ``BaseRequest`` and mixins into a single ``Request``
class. Using the old classes is deprecated and will be removed
@@ -81,20 +83,10 @@ class Request(_SansIORequest):
#: .. versionadded:: 0.5
max_form_memory_size: t.Optional[int] = None
- #: The form data parser that shoud be used. Can be replaced to customize
+ #: The form data parser that should be used. Can be replaced to customize
#: the form date parsing.
form_data_parser_class: t.Type[FormDataParser] = FormDataParser
- #: Disable the :attr:`data` property to avoid reading from the input
- #: stream.
- #:
- #: .. deprecated:: 2.0
- #: Will be removed in Werkzeug 2.1. Create the request with
- #: ``shallow=True`` instead.
- #:
- #: .. versionadded:: 0.9
- disable_data_descriptor: t.Optional[bool] = None
-
#: The WSGI environment containing HTTP headers and information from
#: the WSGI server.
environ: "WSGIEnvironment"
@@ -125,17 +117,6 @@ class Request(_SansIORequest):
remote_addr=environ.get("REMOTE_ADDR"),
)
self.environ = environ
-
- if self.disable_data_descriptor is not None:
- warnings.warn(
- "'disable_data_descriptor' is deprecated and will be"
- " removed in Werkzeug 2.1. Create the request with"
- " 'shallow=True' instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- shallow = shallow or self.disable_data_descriptor
-
self.shallow = shallow
if populate_request and not shallow:
@@ -549,6 +530,12 @@ class Request(_SansIORequest):
(:mimetype:`application/json`, see :attr:`is_json`).
Calls :meth:`get_json` with default arguments.
+
+ If the request content type is not ``application/json``, this
+ will raise a 400 Bad Request error.
+
+ .. versionchanged:: 2.1
+ Raise a 400 error if the content type is incorrect.
"""
return self.get_json()
@@ -562,23 +549,28 @@ class Request(_SansIORequest):
"""Parse :attr:`data` as JSON.
If the mimetype does not indicate JSON
- (:mimetype:`application/json`, see :attr:`is_json`), this
- returns ``None``.
-
- If parsing fails, :meth:`on_json_loading_failed` is called and
- its return value is used as the return value.
+ (:mimetype:`application/json`, see :attr:`is_json`), or parsing
+ fails, :meth:`on_json_loading_failed` is called and
+ its return value is used as the return value. By default this
+ raises a 400 Bad Request error.
:param force: Ignore the mimetype and always try to parse JSON.
- :param silent: Silence parsing errors and return ``None``
- instead.
+ :param silent: Silence mimetype and parsing errors, and
+ return ``None`` instead.
:param cache: Store the parsed JSON to return for subsequent
calls.
+
+ .. versionchanged:: 2.1
+ Raise a 400 error if the content type is incorrect.
"""
if cache and self._cached_json[silent] is not Ellipsis:
return self._cached_json[silent]
if not (force or self.is_json):
- return None
+ if not silent:
+ return self.on_json_loading_failed(None)
+ else:
+ return None
data = self.get_data(cache=cache)
@@ -603,58 +595,20 @@ class Request(_SansIORequest):
return rv
- def on_json_loading_failed(self, e: ValueError) -> t.Any:
- """Called if :meth:`get_json` parsing fails and isn't silenced.
+ def on_json_loading_failed(self, e: t.Optional[ValueError]) -> t.Any:
+ """Called if :meth:`get_json` fails and isn't silenced.
+
If this method returns a value, it is used as the return value
for :meth:`get_json`. The default implementation raises
:exc:`~werkzeug.exceptions.BadRequest`.
- """
- raise BadRequest(f"Failed to decode JSON object: {e}")
+ :param e: If parsing failed, this is the exception. It will be
+ ``None`` if the content type wasn't ``application/json``.
+ """
+ if e is not None:
+ raise BadRequest(f"Failed to decode JSON object: {e}")
-class StreamOnlyMixin:
- """Mixin to create a ``Request`` that disables the ``data``,
- ``form``, and ``files`` properties. Only ``stream`` is available.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Create the request with
- ``shallow=True`` instead.
-
- .. versionadded:: 0.9
- """
-
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'StreamOnlyMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. Create the request with 'shallow=True'"
- " instead.",
- DeprecationWarning,
- stacklevel=2,
- )
- kwargs["shallow"] = True
- super().__init__(*args, **kwargs)
-
-
-class PlainRequest(StreamOnlyMixin, Request):
- """A request object without ``data``, ``form``, and ``files``.
-
- .. deprecated:: 2.0
- Will be removed in Werkzeug 2.1. Create the request with
- ``shallow=True`` instead.
-
- .. versionadded:: 0.9
- """
-
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'PlainRequest' is deprecated and will be removed in"
- " Werkzeug 2.1. Create the request with 'shallow=True'"
- " instead.",
- DeprecationWarning,
- stacklevel=2,
+ raise BadRequest(
+ "Did not attempt to load JSON data because the request"
+ " Content-Type was not 'application/json'."
)
-
- # Don't show the DeprecationWarning for StreamOnlyMixin.
- with warnings.catch_warnings():
- warnings.simplefilter("ignore", DeprecationWarning)
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/response.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/response.py
index 8378e74405..7e888cba5e 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/response.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/wrappers/response.py
@@ -142,11 +142,15 @@ class Response(_SansIOResponse):
#: your code to the name change.
implicit_sequence_conversion = True
- #: Should this response object correct the location header to be RFC
- #: conformant? This is true by default.
+ #: If a redirect ``Location`` header is a relative URL, make it an
+ #: absolute URL, including scheme and domain.
+ #:
+ #: .. versionchanged:: 2.1
+ #: This is disabled by default, so responses will send relative
+ #: redirects.
#:
#: .. versionadded:: 0.8
- autocorrect_location_header = True
+ autocorrect_location_header = False
#: Should this response object automatically set the content-length
#: header if possible? This is true by default.
@@ -445,7 +449,7 @@ class Response(_SansIOResponse):
def __exit__(self, exc_type, exc_value, tb): # type: ignore
self.close()
- def freeze(self, no_etag: None = None) -> None:
+ def freeze(self) -> None:
"""Make the response object ready to be pickled. Does the
following:
@@ -455,6 +459,9 @@ class Response(_SansIOResponse):
* Set the ``Content-Length`` header.
* Generate an ``ETag`` header if one is not already set.
+ .. versionchanged:: 2.1
+ Removed the ``no_etag`` parameter.
+
.. versionchanged:: 2.0
An ``ETag`` header is added, the ``no_etag`` parameter is
deprecated and will be removed in Werkzeug 2.1.
@@ -466,15 +473,6 @@ class Response(_SansIOResponse):
# implicit_sequence_conversion and direct_passthrough.
self.response = list(self.iter_encoded())
self.headers["Content-Length"] = str(sum(map(len, self.response)))
-
- if no_etag is not None:
- warnings.warn(
- "The 'no_etag' parameter is deprecated and will be"
- " removed in Werkzeug 2.1.",
- DeprecationWarning,
- stacklevel=2,
- )
-
self.add_etag()
def get_wsgi_headers(self, environ: "WSGIEnvironment") -> Headers:
@@ -798,7 +796,7 @@ class Response(_SansIOResponse):
if environ["REQUEST_METHOD"] in ("GET", "HEAD"):
# if the date is not in the headers, add it now. We however
# will not override an already existing header. Unfortunately
- # this header will be overriden by many WSGI servers including
+ # this header will be overridden by many WSGI servers including
# wsgiref.
if "date" not in self.headers:
self.headers["Date"] = http_date()
@@ -835,9 +833,9 @@ class Response(_SansIOResponse):
class ResponseStream:
- """A file descriptor like object used by the :class:`ResponseStreamMixin` to
- represent the body of the stream. It directly pushes into the response
- iterable of the response object.
+ """A file descriptor like object used by :meth:`Response.stream` to
+ represent the body of the stream. It directly pushes into the
+ response iterable of the response object.
"""
mode = "wb+"
@@ -877,15 +875,3 @@ class ResponseStream:
@property
def encoding(self) -> str:
return self.response.charset
-
-
-class ResponseStreamMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'ResponseStreamMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Response' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wrappers/user_agent.py b/contrib/python/Werkzeug/py3/werkzeug/wrappers/user_agent.py
deleted file mode 100644
index e69ab05506..0000000000
--- a/contrib/python/Werkzeug/py3/werkzeug/wrappers/user_agent.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import typing as t
-import warnings
-
-
-class UserAgentMixin:
- def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
- warnings.warn(
- "'UserAgentMixin' is deprecated and will be removed in"
- " Werkzeug 2.1. 'Request' now includes the functionality"
- " directly.",
- DeprecationWarning,
- stacklevel=2,
- )
- super().__init__(*args, **kwargs)
diff --git a/contrib/python/Werkzeug/py3/werkzeug/wsgi.py b/contrib/python/Werkzeug/py3/werkzeug/wsgi.py
index 9cfa74de85..24ece0b19e 100644
--- a/contrib/python/Werkzeug/py3/werkzeug/wsgi.py
+++ b/contrib/python/Werkzeug/py3/werkzeug/wsgi.py
@@ -1,6 +1,7 @@
import io
import re
import typing as t
+import warnings
from functools import partial
from functools import update_wrapper
from itertools import chain
@@ -126,16 +127,10 @@ def get_content_length(environ: "WSGIEnvironment") -> t.Optional[int]:
:param environ: the WSGI environ to fetch the content length from.
"""
- if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked":
- return None
-
- content_length = environ.get("CONTENT_LENGTH")
- if content_length is not None:
- try:
- return max(0, int(content_length))
- except (ValueError, TypeError):
- pass
- return None
+ return _sansio_utils.get_content_length(
+ http_content_length=environ.get("CONTENT_LENGTH"),
+ http_transfer_encoding=environ.get("HTTP_TRANSFER_ENCODING", ""),
+ )
def get_input_stream(
@@ -183,8 +178,16 @@ def get_query_string(environ: "WSGIEnvironment") -> str:
:param environ: WSGI environment to get the query string from.
+ .. deprecated:: 2.2
+ Will be removed in Werkzeug 2.3.
+
.. versionadded:: 0.9
"""
+ warnings.warn(
+ "'get_query_string' is deprecated and will be removed in Werkzeug 2.3.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
qs = environ.get("QUERY_STRING", "").encode("latin1")
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
@@ -220,8 +223,16 @@ def get_script_name(
should be performed.
:param errors: The decoding error handling.
+ .. deprecated:: 2.2
+ Will be removed in Werkzeug 2.3.
+
.. versionadded:: 0.9
"""
+ warnings.warn(
+ "'get_script_name' is deprecated and will be removed in Werkzeug 2.3.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
path = environ.get("SCRIPT_NAME", "").encode("latin1")
return _to_str(path, charset, errors, allow_none_charset=True) # type: ignore
@@ -247,6 +258,9 @@ def pop_path_info(
>>> env['SCRIPT_NAME']
'/foo/a/b'
+ .. deprecated:: 2.2
+ Will be removed in Werkzeug 2.3.
+
.. versionadded:: 0.5
.. versionchanged:: 0.9
@@ -259,6 +273,12 @@ def pop_path_info(
:param errors: The ``errors`` paramater passed to
:func:`bytes.decode`.
"""
+ warnings.warn(
+ "'pop_path_info' is deprecated and will be removed in Werkzeug 2.3.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
path = environ.get("PATH_INFO")
if not path:
return None
@@ -299,6 +319,9 @@ def peek_path_info(
If the `charset` is set to `None` bytes are returned.
+ .. deprecated:: 2.2
+ Will be removed in Werkzeug 2.3.
+
.. versionadded:: 0.5
.. versionchanged:: 0.9
@@ -307,6 +330,12 @@ def peek_path_info(
:param environ: the WSGI environment that is checked.
"""
+ warnings.warn(
+ "'peek_path_info' is deprecated and will be removed in Werkzeug 2.3.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1)
if segments:
return _to_str( # type: ignore
@@ -354,12 +383,21 @@ def extract_path_info(
same server point to the same
resource.
+ .. deprecated:: 2.2
+ Will be removed in Werkzeug 2.3.
+
.. versionchanged:: 0.15
The ``errors`` parameter defaults to leaving invalid bytes
quoted instead of replacing them.
.. versionadded:: 0.6
+
"""
+ warnings.warn(
+ "'extract_path_info' is deprecated and will be removed in Werkzeug 2.3.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
def _normalize_netloc(scheme: str, netloc: str) -> str:
parts = netloc.split("@", 1)[-1].split(":", 1)