import base64
import email.utils
import re
import typing
import typing as t
import warnings
from datetime import date
from datetime import datetime
from datetime import time
from datetime import timedelta
from datetime import timezone
from enum import Enum
from hashlib import sha1
from time import mktime
from time import struct_time
from urllib.parse import unquote_to_bytes as _unquote
from urllib.request import parse_http_list as _parse_list_header

from ._internal import _cookie_parse_impl
from ._internal import _cookie_quote
from ._internal import _make_cookie_domain
from ._internal import _to_bytes
from ._internal import _to_str
from ._internal import _wsgi_decoding_dance
from werkzeug._internal import _dt_as_utc

if t.TYPE_CHECKING:
    import typing_extensions as te
    from _typeshed.wsgi import WSGIEnvironment

# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
    r"""
    (                       # media-range capturing-parenthesis
      [^\s;,]+              # type/subtype
      (?:[ \t]*;[ \t]*      # ";"
        (?:                 # parameter non-capturing-parenthesis
          [^\s;,q][^\s;,]*  # token that doesn't start with "q"
        |                   # or
          q[^\s;,=][^\s;,]* # token that is more than just "q"
        )
      )*                    # zero or more parameters
    )                       # end of media-range
    (?:[ \t]*;[ \t]*q=      # weight is a "q" parameter
      (\d*(?:\.\d+)?)       # qvalue capturing-parentheses
      [^,]*                 # "extension" accept params: who cares?
    )?                      # accept params are optional
    """,
    re.VERBOSE,
)

_token_chars = frozenset(
    "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~"
)

_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')

_option_header_piece_re = re.compile(
    r"""
    ;\s*,?\s*  # newlines were replaced with commas
    (?P<key>
        "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
    |
        [^\s;,=*]+  # token
    )
    (?:\*(?P<count>\d+))?  # *1, optional continuation index
    \s*
    (?:  # optionally followed by =value
        (?:  # equals sign, possibly with encoding
            \*\s*=\s*  # * indicates extended notation
            (?:  # optional encoding
                (?P<encoding>[^\s]+?)
                '(?P<language>[^\s]*?)'
            )?
        |
            =\s*  # basic notation
        )
        (?P<value>
            "[^"\\]*(?:\\.[^"\\]*)*"  # quoted string
        |
            [^;,]+  # token
        )?
    )?
    \s*
    """,
    flags=re.VERBOSE,
)

_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?")

_entity_headers = frozenset(
    [
        "allow",
        "content-encoding",
        "content-language",
        "content-length",
        "content-location",
        "content-md5",
        "content-range",
        "content-type",
        "expires",
        "last-modified",
    ]
)

_hop_by_hop_headers = frozenset(
    [
        "connection",
        "keep-alive",
        "proxy-authenticate",
        "proxy-authorization",
        "te",
        "trailer",
        "transfer-encoding",
        "upgrade",
    ]
)

HTTP_STATUS_CODES = {
    100: "Continue",
    101: "Switching Protocols",
    102: "Processing",
    103: "Early Hints",  # see RFC 8297
    200: "OK",
    201: "Created",
    202: "Accepted",
    203: "Non-Authoritative Information",
    204: "No Content",
    205: "Reset Content",
    206: "Partial Content",
    207: "Multi-Status",
    208: "Already Reported",  # see RFC 5842
    226: "IM Used",  # see RFC 3229
    300: "Multiple Choices",
    301: "Moved Permanently",
    302: "Found",
    303: "See Other",
    304: "Not Modified",
    305: "Use Proxy",
    306: "Switch Proxy",  # unused
    307: "Temporary Redirect",
    308: "Permanent Redirect",
    400: "Bad Request",
    401: "Unauthorized",
    402: "Payment Required",  # unused
    403: "Forbidden",
    404: "Not Found",
    405: "Method Not Allowed",
    406: "Not Acceptable",
    407: "Proxy Authentication Required",
    408: "Request Timeout",
    409: "Conflict",
    410: "Gone",
    411: "Length Required",
    412: "Precondition Failed",
    413: "Request Entity Too Large",
    414: "Request URI Too Long",
    415: "Unsupported Media Type",
    416: "Requested Range Not Satisfiable",
    417: "Expectation Failed",
    418: "I'm a teapot",  # see RFC 2324
    421: "Misdirected Request",  # see RFC 7540
    422: "Unprocessable Entity",
    423: "Locked",
    424: "Failed Dependency",
    425: "Too Early",  # see RFC 8470
    426: "Upgrade Required",
    428: "Precondition Required",  # see RFC 6585
    429: "Too Many Requests",
    431: "Request Header Fields Too Large",
    449: "Retry With",  # proprietary MS extension
    451: "Unavailable For Legal Reasons",
    500: "Internal Server Error",
    501: "Not Implemented",
    502: "Bad Gateway",
    503: "Service Unavailable",
    504: "Gateway Timeout",
    505: "HTTP Version Not Supported",
    506: "Variant Also Negotiates",  # see RFC 2295
    507: "Insufficient Storage",
    508: "Loop Detected",  # see RFC 5842
    510: "Not Extended",
    511: "Network Authentication Required",  # see RFC 6585
}


class COEP(Enum):
    """Cross Origin Embedder Policies"""

    UNSAFE_NONE = "unsafe-none"
    REQUIRE_CORP = "require-corp"


class COOP(Enum):
    """Cross Origin Opener Policies"""

    UNSAFE_NONE = "unsafe-none"
    SAME_ORIGIN_ALLOW_POPUPS = "same-origin-allow-popups"
    SAME_ORIGIN = "same-origin"


def quote_header_value(
    value: t.Union[str, int], extra_chars: str = "", allow_token: bool = True
) -> str:
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
        unchanged.
    """
    if isinstance(value, bytes):
        value = value.decode("latin1")

    value = str(value)

    if allow_token:
        token_chars = _token_chars | set(extra_chars)

        if set(value).issubset(token_chars):
            return value

    value = value.replace("\\", "\\\\").replace('"', '\\"')
    return f'"{value}"'


def unquote_header_value(value: str, is_filename: bool = False) -> str:
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`.)
    This does not use the real unquoting scheme, but the quoting scheme
    browsers actually use.
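
    For example:

    >>> unquote_header_value('"a value"')
    'a value'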

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    :param is_filename: The value represents a filename or path.
    """
    if value and value[0] == value[-1] == '"':
        # this is not the real unquoting, but fixing this so that the
        # RFC is met will result in bugs with internet explorer and
        # probably some other browsers as well.  IE for example is
        # uploading files with "C:\foo\bar.txt" as filename
        value = value[1:-1]

        # if this is a filename and the starting characters look like
        # a UNC path, then just return the value without quotes.  Using the
        # replace sequence below on a UNC path has the effect of turning
        # the leading double slash into a single slash and then
        # _fix_ie_filename() doesn't work correctly.  See #458.
        if not is_filename or value[:2] != "\\\\":
            return value.replace("\\\\", "\\").replace('\\"', '"')

    return value


def dump_options_header(
    header: t.Optional[str], options: t.Mapping[str, t.Optional[t.Union[str, int]]]
) -> str:
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = []

    if header is not None:
        segments.append(header)

    for key, value in options.items():
        if value is None:
            segments.append(key)
        else:
            segments.append(f"{key}={quote_header_value(value)}")

    return "; ".join(segments)


def dump_header(
    iterable: t.Union[t.Dict[str, t.Union[str, int]], t.Iterable[str]],
    allow_token: bool = True,
) -> str:
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as a dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        items = []

        for key, value in iterable.items():
            if value is None:
                items.append(key)
            else:
                items.append(
                    f"{key}={quote_header_value(value, allow_token=allow_token)}"
                )
    else:
        items = [quote_header_value(x, allow_token=allow_token) for x in iterable]

    return ", ".join(items)


def dump_csp_header(header: "ds.ContentSecurityPolicy") -> str:
    """Dump a Content Security Policy header.

    These are structured into policies such as "default-src 'self';
    script-src 'self'".

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    """
    return "; ".join(f"{key} {value}" for key, value in header.items())


def parse_list_header(value: str) -> t.List[str]:
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    result = []

    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])

        result.append(item)

    return result


def parse_dict_header(value: str, cls: t.Type[dict] = dict) -> t.Dict[str, str]:
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` argument):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()

    if isinstance(value, bytes):
        value = value.decode("latin1")

    for item in _parse_list_header(value):
        if "=" not in item:
            result[item] = None
            continue

        name, value = item.split("=", 1)

        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])

        result[name] = value

    return result


@typing.overload
def parse_options_header(
    value: t.Optional[str], multiple: "te.Literal[False]" = False
) -> t.Tuple[str, t.Dict[str, str]]:
    ...


@typing.overload
def parse_options_header(
    value: t.Optional[str], multiple: "te.Literal[True]"
) -> t.Tuple[t.Any, ...]:
    ...


def parse_options_header(
    value: t.Optional[str], multiple: bool = False
) -> t.Union[t.Tuple[str, t.Dict[str, str]], t.Tuple[t.Any, ...]]:
    """Parse a ``Content-Type``-like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control``-like headers that
    use a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionchanged:: 0.15
        :rfc:`2231` parameter continuations are handled.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :param multiple: whether to try to parse and return multiple MIME types
    :return: (mimetype, options) or (mimetype, options, mimetype, options, …)
        if multiple=True
    """
    if not value:
        return "", {}

    result: t.List[t.Any] = []

    value = "," + value.replace("\n", ",")

    while value:
        match = _option_header_start_mime_type.match(value)

        if not match:
            break

        result.append(match.group(1))  # mimetype
        options: t.Dict[str, str] = {}
        # Parse options
        rest = match.group(2)
        encoding: t.Optional[str]
        continued_encoding: t.Optional[str] = None

        while rest:
            optmatch = _option_header_piece_re.match(rest)

            if not optmatch:
                break

            option, count, encoding, language, option_value = optmatch.groups()
            # Continuations don't have to supply the encoding after the
            # first line. If we're in a continuation, track the current
            # encoding to use for subsequent lines. Reset it when the
            # continuation ends.
            if not count:
                continued_encoding = None
            else:
                if not encoding:
                    encoding = continued_encoding

                continued_encoding = encoding

            option = unquote_header_value(option)

            if option_value is not None:
                option_value = unquote_header_value(option_value, option == "filename")

                if encoding is not None:
                    option_value = _unquote(option_value).decode(encoding)

            if count:
                # Continuations append to the existing value. For
                # simplicity, this ignores the possibility of
                # out-of-order indices, which shouldn't happen anyway.
                options[option] = options.get(option, "") + option_value
            else:
                options[option] = option_value

            rest = rest[optmatch.end() :]

        result.append(options)

        if multiple is False:
            return tuple(result)

        value = rest

    return tuple(result) if result else ("", {})


_TAnyAccept = t.TypeVar("_TAnyAccept", bound="ds.Accept")


@typing.overload
def parse_accept_header(value: t.Optional[str]) -> "ds.Accept":
    ...


@typing.overload
def parse_accept_header(
    value: t.Optional[str], cls: t.Type[_TAnyAccept]
) -> _TAnyAccept:
    ...


def parse_accept_header(
    value: t.Optional[str], cls: t.Optional[t.Type[_TAnyAccept]] = None
) -> _TAnyAccept:
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
    tuples sorted by the quality with some additional accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.
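
    For example, with the default :class:`Accept` class:

    >>> parse_accept_header("text/html,application/xml;q=0.9").best
    'text/html'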

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = t.cast(t.Type[_TAnyAccept], ds.Accept)

    if not value:
        return cls(None)

    result = []

    for match in _accept_re.finditer(value):
        quality_match = match.group(2)

        if not quality_match:
            quality: float = 1
        else:
            quality = max(min(float(quality_match), 1), 0)

        result.append((match.group(1), quality))

    return cls(result)


_TAnyCC = t.TypeVar("_TAnyCC", bound="ds._CacheControl")
_t_cc_update = t.Optional[t.Callable[[_TAnyCC], None]]


@typing.overload
def parse_cache_control_header(
    value: t.Optional[str], on_update: _t_cc_update, cls: None = None
) -> "ds.RequestCacheControl":
    ...


@typing.overload
def parse_cache_control_header(
    value: t.Optional[str], on_update: _t_cc_update, cls: t.Type[_TAnyCC]
) -> _TAnyCC:
    ...


def parse_cache_control_header(
    value: t.Optional[str],
    on_update: _t_cc_update = None,
    cls: t.Optional[t.Type[_TAnyCC]] = None,
) -> _TAnyCC:
    """Parse a cache control header.  The RFC differentiates between request
    and response cache control; this method does not.  It's your
    responsibility not to use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = t.cast(t.Type[_TAnyCC], ds.RequestCacheControl)

    if not value:
        return cls((), on_update)

    return cls(parse_dict_header(value), on_update)


_TAnyCSP = t.TypeVar("_TAnyCSP", bound="ds.ContentSecurityPolicy")
_t_csp_update = t.Optional[t.Callable[[_TAnyCSP], None]]


@typing.overload
def parse_csp_header(
    value: t.Optional[str], on_update: _t_csp_update, cls: None = None
) -> "ds.ContentSecurityPolicy":
    ...


@typing.overload
def parse_csp_header(
    value: t.Optional[str], on_update: _t_csp_update, cls: t.Type[_TAnyCSP]
) -> _TAnyCSP:
    ...


def parse_csp_header(
    value: t.Optional[str],
    on_update: _t_csp_update = None,
    cls: t.Optional[t.Type[_TAnyCSP]] = None,
) -> _TAnyCSP:
    """Parse a Content Security Policy header.

    .. versionadded:: 1.0.0
       Support for Content Security Policy headers was added.

    :param value: a csp header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.ContentSecurityPolicy` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = t.cast(t.Type[_TAnyCSP], ds.ContentSecurityPolicy)

    if value is None:
        return cls((), on_update)

    items = []

    for policy in value.split(";"):
        policy = policy.strip()

        # Ignore badly formatted policies (no space)
        if " " in policy:
            directive, value = policy.strip().split(" ", 1)
            items.append((directive.strip(), value.strip()))

    return cls(items, on_update)


def parse_set_header(
    value: t.Optional[str],
    on_update: t.Optional[t.Callable[["ds.HeaderSet"], None]] = None,
) -> "ds.HeaderSet":
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    if not value:
        return ds.HeaderSet(None, on_update)

    return ds.HeaderSet(parse_list_header(value), on_update)


def parse_authorization_header(
    value: t.Optional[str],
) -> t.Optional["ds.Authorization"]:
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.
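
    For example, with ``user:pass`` encoded in base64:

    >>> parse_authorization_header("Basic dXNlcjpwYXNz").username
    'user'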

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return None

    value = _wsgi_decoding_dance(value)

    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        return None

    if auth_type == "basic":
        try:
            username, password = base64.b64decode(auth_info).split(b":", 1)
        except Exception:
            return None

        try:
            return ds.Authorization(
                "basic",
                {
                    "username": _to_str(username, "utf-8"),
                    "password": _to_str(password, "utf-8"),
                },
            )
        except UnicodeDecodeError:
            return None

    elif auth_type == "digest":
        auth_map = parse_dict_header(auth_info)

        for key in "username", "realm", "nonce", "uri", "response":
            if key not in auth_map:
                return None

        if "qop" in auth_map:
            if not auth_map.get("nc") or not auth_map.get("cnonce"):
                return None

        return ds.Authorization("digest", auth_map)

    return None


def parse_www_authenticate_header(
    value: t.Optional[str],
    on_update: t.Optional[t.Callable[["ds.WWWAuthenticate"], None]] = None,
) -> "ds.WWWAuthenticate":
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.
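
    For example:

    >>> parse_www_authenticate_header('Basic realm="test"').realm
    'test'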

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return ds.WWWAuthenticate(on_update=on_update)

    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        return ds.WWWAuthenticate(value.strip().lower(), on_update=on_update)

    return ds.WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update)


def parse_if_range_header(value: t.Optional[str]) -> "ds.IfRange":
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionchanged:: 2.0
        If the value represents a datetime, it is timezone-aware.

    .. versionadded:: 0.7
    """
    if not value:
        return ds.IfRange()

    date = parse_date(value)

    if date is not None:
        return ds.IfRange(date=date)

    # drop weakness information
    return ds.IfRange(unquote_etag(value)[0])


def parse_range_header(
    value: t.Optional[str], make_inclusive: bool = True
) -> t.Optional["ds.Range"]:
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.
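
    For example, the stop values are stored exclusive:

    >>> rng = parse_range_header("bytes=0-499")
    >>> rng.units, rng.ranges
    ('bytes', [(0, 500)])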

    .. versionadded:: 0.7
    """
    if not value or "=" not in value:
        return None

    ranges = []
    last_end = 0
    units, rng = value.split("=", 1)
    units = units.strip().lower()

    for item in rng.split(","):
        item = item.strip()

        if "-" not in item:
            return None

        if item.startswith("-"):
            if last_end < 0:
                return None

            try:
                begin = int(item)
            except ValueError:
                return None

            end = None
            last_end = -1
        elif "-" in item:
            begin_str, end_str = item.split("-", 1)
            begin_str = begin_str.strip()
            end_str = end_str.strip()

            if not begin_str.isdigit():
                return None

            begin = int(begin_str)

            if begin < last_end or last_end < 0:
                return None

            if end_str:
                if not end_str.isdigit():
                    return None

                end = int(end_str) + 1

                if begin >= end:
                    return None
            else:
                end = None

            last_end = end if end is not None else -1

        ranges.append((begin, end))

    return ds.Range(units, ranges)


def parse_content_range_header(
    value: t.Optional[str],
    on_update: t.Optional[t.Callable[["ds.ContentRange"], None]] = None,
) -> t.Optional["ds.ContentRange"]:
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.
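
    For example, the stop index is exclusive here as well:

    >>> rng = parse_content_range_header("bytes 0-499/1000")
    >>> rng.start, rng.stop, rng.length
    (0, 500, 1000)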

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None

    try:
        units, rangedef = (value or "").strip().split(None, 1)
    except ValueError:
        return None

    if "/" not in rangedef:
        return None

    rng, length_str = rangedef.split("/", 1)

    if length_str == "*":
        length = None
    elif length_str.isdigit():
        length = int(length_str)
    else:
        return None

    if rng == "*":
        return ds.ContentRange(units, None, None, length, on_update=on_update)
    elif "-" not in rng:
        return None

    start_str, stop_str = rng.split("-", 1)

    try:
        start = int(start_str)
        stop = int(stop_str) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ds.ContentRange(units, start, stop, length, on_update=on_update)

    return None


def quote_etag(etag: str, weak: bool = False) -> str:
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    if '"' in etag:
        raise ValueError("invalid etag")

    etag = f'"{etag}"'

    if weak:
        etag = f"W/{etag}"

    return etag


def unquote_etag(
    etag: t.Optional[str],
) -> t.Union[t.Tuple[str, bool], t.Tuple[None, None]]:
    """Unquote a single etag:

    >>> unquote_etag('W/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None

    etag = etag.strip()
    weak = False

    if etag.startswith(("W/", "w/")):
        weak = True
        etag = etag[2:]

    if etag[:1] == etag[-1:] == '"':
        etag = etag[1:-1]

    return etag, weak


def parse_etags(value: t.Optional[str]) -> "ds.ETags":
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ds.ETags()

    strong = []
    weak = []
    end = len(value)
    pos = 0

    while pos < end:
        match = _etag_re.match(value, pos)

        if match is None:
            break

        is_weak, quoted, raw = match.groups()

        if raw == "*":
            return ds.ETags(star_tag=True)
        elif quoted:
            raw = quoted

        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)

        pos = match.end()

    return ds.ETags(strong, weak)


def generate_etag(data: bytes) -> str:
    """Generate an etag for some data.

    .. versionchanged:: 2.0
        Use SHA-1. MD5 may not be available in some environments.
    """
    return sha1(data).hexdigest()


def parse_date(value: t.Optional[str]) -> t.Optional[datetime]:
    """Parse an :rfc:`2822` date into a timezone-aware
    :class:`datetime.datetime` object, or ``None`` if parsing fails.
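
    >>> parse_date("Sat, 01 Jan 2022 00:00:00 GMT")
    datetime.datetime(2022, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)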

    This is a wrapper for :func:`email.utils.parsedate_to_datetime`. It
    returns ``None`` if parsing fails instead of raising an exception,
    and always returns a timezone-aware datetime object. If the string
    doesn't have timezone information, it is assumed to be UTC.

    :param value: A string with a supported date format.

    .. versionchanged:: 2.0
        Return a timezone-aware datetime object. Use
        ``email.utils.parsedate_to_datetime``.
    """
    if value is None:
        return None

    try:
        dt = email.utils.parsedate_to_datetime(value)
    except (TypeError, ValueError):
        return None

    if dt.tzinfo is None:
        return dt.replace(tzinfo=timezone.utc)

    return dt


def cookie_date(
    expires: t.Optional[t.Union[datetime, date, int, float, struct_time]] = None
) -> str:
    """Format a datetime object or timestamp into an :rfc:`2822` date
    string for ``Set-Cookie expires``.

    .. deprecated:: 2.0
        Will be removed in Werkzeug 2.1. Use :func:`http_date` instead.
    """
    warnings.warn(
        "'cookie_date' is deprecated and will be removed in Werkzeug"
        " 2.1. Use 'http_date' instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return http_date(expires)


def http_date(
    timestamp: t.Optional[t.Union[datetime, date, int, float, struct_time]] = None
) -> str:
    """Format a datetime object or timestamp into an :rfc:`2822` date
    string.

    This is a wrapper for :func:`email.utils.format_datetime`. It
    assumes naive datetime objects are in UTC instead of raising an
    exception.
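
    >>> http_date(datetime(2022, 1, 1, tzinfo=timezone.utc))
    'Sat, 01 Jan 2022 00:00:00 GMT'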

    :param timestamp: The datetime or timestamp to format. Defaults to
        the current time.

    .. versionchanged:: 2.0
        Use ``email.utils.format_datetime``. Accept ``date`` objects.
    """
    if isinstance(timestamp, date):
        if not isinstance(timestamp, datetime):
            # Assume plain date is midnight UTC.
            timestamp = datetime.combine(timestamp, time(), tzinfo=timezone.utc)
        else:
            # Ensure datetime is timezone-aware.
            timestamp = _dt_as_utc(timestamp)

        return email.utils.format_datetime(timestamp, usegmt=True)

    if isinstance(timestamp, struct_time):
        timestamp = mktime(timestamp)

    return email.utils.formatdate(timestamp, usegmt=True)


def parse_age(value: t.Optional[str] = None) -> t.Optional[timedelta]:
    """Parses a base-10 integer count of seconds into a timedelta.

    If parsing fails, the return value is `None`.
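
    >>> parse_age("3600")
    datetime.timedelta(seconds=3600)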

    :param value: a string consisting of an integer represented in base-10
    :return: a :class:`datetime.timedelta` object or `None`.
    """
    if not value:
        return None

    try:
        seconds = int(value)
    except ValueError:
        return None

    if seconds < 0:
        return None

    try:
        return timedelta(seconds=seconds)
    except OverflowError:
        return None


def dump_age(age: t.Optional[t.Union[timedelta, int]] = None) -> t.Optional[str]:
    """Formats the duration as a base-10 integer.

    :param age: should be an integer number of seconds,
                a :class:`datetime.timedelta` object, or,
                if the age is unknown, `None` (default).
    """
    if age is None:
        return None

    if isinstance(age, timedelta):
        age = int(age.total_seconds())
    else:
        age = int(age)

    if age < 0:
        raise ValueError("age cannot be negative")

    return str(age)


def is_resource_modified(
    environ: "WSGIEnvironment",
    etag: t.Optional[str] = None,
    data: t.Optional[bytes] = None,
    last_modified: t.Optional[t.Union[datetime, str]] = None,
    ignore_if_range: bool = True,
) -> bool:
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
        generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :param ignore_if_range: If `False`, the `If-Range` header will be taken
        into account.
    :return: `True` if the resource was modified, otherwise `False`.
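
    For example, an etag matching the request's ``If-None-Match`` header
    means the resource is considered unmodified:

    >>> env = {"HTTP_IF_NONE_MATCH": '"abc"'}
    >>> is_resource_modified(env, etag='"abc"')
    False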

    .. versionchanged:: 2.0
        SHA-1 is used to generate an etag value for the data. MD5 may
        not be available in some environments.

    .. versionchanged:: 1.0.0
        The check is run for methods other than ``GET`` and ``HEAD``.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError("both data and etag given")

    unmodified = False

    if isinstance(last_modified, str):
        last_modified = parse_date(last_modified)

    # HTTP doesn't use microsecond, remove it to avoid false positive
    # comparisons. Mark naive datetimes as UTC.
    if last_modified is not None:
        last_modified = _dt_as_utc(last_modified.replace(microsecond=0))

    if_range = None

    if not ignore_if_range and "HTTP_RANGE" in environ:
        # https://tools.ietf.org/html/rfc7233#section-3.2
        # A server MUST ignore an If-Range header field received in a request
        # that does not contain a Range header field.
        if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE"))

    if if_range is not None and if_range.date is not None:
        modified_since: t.Optional[datetime] = if_range.date
    else:
        modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE"))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True

    if etag:
        etag, _ = unquote_etag(etag)
        etag = t.cast(str, etag)

        if if_range is not None and if_range.etag is not None:
            unmodified = parse_etags(if_range.etag).contains(etag)
        else:
            if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH"))

            if if_none_match:
                # https://tools.ietf.org/html/rfc7232#section-3.2
                # "A recipient MUST use the weak comparison function when comparing
                # entity-tags for If-None-Match"
                unmodified = if_none_match.contains_weak(etag)

            # https://tools.ietf.org/html/rfc7232#section-3.1
            # "Origin server MUST use the strong comparison function when
            # comparing entity-tags for If-Match"
            if_match = parse_etags(environ.get("HTTP_IF_MATCH"))

            if if_match:
                unmodified = not if_match.is_strong(etag)

    return not unmodified


def remove_entity_headers(
    headers: t.Union["ds.Headers", t.List[t.Tuple[str, str]]],
    allowed: t.Iterable[str] = ("expires", "content-location"),
) -> None:
    """Remove all entity headers from a list or :class:`Headers` object.  This
    operation works in-place.  `Expires` and `Content-Location` headers are
    by default not removed.  The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.
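
    For example, with a plain list of ``(key, value)`` tuples:

    >>> headers = [("Content-Type", "text/html"), ("X-Custom", "1")]
    >>> remove_entity_headers(headers)
    >>> headers
    [('X-Custom', '1')]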

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    allowed = {x.lower() for x in allowed}
    headers[:] = [
        (key, value)
        for key, value in headers
        if not is_entity_header(key) or key.lower() in allowed
    ]


def remove_hop_by_hop_headers(
    headers: t.Union["ds.Headers", t.List[t.Tuple[str, str]]]
) -> None:
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    headers[:] = [
        (key, value) for key, value in headers if not is_hop_by_hop_header(key)
    ]


def is_entity_header(header: str) -> bool:
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    return header.lower() in _entity_headers


def is_hop_by_hop_header(header: str) -> bool:
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise.
    """
    return header.lower() in _hop_by_hop_headers


def parse_cookie(
    header: t.Union["WSGIEnvironment", str, bytes, None],
    charset: str = "utf-8",
    errors: str = "replace",
    cls: t.Optional[t.Type["ds.MultiDict"]] = None,
) -> "ds.MultiDict[str, str]":
    """Parse a cookie from a string or WSGI environ.

    The same key can be provided multiple times, the values are stored
    in-order. The default :class:`MultiDict` will have the first value
    first, and all values can be retrieved with
    :meth:`MultiDict.getlist`.
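
    >>> cookies = parse_cookie("a=first; a=second")
    >>> cookies["a"]
    'first'
    >>> cookies.getlist("a")
    ['first', 'second']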

    :param header: The cookie header as a string, or a WSGI environ dict
        with a ``HTTP_COOKIE`` key.
    :param charset: The charset for the cookie values.
    :param errors: The error behavior for the charset decoding.
    :param cls: A dict-like class to store the parsed cookies in.
        Defaults to :class:`MultiDict`.

    .. versionchanged:: 1.0.0
        Returns a :class:`MultiDict` instead of a
        ``TypeConversionDict``.

    .. versionchanged:: 0.5
        Returns a :class:`TypeConversionDict` instead of a regular dict.
        The ``cls`` parameter was added.
    """
    if isinstance(header, dict):
        header = header.get("HTTP_COOKIE", "")
    elif header is None:
        header = ""

    # PEP 3333 sends headers through the environ as latin1 decoded
    # strings. Encode strings back to bytes for parsing.
    if isinstance(header, str):
        header = header.encode("latin1", "replace")

    if cls is None:
        cls = ds.MultiDict

    def _parse_pairs() -> t.Iterator[t.Tuple[str, str]]:
        for key, val in _cookie_parse_impl(header):  # type: ignore
            key_str = _to_str(key, charset, errors, allow_none_charset=True)

            if not key_str:
                continue

            val_str = _to_str(val, charset, errors, allow_none_charset=True)
            yield key_str, val_str

    return cls(_parse_pairs())


def dump_cookie(
    key: str,
    value: t.Union[bytes, str] = "",
    max_age: t.Optional[t.Union[timedelta, int]] = None,
    expires: t.Optional[t.Union[str, datetime, int, float]] = None,
    path: t.Optional[str] = "/",
    domain: t.Optional[str] = None,
    secure: bool = False,
    httponly: bool = False,
    charset: str = "utf-8",
    sync_expires: bool = True,
    max_size: int = 4093,
    samesite: t.Optional[str] = None,
) -> str:
    """Create a Set-Cookie header without the ``Set-Cookie`` prefix.

    The return value is usually restricted to ascii as the vast majority
    of values are properly escaped, but that is no guarantee. It's
    tunneled through latin1 as required by :pep:`3333`.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.
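
    >>> dump_cookie("lang", "en")
    'lang=en; Path=/'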

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for string values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    :param max_size: Warn if the final header value exceeds this size. The
        default, 4093, should be safely `supported by most browsers
        <cookie_>`_. Set to 0 to disable this check.
    :param samesite: Limits the scope of the cookie such that it will
        only be attached to requests if those requests are same-site.

    .. _`cookie`: http://browsercookielimits.squawky.net/

    .. versionchanged:: 1.0.0
        The string ``'None'`` is accepted for ``samesite``.
    """
    key = _to_bytes(key, charset)
    value = _to_bytes(value, charset)

    if path is not None:
        from .urls import iri_to_uri

        path = iri_to_uri(path, charset)

    domain = _make_cookie_domain(domain)

    if isinstance(max_age, timedelta):
        max_age = int(max_age.total_seconds())

    if expires is not None:
        if not isinstance(expires, str):
            expires = http_date(expires)
    elif max_age is not None and sync_expires:
        expires = http_date(datetime.now(tz=timezone.utc).timestamp() + max_age)

    if samesite is not None:
        samesite = samesite.title()

        if samesite not in {"Strict", "Lax", "None"}:
            raise ValueError("SameSite must be 'Strict', 'Lax', or 'None'.")

    buf = [key + b"=" + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted. Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in (
        (b"Domain", domain, True),
        (b"Expires", expires, False),
        (b"Max-Age", max_age, False),
        (b"Secure", secure, None),
        (b"HttpOnly", httponly, None),
        (b"Path", path, False),
        (b"SameSite", samesite, False),
    ):
        if q is None:
            if v:
                buf.append(k)

            continue

        if v is None:
            continue

        tmp = bytearray(k)

        if not isinstance(v, (bytes, bytearray)):
            v = _to_bytes(str(v), charset)

        if q:
            v = _cookie_quote(v)

        tmp += b"=" + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header for
    # consistency with the headers object.
    rv = b"; ".join(buf)
    rv = rv.decode("latin1")

    # Warn if the final value of the cookie is larger than the limit. If the
    # cookie is too large, then it may be silently ignored by the browser,
    # which can be quite hard to debug.
    cookie_size = len(rv)

    if max_size and cookie_size > max_size:
        value_size = len(value)
        warnings.warn(
            f"The {key.decode(charset)!r} cookie is too large: the value was"
            f" {value_size} bytes but the"
            f" header required {cookie_size - value_size} extra bytes. The final size"
            f" was {cookie_size} bytes but the limit is {max_size} bytes. Browsers may"
            f" silently ignore cookies larger than this.",
            stacklevel=2,
        )

    return rv


def is_byte_range_valid(
    start: t.Optional[int], stop: t.Optional[int], length: t.Optional[int]
) -> bool:
    """Checks if a given byte content range is valid for the given length.

    .. versionadded:: 0.7
    """
    if (start is None) != (stop is None):
        return False
    elif start is None:
        return length is None or length >= 0
    elif length is None:
        return 0 <= start < stop  # type: ignore
    elif start >= stop:  # type: ignore
        return False

    return 0 <= start < length


# circular dependencies
from . import datastructures as ds