7028eae083
* Streaming request by async for.
* Make all requests streaming and preload body for non-streaming handlers.
* Cleanup of code and avoid mixing streaming responses.
* Async http protocol loop.
* Change of test: don't require early bad request error but only after CRLF-CRLF.
* Add back streaming requests.
* Rewritten request body parser.
* Misc. cleanup, down to 4 failing tests.
* All tests OK.
* Entirely remove request body queue.
* Let black f*ckup the layout
* Better testing error messages on protocol errors.
* Remove StreamBuffer tests because the type is about to be removed.
* Remove tests using the deprecated get_headers function that can no longer be supported. Chunked mode is now autodetected, so do not put content-length header if chunked mode is preferred.
* Major refactoring of HTTP protocol handling (new module http.py added), all requests made streaming. A few compatibility issues and a lot of cleanup to be done remain, 16 tests failing.
* Terminate check_timeouts once connection_task finishes.
* Code cleanup, 14 tests failing.
* Much cleanup, 12 failing...
* Even more cleanup and error checking, 8 failing tests.
* Remove keep-alive header from responses. First of all, it should say timeout=<value> which wasn't the case with existing implementation, and secondly none of the other web servers I tried include this header.
* Everything but CustomServer OK.
* Linter
* Disable custom protocol test
* Remove unnecessary variables, optimise performance.
* A test was missing that body_init/body_push/body_finish are never called. Rewritten using receive_body and case switching to make it fail if bypassed.
* Minor fixes.
* Remove unused code.
* Py 3.8 check for deprecated loop argument.
* Fix a middleware cancellation handling test with py38.
* Linter 'n fixes
* Typing
* Stricter handling of request header size
* More specific error messages on Payload Too Large.
* Init http.response = None
* Messages further tuned.
* Always try to consume request body, plus minor cleanup.
* Add a missing check in case of close_if_idle on a dead connection.
* Avoid error messages on PayloadTooLarge.
* Add test for new API.
* json takes str, not bytes
* Default to no maximum request size for streaming handlers.
* Fix chunked mode crash.
* Header values should be strictly ASCII but both UTF-8 and Latin-1 exist. Use UTF-8B to
cope with all.
* Refactoring and cleanup.
* Unify response header processing of ASGI and asyncio modes.
* Avoid special handling of StreamingHTTPResponse.
* 35 % speedup in HTTP/1.1 response formatting (not so much overall effect).
* Duplicate set-cookie headers were being produced.
* Cleanup processed_headers some more.
* Linting
* Import ordering
* Response middleware ran by async request.respond().
* Need to check if transport is closing to avoid getting stuck in sending loops after peer has disconnected.
* Middleware and error handling refactoring.
* Linter
* Fix tracking of HTTP stage when writing to transport fails.
* Add clarifying comment
* Add a check for request body functions and a test for NotImplementedError.
* Linter and typing
* These must be tuples + hack mypy warnings away.
* New streaming test and minor fixes.
* Constant receive buffer size.
* 256 KiB send and receive buffers.
* Revert "256 KiB send and receive buffers."
This reverts commit abc1e3edb2
.
* app.handle_exception already sends the response.
* Improved handling of errors during request.
* An odd hack to avoid an httpx limitation that causes test failures.
* Limit request header size to 8 KiB at most.
* Remove unnecessary use of format string.
* Cleanup tests
* Remove artifact
* Fix type checking
* Mark test for skipping
* Cleanup some edge cases
* Add ignore_body flag to safe methods
* Add unit tests for timeout logic
* Add unit tests for timeout logic
* Fix Mock usage in timeout test
* Change logging test to only logger in handler
* Windows py3.8 logging issue with current testing client
* Add test_header_size_exceeded
* Resolve merge conflicts
* Add request middleware to hard exception handling
* Add request middleware to hard exception handling
* Request middleware on exception handlers
* Linting
* Cleanup deprecations
Co-authored-by: L. Kärkkäinen <tronic@users.noreply.github.com>
Co-authored-by: Adam Hopkins <admhpkns@gmail.com>
194 lines
6.9 KiB
Python
194 lines
6.9 KiB
Python
import re
|
|
|
|
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
|
|
from urllib.parse import unquote
|
|
|
|
from sanic.helpers import STATUS_CODES
|
|
|
|
|
|
# Type aliases shared by the header-parsing helpers below.
HeaderIterable = Iterable[Tuple[str, Any]]  # Values convertible to str
HeaderBytesIterable = Iterable[Tuple[bytes, bytes]]
Options = Dict[str, Union[int, str]]  # key=value fields in various headers
OptionsIterable = Iterable[Tuple[str, str]]  # May contain duplicate keys
|
|
|
|
# Building blocks for the parameter regex: an RFC 7230 token and a
# double-quoted string (quotes themselves are not captured).
_token, _quoted = r"([\w!#$%&'*+\-.^_`|~]+)", r'"([^"]*)"'
# Matches one ;key=value parameter, value either a bare token or quoted.
_param = re.compile(fr";\s*{_token}=(?:{_token}|{_quoted})", re.ASCII)
# Firefox-style escaped quote (\") that is not the end of a quoted string.
_firefox_quote_escape = re.compile(r'\\"(?!; |\s*$)')
_ipv6 = "(?:[0-9A-Fa-f]{0,4}:){2,7}[0-9A-Fa-f]{0,4}"
_ipv6_re = re.compile(_ipv6)
# host[:port] — host is either a bracketed IPv6 literal or a DNS name/IPv4.
_host_re = re.compile(
    r"((?:\[" + _ipv6 + r"\])|[a-zA-Z0-9.\-]{1,253})(?::(\d{1,5}))?"
)

# RFC's quoted-pair escapes are mostly ignored by browsers. Chrome, Firefox and
# curl all have different escaping, that we try to handle as well as possible,
# even though no client escapes in a way that would allow perfect handling.

# For more information, consult ../tests/test_requests.py


def parse_content_header(value: str) -> Tuple[str, Options]:
    """Parse content-type and content-disposition header values.

    E.g. 'form-data; name=upload; filename=\"file.txt\"' to
    ('form-data', {'name': 'upload', 'filename': 'file.txt'})

    Mostly identical to cgi.parse_header and werkzeug.parse_options_header
    but runs faster and handles special characters better. Unescapes quotes.
    """
    # Neutralize Firefox's escaped quotes first so the quoted-string pattern
    # does not terminate early; the placeholder is turned back into a plain
    # quote when the parameter value is extracted below.
    value = _firefox_quote_escape.sub("%22", value)
    base, semicolon, rest = value.partition(";")
    params: Dict[str, Union[int, str]] = {}
    if semicolon:
        # Parameters appear as ;key=token or ;key="quoted string".
        # Later duplicates of a key overwrite earlier ones.
        for m in _param.finditer(semicolon + rest):
            key, plain, quoted_val = m.groups()
            params[key.lower()] = (
                plain if plain else quoted_val.replace("%22", '"')
            )
    return base.strip().lower(), params
|
|
|
|
|
|
# https://tools.ietf.org/html/rfc7230#section-3.2.6 and
# https://tools.ietf.org/html/rfc7239#section-4
# This regex is for *reversed* strings because that works much faster for
# right-to-left matching than the other way around. Be wary that all things are
# a bit backwards! _rparam matches forwarded pairs alike ";key=value"
# Groups (on the reversed input): value-token, value-quoted, key, and the
# separator found to the *left* of the pair in the original string.
_rparam = re.compile(f"(?:{_token}|{_quoted})={_token}\\s*($|[;,])", re.ASCII)
|
|
|
|
|
|
def parse_forwarded(headers, config) -> Optional[Options]:
    """Parse RFC 7239 Forwarded headers.

    The value of `by` or `secret` must match `config.FORWARDED_SECRET`
    :return: dict with keys and values, or None if nothing matched
    """
    # NOTE(review): headers appears to be a multidict supporting getall()
    # (Sanic's header container) — confirm against callers.
    header = headers.getall("forwarded", None)
    secret = config.FORWARDED_SECRET
    if header is None or not secret:
        return None
    header = ",".join(header)  # Join multiple header lines
    if secret not in header:
        return None
    # Loop over <separator><key>=<value> elements from right to left.
    # The string is reversed and scanned with _rparam (written for reversed
    # input) so that the rightmost element carrying the secret can be found
    # without parsing everything to its left.
    sep = pos = None
    options: List[Tuple[str, str]] = []
    found = False
    for m in _rparam.finditer(header[::-1]):
        # Start of new element? (on parser skips and non-semicolon right sep)
        if m.start() != pos or sep != ";":
            # Was the previous element (from right) what we wanted?
            if found:
                break
            # Clear values and parse as new element
            del options[:]
        pos = m.end()
        # All captured groups are still reversed strings here.
        val_token, val_quoted, key, sep = m.groups()
        key = key.lower()[::-1]
        # '"\\' is the reversed spelling of the escape sequence \" inside a
        # quoted value; un-escape before restoring left-to-right order.
        val = (val_token or val_quoted.replace('"\\', '"'))[::-1]
        options.append((key, val))
        if key in ("secret", "by") and val == secret:
            found = True
        # Check if we would return on next round, to avoid useless parse
        if found and sep != ";":
            break
    # If secret was found, return the matching options in left-to-right order
    return fwd_normalize(reversed(options)) if found else None
|
|
|
|
|
|
def parse_xforwarded(headers, config) -> Optional[Options]:
    """Parse traditional proxy headers (X-Real-IP, X-Forwarded-For etc.).

    :return: normalized options dict, or None if no client address was found.
    """
    real_ip_header = config.REAL_IP_HEADER
    proxies_count = config.PROXIES_COUNT
    addr = real_ip_header and headers.get(real_ip_header)
    if not addr and proxies_count:
        assert proxies_count > 0
        try:
            # Merge all X-Forwarded-For style header lines into a single
            # list of entries, dropping empty ones.
            entries = []
            for line in headers.getall(config.FORWARDED_FOR_HEADER):
                entries.extend(
                    part for part in map(str.strip, line.split(",")) if part
                )
            # Count back over the configured number of trusted proxies.
            addr = entries[-proxies_count]
        except (KeyError, IndexError):
            pass
    # No processing of other headers if no address is found
    if not addr:
        return None
    pairs = [("for", addr)]
    for key, name in (
        ("proto", "x-scheme"),
        ("proto", "x-forwarded-proto"),  # Overrides X-Scheme if present
        ("host", "x-forwarded-host"),
        ("port", "x-forwarded-port"),
        ("path", "x-forwarded-path"),
    ):
        # Missing headers yield None and are dropped by fwd_normalize.
        pairs.append((key, headers.get(name)))
    return fwd_normalize(pairs)
|
|
|
|
|
|
def fwd_normalize(fwd: OptionsIterable) -> Options:
    """Normalize and convert values extracted from forwarded headers.

    "by"/"for" addresses are normalized, "port" converted to int, "path"
    percent-decoded and "host"/"proto" lower-cased. Fields whose value is
    None or fails conversion are silently dropped.
    """
    normalized: Dict[str, Union[int, str]] = {}
    for key, val in fwd:
        if val is None:
            continue  # missing header/field: omit entirely
        try:
            if key in ("by", "for"):
                normalized[key] = fwd_normalize_address(val)
            elif key in ("host", "proto"):
                normalized[key] = val.lower()
            elif key == "port":
                normalized[key] = int(val)
            elif key == "path":
                normalized[key] = unquote(val)
            else:
                normalized[key] = val
        except ValueError:
            # Bad port number or "unknown" address: drop the field.
            pass
    return normalized
|
|
|
|
|
|
def fwd_normalize_address(addr: str) -> str:
    """Normalize address fields of proxy headers.

    :raises ValueError: for the RFC 7239 "unknown" identifier, so callers
        can omit the field entirely.
    """
    if addr == "unknown":
        raise ValueError()  # omit unknown value identifiers
    if addr.startswith("_"):
        # Obfuscated identifiers are opaque tokens; keep their case intact.
        return addr
    # Bare IPv6 addresses gain brackets for host:port compatibility.
    bracketed = f"[{addr}]" if _ipv6_re.fullmatch(addr) else addr
    return bracketed.lower()
|
|
|
|
|
|
def parse_host(host: str) -> Tuple[Optional[str], Optional[int]]:
    """Split host:port into hostname and port.

    :return: None in place of missing elements
    """
    match = _host_re.fullmatch(host)
    if match is None:
        # Not a valid host[:port] string at all.
        return None, None
    name, port = match.groups()
    return name.lower(), None if port is None else int(port)
|
|
|
|
|
|
# Pre-rendered b"HTTP/1.1 <code> <reason>\r\n" status lines for every code
# 0-999, indexed directly by status code; codes missing from STATUS_CODES
# render with the reason "UNKNOWN".
_HTTP1_STATUSLINES = [
    b"HTTP/1.1 %d %b\r\n" % (status, STATUS_CODES.get(status, b"UNKNOWN"))
    for status in range(1000)
]
|
|
|
|
|
|
def format_http1_response(status: int, headers: HeaderBytesIterable) -> bytes:
    """Format a HTTP/1.1 response header.

    :param status: HTTP status code (0-999), used to pick the status line.
    :param headers: iterable of (name, value) bytes pairs.
    :return: the complete header block, terminated by an empty CRLF line.
    """
    # Note: benchmarks show that here bytes concat is faster than bytearray,
    # b"".join() or %-formatting. %timeit any changes you make.
    out = _HTTP1_STATUSLINES[status]
    for name, value in headers:
        out += b"%b: %b\r\n" % (name, value)
    return out + b"\r\n"