
* Streaming request by async for. * Make all requests streaming and preload body for non-streaming handlers. * Cleanup of code and avoid mixing streaming responses. * Async http protocol loop. * Change of test: don't require early bad request error but only after CRLF-CRLF. * Add back streaming requests. * Rewritten request body parser. * Misc. cleanup, down to 4 failing tests. * All tests OK. * Entirely remove request body queue. * Let black f*ckup the layout * Better testing error messages on protocol errors. * Remove StreamBuffer tests because the type is about to be removed. * Remove tests using the deprecated get_headers function that can no longer be supported. Chunked mode is now autodetected, so do not put content-length header if chunked mode is preferred. * Major refactoring of HTTP protocol handling (new module http.py added), all requests made streaming. A few compatibility issues and a lot of cleanup to be done remain, 16 tests failing. * Terminate check_timeouts once connection_task finishes. * Code cleanup, 14 tests failing. * Much cleanup, 12 failing... * Even more cleanup and error checking, 8 failing tests. * Remove keep-alive header from responses. First of all, it should say timeout=<value> which wasn't the case with existing implementation, and secondly none of the other web servers I tried include this header. * Everything but CustomServer OK. * Linter * Disable custom protocol test * Remove unnecessary variables, optimise performance. * A test was missing that body_init/body_push/body_finish are never called. Rewritten using receive_body and case switching to make it fail if bypassed. * Minor fixes. * Remove unused code. * Py 3.8 check for deprecated loop argument. * Fix a middleware cancellation handling test with py38. * Linter 'n fixes * Typing * Stricter handling of request header size * More specific error messages on Payload Too Large. * Init http.response = None * Messages further tuned. 
* Always try to consume request body, plus minor cleanup. * Add a missing check in case of close_if_idle on a dead connection. * Avoid error messages on PayloadTooLarge. * Add test for new API. * json takes str, not bytes * Default to no maximum request size for streaming handlers. * Fix chunked mode crash. * Header values should be strictly ASCII but both UTF-8 and Latin-1 exist. Use UTF-8B to cope with all. * Refactoring and cleanup. * Unify response header processing of ASGI and asyncio modes. * Avoid special handling of StreamingHTTPResponse. * 35 % speedup in HTTP/1.1 response formatting (not so much overall effect). * Duplicate set-cookie headers were being produced. * Cleanup processed_headers some more. * Linting * Import ordering * Response middleware ran by async request.respond(). * Need to check if transport is closing to avoid getting stuck in sending loops after peer has disconnected. * Middleware and error handling refactoring. * Linter * Fix tracking of HTTP stage when writing to transport fails. * Add clarifying comment * Add a check for request body functions and a test for NotImplementedError. * Linter and typing * These must be tuples + hack mypy warnings away. * New streaming test and minor fixes. * Constant receive buffer size. * 256 KiB send and receive buffers. * Revert "256 KiB send and receive buffers." This reverts commit abc1e3edb21a5e6925fa4c856657559608a8d65b. * app.handle_exception already sends the response. * Improved handling of errors during request. * An odd hack to avoid an httpx limitation that causes test failures. * Limit request header size to 8 KiB at most. * Remove unnecessary use of format string. 
* Cleanup tests * Remove artifact * Fix type checking * Mark test for skipping * Cleanup some edge cases * Add ignore_body flag to safe methods * Add unit tests for timeout logic * Add unit tests for timeout logic * Fix Mock usage in timeout test * Change logging test to only logger in handler * Windows py3.8 logging issue with current testing client * Add test_header_size_exceeded * Resolve merge conflicts * Add request middleware to hard exception handling * Add request middleware to hard exception handling * Request middleware on exception handlers * Linting * Cleanup deprecations Co-authored-by: L. Kärkkäinen <tronic@users.noreply.github.com> Co-authored-by: Adam Hopkins <admhpkns@gmail.com>
206 lines
5.5 KiB
Python
206 lines
5.5 KiB
Python
import logging
|
|
import os
|
|
import sys
|
|
import uuid
|
|
|
|
from importlib import reload
|
|
from io import StringIO
|
|
|
|
import pytest
|
|
|
|
import sanic
|
|
|
|
from sanic import Sanic
|
|
from sanic.compat import OS_IS_WINDOWS
|
|
from sanic.log import LOGGING_CONFIG_DEFAULTS, logger
|
|
from sanic.response import text
|
|
from sanic.testing import SanicTestClient
|
|
|
|
|
|
logging_format = """module: %(module)s; \
|
|
function: %(funcName)s(); \
|
|
message: %(message)s"""
|
|
|
|
|
|
def reset_logging():
    """Shut down logging and re-import the module to discard all handlers.

    NOTE(review): currently unused — every call site below is commented out
    (`# reset_logging()`); kept for manual debugging of handler leakage.
    """
    logging.shutdown()
    reload(logging)
|
|
|
|
|
|
def test_log(app):
    """A message logged inside a handler must reach the root logger's stream."""
    stream = StringIO()
    # Drop any pre-existing root handlers so basicConfig installs ours.
    while logging.root.handlers:
        logging.root.removeHandler(logging.root.handlers[0])
    logging.basicConfig(
        format=logging_format, level=logging.DEBUG, stream=stream
    )
    root_logger = logging.getLogger()
    marker = str(uuid.uuid4())

    @app.route("/")
    def handler(request):
        root_logger.info(marker)
        return text("hello")

    request, response = app.test_client.get("/")
    assert marker in stream.getvalue()
|
|
|
|
|
|
def test_logging_defaults():
    # reset_logging()
    """Creating an app without log_config applies the default formatters."""
    app = Sanic("test_logging")

    generic_fmt = LOGGING_CONFIG_DEFAULTS["formatters"]["generic"]["format"]
    access_fmt = LOGGING_CONFIG_DEFAULTS["formatters"]["access"]["format"]

    # Each sanic logger must carry the expected default format string.
    for logger_name, expected in (
        ("sanic.root", generic_fmt),
        ("sanic.error", generic_fmt),
        ("sanic.access", access_fmt),
    ):
        for handler in logging.getLogger(logger_name).handlers:
            assert handler.formatter._fmt == expected
|
|
|
|
|
|
def test_logging_pass_customer_logconfig():
    # reset_logging()
    """A custom log_config passed to Sanic must be applied to all handlers.

    BUGFIX: the original did ``modified_config = LOGGING_CONFIG_DEFAULTS``,
    which merely aliases the shared module-level dict — mutating the format
    strings leaked into LOGGING_CONFIG_DEFAULTS and broke any other test
    (e.g. test_logging_defaults) that asserts on the pristine defaults.
    A deep copy isolates the mutation.
    """
    from copy import deepcopy  # local: only this test needs it

    modified_config = deepcopy(LOGGING_CONFIG_DEFAULTS)
    custom_format = "%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s"
    modified_config["formatters"]["generic"]["format"] = custom_format
    modified_config["formatters"]["access"]["format"] = custom_format

    app = Sanic("test_logging", log_config=modified_config)

    for fmt in [h.formatter for h in logging.getLogger("sanic.root").handlers]:
        assert fmt._fmt == modified_config["formatters"]["generic"]["format"]

    for fmt in [
        h.formatter for h in logging.getLogger("sanic.error").handlers
    ]:
        assert fmt._fmt == modified_config["formatters"]["generic"]["format"]

    for fmt in [
        h.formatter for h in logging.getLogger("sanic.access").handlers
    ]:
        assert fmt._fmt == modified_config["formatters"]["access"]["format"]
|
|
|
|
|
|
@pytest.mark.parametrize("debug", (True, False))
def test_log_connection_lost(app, debug, monkeypatch):
    """Connection-lost warnings must be logged in debug mode only."""
    captured = StringIO()
    root_logger = logging.getLogger("sanic.root")
    root_logger.addHandler(logging.StreamHandler(captured))
    # Route the server module's logger through our stream-backed logger.
    monkeypatch.setattr(sanic.server, "logger", root_logger)

    @app.route("/conn_lost")
    async def conn_lost(request):
        response = text("Ok")
        # Kill the transport before returning so writing the response fails.
        request.transport.close()
        return response

    req, res = app.test_client.get("/conn_lost", debug=debug)
    assert res is None

    message = "Connection lost before response written @"
    output = captured.getvalue()
    if debug:
        assert message in output
    else:
        assert message not in output
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_logger(caplog):
    """Handler log output must appear in caplog under the sanic.root logger."""
    marker = str(uuid.uuid4())
    app = Sanic(name=__name__)

    @app.get("/")
    def log_info(request):
        logger.info(marker)
        return text("hello")

    with caplog.at_level(logging.INFO):
        _ = await app.asgi_client.get("/")

    assert ("sanic.root", logging.INFO, marker) in caplog.record_tuples
|
|
|
|
|
|
@pytest.mark.skipif(
    OS_IS_WINDOWS and sys.version_info >= (3, 8),
    reason="Not testable with current client",
)
def test_logger_static_and_secure(caplog):
    # Same as test_logger, except for more coverage:
    # - test_client initialised separately for static port
    # - using ssl
    marker = str(uuid.uuid4())
    app = Sanic(name=__name__)

    @app.get("/")
    def log_info(request):
        logger.info(marker)
        return text("hello")

    here = os.path.dirname(os.path.realpath(__file__))
    ssl_dict = {
        "cert": os.path.join(here, "certs/selfsigned.cert"),
        "key": os.path.join(here, "certs/selfsigned.key"),
    }

    test_client = SanicTestClient(app, port=42101)
    with caplog.at_level(logging.INFO):
        request, response = test_client.get(
            f"https://127.0.0.1:{test_client.port}/",
            server_kwargs=dict(ssl=ssl_dict),
        )

    port = test_client.port
    # Startup banner, request line, then the handler's marker message.
    assert caplog.record_tuples[:3] == [
        ("sanic.root", logging.INFO, f"Goin' Fast @ https://127.0.0.1:{port}"),
        ("sanic.root", logging.INFO, f"https://127.0.0.1:{port}/"),
        ("sanic.root", logging.INFO, marker),
    ]
    assert caplog.record_tuples[-1] == (
        "sanic.root",
        logging.INFO,
        "Server Stopped",
    )
|
|
|
|
|
|
def test_logging_modified_root_logger_config():
    # reset_logging()
    """A log_config with a modified logger level must take effect.

    BUGFIX: the original did ``modified_config = LOGGING_CONFIG_DEFAULTS``,
    aliasing the shared module-level dict, so setting sanic.root's level to
    DEBUG leaked into LOGGING_CONFIG_DEFAULTS for every test that runs
    afterwards. A deep copy keeps the mutation local to this test.
    """
    from copy import deepcopy  # local: only this test needs it

    modified_config = deepcopy(LOGGING_CONFIG_DEFAULTS)
    modified_config["loggers"]["sanic.root"]["level"] = "DEBUG"

    app = Sanic("test_logging", log_config=modified_config)

    assert logging.getLogger("sanic.root").getEffectiveLevel() == logging.DEBUG
|