resolve conflicts again

Yun Xu 2017-10-01 01:22:27 -07:00
commit 225ea49b6f
14 changed files with 639 additions and 62 deletions

View File

@ -25,7 +25,7 @@ import sanic
# -- General configuration ------------------------------------------------
extensions = ['sphinx.ext.autodoc']
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.asyncio']
templates_path = ['_templates']

View File

@ -7,6 +7,7 @@ A list of Sanic extensions created by the community.
- [CORS](https://github.com/ashleysommer/sanic-cors): A port of flask-cors.
- [Compress](https://github.com/subyraman/sanic_compress): Allows you to easily gzip Sanic responses. A port of Flask-Compress.
- [Jinja2](https://github.com/lixxu/sanic-jinja2): Support for Jinja2 templates.
- [JWT](https://github.com/ahopkins/sanic-jwt): Authentication extension for JSON Web Tokens (JWT).
- [OpenAPI/Swagger](https://github.com/channelcat/sanic-openapi): OpenAPI support, plus a Swagger UI.
- [Pagination](https://github.com/lixxu/python-paginate): Simple pagination support.
- [Motor](https://github.com/lixxu/sanic-motor): Simple motor wrapper.

View File

@ -1,3 +1,4 @@
sphinx
sphinx_rtd_theme
recommonmark
sphinxcontrib-asyncio

View File

@ -345,13 +345,13 @@ class Sanic:
# Static Files
def static(self, uri, file_or_directory, pattern=r'/?.+',
use_modified_since=True, use_content_range=False,
stream_large_files=False, name='static'):
stream_large_files=False, name='static', host=None):
"""Register a root to serve files from. The input can either be a
file or a directory. See
"""
static_register(self, uri, file_or_directory, pattern,
use_modified_since, use_content_range,
stream_large_files, name)
stream_large_files, name, host)
def blueprint(self, blueprint, **options):
"""Register a blueprint on the application.
@ -451,7 +451,10 @@ class Sanic:
if external:
if not scheme:
scheme = netloc[:8].split(':', 1)[0]
if ':' in netloc[:8]:
scheme = netloc[:8].split(':', 1)[0]
else:
scheme = 'http'
if '://' in netloc[:8]:
netloc = netloc.split('://', 1)[-1]
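For context, the change above makes url_for fall back to 'http' when an external _server value is supplied without a scheme. A minimal sketch of the resulting behaviour; the route name and server value are illustrative and not part of this diff:

from sanic import Sanic
from sanic.response import text

app = Sanic('url_for_example')  # illustrative app name

@app.route('/posts/<post_id>')
async def get_post(request, post_id):
    return text(post_id)

# With the scheme fallback above, a bare host in _server now yields an http:// URL
url = app.url_for('get_post', post_id=5, _external=True, _server='example.com')
# url == 'http://example.com/posts/5'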
@ -760,6 +763,8 @@ class Sanic:
'request_handler': self.handle_request,
'error_handler': self.error_handler,
'request_timeout': self.config.REQUEST_TIMEOUT,
'response_timeout': self.config.RESPONSE_TIMEOUT,
'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
'request_max_size': self.config.REQUEST_MAX_SIZE,
'keep_alive': self.config.KEEP_ALIVE,
'loop': loop,

View File

@ -31,7 +31,15 @@ class Config(dict):
"""
self.REQUEST_MAX_SIZE = 100000000 # 100 megabytes
self.REQUEST_TIMEOUT = 60 # 60 seconds
self.RESPONSE_TIMEOUT = 60 # 60 seconds
self.KEEP_ALIVE = keep_alive
# Apache httpd server default keepalive timeout = 5 seconds
# Nginx server default keepalive timeout = 75 seconds
# Nginx performance tuning guidelines use keepalive = 15 seconds
# IE client hard keepalive limit = 60 seconds
# Firefox client hard keepalive limit = 115 seconds
self.KEEP_ALIVE_TIMEOUT = 5 # 5 seconds
self.WEBSOCKET_MAX_SIZE = 2 ** 20 # 1 megabyte
self.WEBSOCKET_MAX_QUEUE = 32
self.GRACEFUL_SHUTDOWN_TIMEOUT = 15.0 # 15 sec
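The new RESPONSE_TIMEOUT and KEEP_ALIVE_TIMEOUT settings are plain config attributes and can be overridden per application. A minimal sketch with illustrative values:

from sanic import Sanic

app = Sanic('timeout_config_example')  # illustrative app name

app.config.REQUEST_TIMEOUT = 30       # seconds allowed to receive the full request
app.config.RESPONSE_TIMEOUT = 30      # seconds allowed for the handler to produce a response
app.config.KEEP_ALIVE_TIMEOUT = 10    # seconds an idle keep-alive connection stays open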

View File

@ -155,6 +155,13 @@ class ServerError(SanicException):
pass
@add_status_code(503)
class ServiceUnavailable(SanicException):
"""The server is currently unavailable (because it is overloaded or
down for maintenance). Generally, this is a temporary state."""
pass
class URLBuildError(ServerError):
pass
@ -170,6 +177,13 @@ class FileNotFound(NotFound):
@add_status_code(408)
class RequestTimeout(SanicException):
"""The Web server (running the Web site) thinks that there has been too
long an interval of time between 1) the establishment of an IP
connection (socket) between the client and the server and
2) the receipt of any data on that socket, so the server has dropped
the connection. The socket connection has actually been lost - the Web
server has 'timed out' on that particular socket connection.
"""
pass
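Handlers can be registered for the new ServiceUnavailable exception in the same way as for RequestTimeout, which is what the tests added later in this commit do. A short sketch; the app name and handler names are illustrative:

from sanic import Sanic
from sanic.response import text
from sanic.exceptions import RequestTimeout, ServiceUnavailable

app = Sanic('timeout_handlers_example')  # illustrative app name

@app.exception(RequestTimeout)
def on_request_timeout(request, exception):
    # The full request was not received within REQUEST_TIMEOUT (HTTP 408)
    return text('Request Timeout from error_handler.', 408)

@app.exception(ServiceUnavailable)
def on_response_timeout(request, exception):
    # The handler did not produce a response within RESPONSE_TIMEOUT (HTTP 503)
    return text('Response Timeout from error_handler.', 503)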

View File

@ -19,8 +19,9 @@ except ImportError:
from sanic.exceptions import InvalidUsage
from sanic.log import error_logger
DEFAULT_HTTP_CONTENT_TYPE = "application/octet-stream"
# HTTP/1.1: https://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.2.1
# > If the media type remains unknown, the recipient SHOULD treat it
# > as type "application/octet-stream"
@ -68,15 +69,27 @@ class Request(dict):
self._cookies = None
self.stream = None
def __repr__(self):
if self.method is None or not self.path:
return '<{0}>'.format(self.__class__.__name__)
return '<{0}: {1} {2}>'.format(self.__class__.__name__,
self.method,
self.path)
@property
def json(self):
if self.parsed_json is None:
try:
self.parsed_json = json_loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
self.load_json()
return self.parsed_json
def load_json(self, loads=json_loads):
try:
self.parsed_json = loads(self.body)
except Exception:
if not self.body:
return None
raise InvalidUsage("Failed when parsing body as json")
return self.parsed_json
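The new load_json method lets callers supply their own decoder instead of the default json_loads. A hedged sketch, assuming ujson is available as an alternative decoder (the dependency and route are illustrative, not implied by this diff):

from sanic import Sanic
from sanic.response import text
import ujson  # illustrative alternative decoder; any callable accepting the raw body works

app = Sanic('load_json_example')  # illustrative app name

@app.route('/ingest', methods=['POST'])
async def ingest(request):
    # request.json still uses the default decoder; load_json() accepts a custom one
    data = request.load_json(loads=ujson.loads)
    return text(str(data))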
@ -170,8 +183,8 @@ class Request(dict):
remote_addrs = [
addr for addr in [
addr.strip() for addr in forwarded_for
] if addr
]
] if addr
]
if len(remote_addrs) > 0:
self._remote_addr = remote_addrs[0]
else:

View File

@ -237,7 +237,8 @@ class HTTPResponse(BaseHTTPResponse):
def json(body, status=200, headers=None,
content_type="application/json", **kwargs):
content_type="application/json", dumps=json_dumps,
**kwargs):
"""
Returns response object with body in json format.
@ -246,7 +247,7 @@ def json(body, status=200, headers=None,
:param headers: Custom Headers.
:param kwargs: Remaining arguments that are passed to the json encoder.
"""
return HTTPResponse(json_dumps(body, **kwargs), headers=headers,
return HTTPResponse(dumps(body, **kwargs), headers=headers,
status=status, content_type=content_type)
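The json helper now accepts a dumps callable, so a custom encoder can be plugged in without subclassing HTTPResponse. A small sketch using the standard-library encoder with sorted keys; the names are illustrative:

from functools import partial
from json import dumps

from sanic import Sanic
from sanic.response import json

app = Sanic('custom_dumps_example')  # illustrative app name

# Illustrative: serialize response bodies with deterministic key order
sorted_dumps = partial(dumps, sort_keys=True)

@app.route('/info')
async def info(request):
    return json({'b': 2, 'a': 1}, dumps=sorted_dumps)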

View File

@ -28,7 +28,8 @@ from sanic.log import logger, access_logger
from sanic.response import HTTPResponse
from sanic.request import Request
from sanic.exceptions import (
RequestTimeout, PayloadTooLarge, InvalidUsage, ServerError)
RequestTimeout, PayloadTooLarge, InvalidUsage, ServerError,
ServiceUnavailable)
current_time = None
@ -63,16 +64,19 @@ class HttpProtocol(asyncio.Protocol):
# request params
'parser', 'request', 'url', 'headers',
# request config
'request_handler', 'request_timeout', 'request_max_size',
'request_class', 'is_request_stream', 'router',
'request_handler', 'request_timeout', 'response_timeout',
'keep_alive_timeout', 'request_max_size', 'request_class',
'is_request_stream', 'router',
# enable or disable access log purpose
'access_log',
# connection management
'_total_request_size', '_timeout_handler', '_last_communication_time',
'_is_stream_handler')
'_total_request_size', '_request_timeout_handler',
'_response_timeout_handler', '_keep_alive_timeout_handler',
'_last_request_time', '_last_response_time', '_is_stream_handler')
def __init__(self, *, loop, request_handler, error_handler,
signal=Signal(), connections=set(), request_timeout=60,
response_timeout=60, keep_alive_timeout=15,
request_max_size=None, request_class=None, access_log=True,
keep_alive=True, is_request_stream=False, router=None,
state=None, debug=False, **kwargs):
@ -89,13 +93,18 @@ class HttpProtocol(asyncio.Protocol):
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.response_timeout = response_timeout
self.keep_alive_timeout = keep_alive_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._total_request_size = 0
self._timeout_handler = None
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
@ -118,22 +127,32 @@ class HttpProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.connections.add(self)
self._timeout_handler = self.loop.call_later(
self.request_timeout, self.connection_timeout)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback)
self.transport = transport
self._last_request_time = current_time
def connection_lost(self, exc):
self.connections.discard(self)
self._timeout_handler.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def connection_timeout(self):
# Check if
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception for exactly
# what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = current_time - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._timeout_handler = (
self.loop.call_later(time_left, self.connection_timeout))
self._request_timeout_handler = (
self.loop.call_later(time_left,
self.request_timeout_callback)
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
@ -144,6 +163,36 @@ class HttpProtocol(asyncio.Protocol):
except RequestTimeout as exception:
self.write_error(exception)
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum response timeout value
time_elapsed = current_time - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = (
self.loop.call_later(time_left,
self.response_timeout_callback)
)
else:
try:
raise ServiceUnavailable('Response Timeout')
except ServiceUnavailable as exception:
self.write_error(exception)
def keep_alive_timeout_callback(self):
# Check if elapsed time since last response exceeds our configured
# maximum keep alive timeout value
time_elapsed = current_time - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = (
self.loop.call_later(time_left,
self.keep_alive_timeout_callback)
)
else:
logger.info('KeepAlive Timeout. Closing connection.')
self.transport.close()
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
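The three callbacks above share one pattern: measure the time elapsed since the last relevant event, reschedule via call_later if the deadline has not yet been reached, and act otherwise. Below is a standalone asyncio illustration of that pattern; it is not Sanic code and all names are illustrative:

import asyncio

class SlidingTimeout:
    """Fire on_expire only after `timeout` seconds of inactivity."""

    def __init__(self, loop, timeout, on_expire):
        self.loop = loop
        self.timeout = timeout
        self.on_expire = on_expire
        self.last_activity = loop.time()
        self.handle = loop.call_later(timeout, self._check)

    def touch(self):
        # Record activity; the pending callback will reschedule instead of firing
        self.last_activity = self.loop.time()

    def _check(self):
        elapsed = self.loop.time() - self.last_activity
        if elapsed < self.timeout:
            # Deadline not reached yet: reschedule for the remaining time
            self.handle = self.loop.call_later(self.timeout - elapsed, self._check)
        else:
            self.on_expire()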
@ -206,6 +255,11 @@ class HttpProtocol(asyncio.Protocol):
method=self.parser.get_method().decode(),
transport=self.transport
)
# Remove any existing KeepAlive handler here;
# it will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request)
@ -221,6 +275,11 @@ class HttpProtocol(asyncio.Protocol):
self.request.body.append(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(None))
@ -229,6 +288,9 @@ class HttpProtocol(asyncio.Protocol):
self.execute_request_handler()
def execute_request_handler(self):
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback)
self._last_request_time = current_time
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request,
@ -242,12 +304,15 @@ class HttpProtocol(asyncio.Protocol):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive,
self.request_timeout))
self.keep_alive_timeout))
if self.access_log:
access_logger.info('', extra={
'status': response.status,
@ -275,7 +340,10 @@ class HttpProtocol(asyncio.Protocol):
if not keep_alive:
self.transport.close()
else:
self._last_request_time = current_time
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout,
self.keep_alive_timeout_callback)
self._last_response_time = current_time
self.cleanup()
async def stream_response(self, response):
@ -284,12 +352,14 @@ class HttpProtocol(asyncio.Protocol):
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.transport = self.transport
await response.stream(
self.request.version, keep_alive, self.request_timeout)
self.request.version, keep_alive, self.keep_alive_timeout)
if self.access_log:
access_logger.info('', extra={
'status': response.status,
@ -317,10 +387,18 @@ class HttpProtocol(asyncio.Protocol):
if not keep_alive:
self.transport.close()
else:
self._last_request_time = current_time
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout,
self.keep_alive_timeout_callback)
self._last_response_time = current_time
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
@ -332,8 +410,9 @@ class HttpProtocol(asyncio.Protocol):
self.request.ip if self.request else 'Unknown'))
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True)
"Writing error failed, connection closed {}".format(
repr(e)), from_error=True
)
finally:
if self.access_log:
extra = dict()
@ -369,6 +448,9 @@ class HttpProtocol(asyncio.Protocol):
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
@ -423,8 +505,9 @@ def trigger_events(events, loop):
def serve(host, port, request_handler, error_handler, before_start=None,
after_start=None, before_stop=None, after_stop=None, debug=False,
request_timeout=60, ssl=None, sock=None, request_max_size=None,
reuse_port=False, loop=None, protocol=HttpProtocol, backlog=100,
request_timeout=60, response_timeout=60, keep_alive_timeout=60,
ssl=None, sock=None, request_max_size=None, reuse_port=False,
loop=None, protocol=HttpProtocol, backlog=100,
register_sys_signals=True, run_async=False, connections=None,
signal=Signal(), request_class=None, access_log=True,
keep_alive=True, is_request_stream=False, router=None,
@ -476,6 +559,8 @@ def serve(host, port, request_handler, error_handler, before_start=None,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
response_timeout=response_timeout,
keep_alive_timeout=keep_alive_timeout,
request_max_size=request_max_size,
request_class=request_class,
access_log=access_log,

View File

@ -18,7 +18,7 @@ from sanic.response import file, file_stream, HTTPResponse
def register(app, uri, file_or_directory, pattern,
use_modified_since, use_content_range,
stream_large_files, name='static'):
stream_large_files, name='static', host=None):
# TODO: Though sanic is not a file server, I feel like we should at least
# make a good effort here. Modified-since is nice, but we could
# also look into etags, expires, and caching
@ -122,4 +122,4 @@ def register(app, uri, file_or_directory, pattern,
if not name.startswith('_static_'):
name = '_static_{}'.format(name)
app.route(uri, methods=['GET', 'HEAD'], name=name)(_handler)
app.route(uri, methods=['GET', 'HEAD'], name=name, host=host)(_handler)
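With the host parameter now threaded through register, a static route can be limited to a single virtual host, as the new test at the end of this commit exercises. A minimal sketch; the paths and hostname are illustrative:

from sanic import Sanic

app = Sanic('static_host_example')  # illustrative app name

# Only requests carrying Host: assets.example.com will match this static route
app.static('/images/', './static/images/', host='assets.example.com')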

View File

@ -0,0 +1,269 @@
from json import JSONDecodeError
from sanic import Sanic
import asyncio
from asyncio import sleep as aio_sleep
from sanic.response import text
from sanic.config import Config
from sanic import server
import aiohttp
from aiohttp import TCPConnector
from sanic.testing import SanicTestClient, HOST, PORT
class ReuseableTCPConnector(TCPConnector):
def __init__(self, *args, **kwargs):
super(ReuseableTCPConnector, self).__init__(*args, **kwargs)
self.old_proto = None
@asyncio.coroutine
def connect(self, req):
new_conn = yield from super(ReuseableTCPConnector, self)\
.connect(req)
if self.old_proto is not None:
if self.old_proto != new_conn._protocol:
raise RuntimeError(
"We got a new connection, wanted the same one!")
print(new_conn.__dict__)
self.old_proto = new_conn._protocol
return new_conn
class ReuseableSanicTestClient(SanicTestClient):
def __init__(self, app, loop=None):
super(ReuseableSanicTestClient, self).__init__(app)
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
self._server = None
self._tcp_connector = None
self._session = None
# Copied from SanicTestClient, but with some changes to reuse the
# same loop for the same app.
def _sanic_endpoint_test(
self, method='get', uri='/', gather_request=True,
debug=False, server_kwargs={},
*request_args, **request_kwargs):
loop = self._loop
results = [None, None]
exceptions = []
do_kill_server = request_kwargs.pop('end_server', False)
if gather_request:
def _collect_request(request):
if results[0] is None:
results[0] = request
self.app.request_middleware.appendleft(_collect_request)
@self.app.listener('after_server_start')
async def _collect_response(loop):
try:
if do_kill_server:
request_kwargs['end_session'] = True
response = await self._local_request(
method, uri, *request_args,
**request_kwargs)
results[-1] = response
except Exception as e2:
import traceback
traceback.print_tb(e2.__traceback__)
exceptions.append(e2)
#Don't stop here! self.app.stop()
if self._server is not None:
_server = self._server
else:
_server_co = self.app.create_server(host=HOST, debug=debug,
port=PORT, **server_kwargs)
server.trigger_events(
self.app.listeners['before_server_start'], loop)
try:
loop._stopping = False
http_server = loop.run_until_complete(_server_co)
except Exception as e1:
import traceback
traceback.print_tb(e1.__traceback__)
raise e1
self._server = _server = http_server
server.trigger_events(
self.app.listeners['after_server_start'], loop)
self.app.listeners['after_server_start'].pop()
if do_kill_server:
try:
_server.close()
self._server = None
loop.run_until_complete(_server.wait_closed())
self.app.stop()
except Exception as e3:
import traceback
traceback.print_tb(e3.__traceback__)
exceptions.append(e3)
if exceptions:
raise ValueError(
"Exception during request: {}".format(exceptions))
if gather_request:
self.app.request_middleware.pop()
try:
request, response = results
return request, response
except:
raise ValueError(
"Request and response object expected, got ({})".format(
results))
else:
try:
return results[-1]
except:
raise ValueError(
"Request object expected, got ({})".format(results))
# Copied from SanicTestClient, but with some changes to reuse the
# same TCPConnector and the same ClientSession more than once.
# Note, you cannot use the same session if you are in a _different_
# loop, so the changes above are required too.
async def _local_request(self, method, uri, cookies=None, *args,
**kwargs):
request_keepalive = kwargs.pop('request_keepalive',
Config.KEEP_ALIVE_TIMEOUT)
if uri.startswith(('http:', 'https:', 'ftp:', 'ftps:', '//')):
url = uri
else:
url = 'http://{host}:{port}{uri}'.format(
host=HOST, port=PORT, uri=uri)
do_kill_session = kwargs.pop('end_session', False)
if self._session:
session = self._session
else:
if self._tcp_connector:
conn = self._tcp_connector
else:
conn = ReuseableTCPConnector(verify_ssl=False,
loop=self._loop,
keepalive_timeout=
request_keepalive)
self._tcp_connector = conn
session = aiohttp.ClientSession(cookies=cookies,
connector=conn,
loop=self._loop)
self._session = session
async with getattr(session, method.lower())(
url, *args, **kwargs) as response:
try:
response.text = await response.text()
except UnicodeDecodeError:
response.text = None
try:
response.json = await response.json()
except (JSONDecodeError,
UnicodeDecodeError,
aiohttp.ClientResponseError):
response.json = None
response.body = await response.read()
if do_kill_session:
session.close()
self._session = None
return response
Config.KEEP_ALIVE_TIMEOUT = 2
Config.KEEP_ALIVE = True
keep_alive_timeout_app_reuse = Sanic('test_ka_timeout_reuse')
keep_alive_app_client_timeout = Sanic('test_ka_client_timeout')
keep_alive_app_server_timeout = Sanic('test_ka_server_timeout')
@keep_alive_timeout_app_reuse.route('/1')
async def handler1(request):
return text('OK')
@keep_alive_app_client_timeout.route('/1')
async def handler2(request):
return text('OK')
@keep_alive_app_server_timeout.route('/1')
async def handler3(request):
return text('OK')
def test_keep_alive_timeout_reuse():
"""If the server keep-alive timeout and client keep-alive timeout are
both longer than the delay, the client _and_ server will successfully
reuse the existing connection."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = ReuseableSanicTestClient(keep_alive_timeout_app_reuse, loop)
headers = {
'Connection': 'keep-alive'
}
request, response = client.get('/1', headers=headers)
assert response.status == 200
assert response.text == 'OK'
loop.run_until_complete(aio_sleep(1))
request, response = client.get('/1', end_server=True)
assert response.status == 200
assert response.text == 'OK'
def test_keep_alive_client_timeout():
"""If the server keep-alive timeout is longer than the client
keep-alive timeout, client will try to create a new connection here."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = ReuseableSanicTestClient(keep_alive_app_client_timeout,
loop)
headers = {
'Connection': 'keep-alive'
}
request, response = client.get('/1', headers=headers,
request_keepalive=1)
assert response.status == 200
assert response.text == 'OK'
loop.run_until_complete(aio_sleep(2))
exception = None
try:
request, response = client.get('/1', end_server=True,
request_keepalive=1)
except ValueError as e:
exception = e
assert exception is not None
assert isinstance(exception, ValueError)
assert "got a new connection" in exception.args[0]
def test_keep_alive_server_timeout():
"""If the client keep-alive timeout is longer than the server
keep-alive timeout, the client will either a 'Connection reset' error
_or_ a new connection. Depending on how the event-loop handles the
broken server connection."""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
client = ReuseableSanicTestClient(keep_alive_app_server_timeout,
loop)
headers = {
'Connection': 'keep-alive'
}
request, response = client.get('/1', headers=headers,
request_keepalive=60)
assert response.status == 200
assert response.text == 'OK'
loop.run_until_complete(aio_sleep(3))
exception = None
try:
request, response = client.get('/1', request_keepalive=60,
end_server=True)
except ValueError as e:
exception = e
assert exception is not None
assert isinstance(exception, ValueError)
assert "Connection reset" in exception.args[0] or \
"got a new connection" in exception.args[0]

View File

@ -1,38 +1,163 @@
from json import JSONDecodeError
from sanic import Sanic
import asyncio
from sanic.response import text
from sanic.exceptions import RequestTimeout
from sanic.config import Config
import aiohttp
from aiohttp import TCPConnector
from sanic.testing import SanicTestClient, HOST, PORT
Config.REQUEST_TIMEOUT = 1
request_timeout_app = Sanic('test_request_timeout')
class DelayableTCPConnector(TCPConnector):
class RequestContextManager(object):
def __new__(cls, req, delay):
cls = super(DelayableTCPConnector.RequestContextManager, cls).\
__new__(cls)
cls.req = req
cls.send_task = None
cls.resp = None
cls.orig_send = getattr(req, 'send')
cls.orig_start = None
cls.delay = delay
cls._acting_as = req
return cls
def __getattr__(self, item):
acting_as = self._acting_as
return getattr(acting_as, item)
@asyncio.coroutine
def start(self, connection, read_until_eof=False):
if self.send_task is None:
raise RuntimeError("do a send() before you do a start()")
resp = yield from self.send_task
self.send_task = None
self.resp = resp
self._acting_as = self.resp
self.orig_start = getattr(resp, 'start')
try:
ret = yield from self.orig_start(connection,
read_until_eof)
except Exception as e:
raise e
return ret
def close(self):
if self.resp is not None:
self.resp.close()
if self.send_task is not None:
self.send_task.cancel()
@asyncio.coroutine
def delayed_send(self, *args, **kwargs):
req = self.req
if self.delay and self.delay > 0:
#sync_sleep(self.delay)
_ = yield from asyncio.sleep(self.delay)
t = req.loop.time()
print("sending at {}".format(t), flush=True)
conn = next(iter(args)) # first arg is connection
try:
delayed_resp = self.orig_send(*args, **kwargs)
except Exception as e:
return aiohttp.ClientResponse(req.method, req.url)
return delayed_resp
def send(self, *args, **kwargs):
gen = self.delayed_send(*args, **kwargs)
task = self.req.loop.create_task(gen)
self.send_task = task
self._acting_as = task
return self
def __init__(self, *args, **kwargs):
_post_connect_delay = kwargs.pop('post_connect_delay', 0)
_pre_request_delay = kwargs.pop('pre_request_delay', 0)
super(DelayableTCPConnector, self).__init__(*args, **kwargs)
self._post_connect_delay = _post_connect_delay
self._pre_request_delay = _pre_request_delay
@asyncio.coroutine
def connect(self, req):
d_req = DelayableTCPConnector.\
RequestContextManager(req, self._pre_request_delay)
conn = yield from super(DelayableTCPConnector, self).connect(req)
if self._post_connect_delay and self._post_connect_delay > 0:
_ = yield from asyncio.sleep(self._post_connect_delay,
loop=self._loop)
req.send = d_req.send
t = req.loop.time()
print("Connected at {}".format(t), flush=True)
return conn
class DelayableSanicTestClient(SanicTestClient):
def __init__(self, app, loop, request_delay=1):
super(DelayableSanicTestClient, self).__init__(app)
self._request_delay = request_delay
self._loop = None
async def _local_request(self, method, uri, cookies=None, *args,
**kwargs):
if self._loop is None:
self._loop = asyncio.get_event_loop()
if uri.startswith(('http:', 'https:', 'ftp:', 'ftps:', '//')):
url = uri
else:
url = 'http://{host}:{port}{uri}'.format(
host=HOST, port=PORT, uri=uri)
conn = DelayableTCPConnector(pre_request_delay=self._request_delay,
verify_ssl=False, loop=self._loop)
async with aiohttp.ClientSession(cookies=cookies, connector=conn,
loop=self._loop) as session:
# Insert a delay after creating the connection
# But before sending the request.
async with getattr(session, method.lower())(
url, *args, **kwargs) as response:
try:
response.text = await response.text()
except UnicodeDecodeError:
response.text = None
try:
response.json = await response.json()
except (JSONDecodeError,
UnicodeDecodeError,
aiohttp.ClientResponseError):
response.json = None
response.body = await response.read()
return response
Config.REQUEST_TIMEOUT = 2
request_timeout_default_app = Sanic('test_request_timeout_default')
@request_timeout_app.route('/1')
async def handler_1(request):
await asyncio.sleep(2)
return text('OK')
@request_timeout_app.exception(RequestTimeout)
def handler_exception(request, exception):
return text('Request Timeout from error_handler.', 408)
def test_server_error_request_timeout():
request, response = request_timeout_app.test_client.get('/1')
assert response.status == 408
assert response.text == 'Request Timeout from error_handler.'
request_no_timeout_app = Sanic('test_request_no_timeout')
@request_timeout_default_app.route('/1')
async def handler_2(request):
await asyncio.sleep(2)
async def handler1(request):
return text('OK')
@request_no_timeout_app.route('/1')
async def handler2(request):
return text('OK')
def test_default_server_error_request_timeout():
request, response = request_timeout_default_app.test_client.get('/1')
client = DelayableSanicTestClient(request_timeout_default_app, None, 3)
request, response = client.get('/1')
assert response.status == 408
assert response.text == 'Error: Request Timeout'
def test_default_server_error_request_dont_timeout():
client = DelayableSanicTestClient(request_no_timeout_app, None, 1)
request, response = client.get('/1')
assert response.status == 200
assert response.text == 'OK'

View File

@ -0,0 +1,38 @@
from sanic import Sanic
import asyncio
from sanic.response import text
from sanic.exceptions import ServiceUnavailable
from sanic.config import Config
Config.RESPONSE_TIMEOUT = 1
response_timeout_app = Sanic('test_response_timeout')
response_timeout_default_app = Sanic('test_response_timeout_default')
@response_timeout_app.route('/1')
async def handler_1(request):
await asyncio.sleep(2)
return text('OK')
@response_timeout_app.exception(ServiceUnavailable)
def handler_exception(request, exception):
return text('Response Timeout from error_handler.', 503)
def test_server_error_response_timeout():
request, response = response_timeout_app.test_client.get('/1')
assert response.status == 503
assert response.text == 'Response Timeout from error_handler.'
@response_timeout_default_app.route('/1')
async def handler_2(request):
await asyncio.sleep(2)
return text('OK')
def test_default_server_error_response_timeout():
request, response = response_timeout_default_app.test_client.get('/1')
assert response.status == 503
assert response.text == 'Error: Response Timeout'

View File

@ -161,3 +161,20 @@ def test_static_content_range_error(file_name, static_file_directory):
assert 'Content-Range' in response.headers
assert response.headers['Content-Range'] == "bytes */%s" % (
len(get_file_content(static_file_directory, file_name)),)
@pytest.mark.parametrize('file_name', ['test.file', 'decode me.txt', 'python.png'])
def test_static_file(static_file_directory, file_name):
app = Sanic('test_static')
app.static(
'/testing.file',
get_file_path(static_file_directory, file_name),
host="www.example.com"
)
headers = {"Host": "www.example.com"}
request, response = app.test_client.get('/testing.file', headers=headers)
assert response.status == 200
assert response.body == get_file_content(static_file_directory, file_name)
request, response = app.test_client.get('/testing.file')
assert response.status == 404