Performance degraded - added request-too-large check and keep-alive timeout

Channel Cat 2016-10-08 17:27:10 -07:00
parent fa980b1445
commit b0d38f8a04
5 changed files with 60 additions and 60 deletions

sanic/config.py

@@ -1,4 +1,5 @@
-LOGO = """
+class Config:
+    LOGO = """
                   _______________
                  /               \\
@@ -18,4 +19,6 @@ LOGO = """
-"""
+    """
+    REQUEST_MAX_SIZE = 100000000  # 100 megabytes
+    KEEP_ALIVE_TIMEOUT = 60  # 60 seconds
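
The two new Config attributes are server-wide defaults: REQUEST_MAX_SIZE caps how many bytes a single request may occupy, and KEEP_ALIVE_TIMEOUT bounds how long an idle connection is held open. A standalone sketch of reading and overriding them (not part of the commit; the sanic.config module path is inferred from the `from .config import Config` import later in this diff):

# Sketch only: module path inferred, not shown in the diff itself.
from sanic.config import Config

config = Config()
config.KEEP_ALIVE_TIMEOUT = 30   # tighten the idle timeout to 30 seconds
print(config.REQUEST_MAX_SIZE)   # 100000000 bytes, i.e. 100 MB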

sanic/request.py

@@ -6,6 +6,7 @@ class Request:
     __slots__ = ('url', 'headers', 'version', 'method', 'query_string', 'body', 'parsed_json', 'parsed_args')

     def __init__(self, url_bytes, headers, version, method):
+        # TODO: Content-Encoding detection
         url_parsed = parse_url(url_bytes)
         self.url = url_parsed.path.decode('utf-8')
         self.headers = headers
@@ -37,4 +38,6 @@ class Request:
             else:
                 self.parsed_args = {}
-            return self.parsed_args
+        return self.parsed_args
+
+    # TODO: Files
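
The hunk above is the tail of a parse-once accessor: parsed_args starts unset, is filled on first use, and the cached dict is returned on every later access. A minimal standalone sketch of that pattern (hypothetical names, not the commit's code):

from urllib.parse import parse_qs

class LazyArgs:
    # Sketch of the parse-once-and-cache shape seen above; not Sanic code.
    def __init__(self, query_string):
        self.query_string = query_string
        self.parsed_args = None

    @property
    def args(self):
        if self.parsed_args is None:
            if self.query_string:
                self.parsed_args = parse_qs(self.query_string)
            else:
                self.parsed_args = {}
        return self.parsed_args

print(LazyArgs('a=1&a=2').args)  # {'a': ['1', '2']}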

sanic/response.py

@@ -31,14 +31,19 @@ class HTTPResponse:
         self.status = status

-    def output(self, version="1.1", keep_alive=False):
+    def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None):
         # This is all returned in a kind-of funky way
         # We tried to make this as fast as possible in pure python
+
+        additional_headers = []
+        if keep_alive and keep_alive_timeout is not None:
+            additional_headers = [b'Keep-Alive: timeout=', str(keep_alive_timeout).encode(), b's\r\n']
+
         return b''.join([
             'HTTP/{} {} {}\r\n'.format(version, self.status, STATUS_CODES.get(self.status, 'FAIL')).encode(),
             b'Content-Type: ', self.content_type.encode(), b'\r\n',
             b'Content-Length: ', str(len(self.body)).encode(), b'\r\n',
             b'Connection: ', ('keep-alive' if keep_alive else 'close').encode(), b'\r\n',
+        ] + additional_headers + [
             b'\r\n',
             self.body,
         ])
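
The new additional_headers list splices a Keep-Alive header between the fixed headers and the blank line that ends the head. This fragment reproduces just that assembly in plain Python (not part of the commit) to show the exact bytes produced:

keep_alive = True
keep_alive_timeout = 60

additional_headers = []
if keep_alive and keep_alive_timeout is not None:
    additional_headers = [b'Keep-Alive: timeout=', str(keep_alive_timeout).encode(), b's\r\n']

head = b''.join([b'Connection: keep-alive\r\n'] + additional_headers + [b'\r\n'])
print(head)  # b'Connection: keep-alive\r\nKeep-Alive: timeout=60s\r\n\r\n'

Note the trailing b's\r\n' puts `timeout=60s` on the wire; the conventional Keep-Alive form is `timeout=60` with no unit suffix.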

sanic/sanic.py

@@ -1,8 +1,9 @@
-from .router import Router
+from .config import Config
 from .exceptions import Handler
-from .response import HTTPResponse
-from .server import serve
 from .log import log
+from .response import HTTPResponse
+from .router import Router
+from .server import serve

 class Sanic:
     name = None
@@ -15,6 +16,7 @@ class Sanic:
         self.name = name
         self.router = router or Router()
         self.error_handler = error_handler or Handler()
+        self.config = Config()

     def route(self, uri):
         def response(handler):
sanic/server.py

@@ -8,6 +8,7 @@ import logging
 from inspect import isawaitable
 from ujson import loads as json_loads
 from traceback import format_exc
+from time import time

 import httptools
 try:
@@ -18,18 +19,16 @@ except:
 from socket import *

 from .log import log
-from .config import LOGO
 from .exceptions import ServerError
 from .response import HTTPResponse
 from .request import Request

 class HttpProtocol(asyncio.Protocol):
-    __slots__ = ('loop',
-                 'transport', 'request', 'parser',
-                 'url', 'headers', 'sanic',
-                 '_total_body_size')
+    __slots__ = ('loop', 'transport',  # event loop, connection
+                 'parser', 'request', 'url', 'headers',  # request params
+                 'sanic',  # router and config object
+                 '_total_request_size', '_timeout_handler')  # connection management

     def __init__(self, *, sanic, loop):
         self.loop = loop
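
Because __slots__ replaces the per-instance __dict__, every attribute the protocol assigns must be declared in it, including the new _total_request_size and _timeout_handler; assigning an undeclared name raises immediately. A short demonstration (plain Python, not part of the commit):

class Proto:
    __slots__ = ('_total_request_size',)

p = Proto()
p._total_request_size = 0    # declared in __slots__: fine
try:
    p._total_body_size = 0   # not declared: AttributeError
except AttributeError as e:
    print(e)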
@@ -39,51 +38,53 @@ class HttpProtocol(asyncio.Protocol):
         self.url = None
         self.headers = None
         self.sanic = sanic
+        self._total_request_size = 0

     # -------------------------------------------- #
     # Connection
     # -------------------------------------------- #

     def connection_made(self, transport):
+        self._timeout_handler = self.loop.call_later(self.sanic.config.KEEP_ALIVE_TIMEOUT, self.connection_timeout)
         self.transport = transport
-        #TODO: handle connection timeout

         # TCP Nodelay
         # I have no evidence to support this makes anything faster
         # So I'll leave it commented out for now
         # sock = transport.get_extra_info('socket')
         # try:
         #     sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
         # except (OSError, NameError):
         #     pass

     def connection_lost(self, exc):
-        self.request = self.parser = None
+        self._timeout_handler.cancel()
+        self.cleanup()
+
+    def connection_timeout(self):
+        self.bail_out("Request timed out, connection closed")

     # -------------------------------------------- #
     # Parsing
     # -------------------------------------------- #

     def data_received(self, data):
-        #TODO: handle body too large
+        # Check for the request itself getting too large and exceeding memory limits
+        self._total_request_size += len(data)
+        if self._total_request_size > self.sanic.config.REQUEST_MAX_SIZE:
+            return self.bail_out("Request too large ({}), connection closed".format(self._total_request_size))

         # Create parser if this is the first time we're receiving data
         if self.parser is None:
             assert self.request is None
             self.headers = []
             self.parser = httptools.HttpRequestParser(self)

         # Parse request chunk or close connection
         try:
-            #print(data)
             self.parser.feed_data(data)
         except httptools.parser.errors.HttpParserError as e:
-            log.error("Invalid request data, connection closed ({})".format(e))
-            self.transport.close()
+            self.bail_out("Invalid request data, connection closed ({})".format(e))

     def on_url(self, url):
         self.url = url

     def on_header(self, name, value):
+        if name == b'Content-Length' and int(value) > self.sanic.config.REQUEST_MAX_SIZE:
+            return self.bail_out("Request body too large ({}), connection closed".format(value))
         self.headers.append((name, value.decode('utf-8')))

     def on_headers_complete(self):
@@ -93,7 +94,6 @@ class HttpProtocol(asyncio.Protocol):
             version=self.parser.get_http_version(),
             method=self.parser.get_method().decode()
         )
-        #print("res {} - {}".format(n, self.request))

     def on_body(self, body):
         self.request.body = body
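
on_url, on_header, on_headers_complete and on_body are not arbitrary names: httptools.HttpRequestParser calls these hooks on whatever object it was constructed with as it consumes bytes. A minimal standalone illustration (not part of the commit):

import httptools

class Callbacks:
    # The parser invokes these hooks as it walks the request bytes.
    def on_url(self, url):
        print('url:', url)

    def on_header(self, name, value):
        print('header:', name, value)

    def on_headers_complete(self):
        print('headers complete')

    def on_body(self, body):
        print('body:', body)

parser = httptools.HttpRequestParser(Callbacks())
parser.feed_data(b'POST / HTTP/1.1\r\nContent-Length: 2\r\n\r\nhi')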
@@ -131,33 +131,25 @@ class HttpProtocol(asyncio.Protocol):
         #print("response - {} - {}".format(self.n, self.request))
         try:
             keep_alive = self.parser.should_keep_alive()
-            self.transport.write(response.output(self.request.version, keep_alive))
+            self.transport.write(response.output(self.request.version, keep_alive, self.sanic.config.KEEP_ALIVE_TIMEOUT))
-            #print("KA - {}".format(self.parser.should_keep_alive()))
             if not keep_alive:
                 self.transport.close()
+            else:
+                self.cleanup()
         except Exception as e:
-            log.error("Writing request failed, connection closed {}".format(e))
-            self.transport.close()
+            self.bail_out("Writing request failed, connection closed {}".format(e))

+    def bail_out(self, error):
+        log.error(error)
+        self.transport.close()
+
+    def cleanup(self):
+        self.parser = None
+        self.request = None
-    # -------------------------------------------- #
-    # Async
-    # -------------------------------------------- #
-
-    async def handle_response(self, future, handler, request):
-        response = await handler(request)
-        future.set_result((request, response))
-
-    def handle_result(self, future):
-        request, response = future.result()
-        self.write_response(request, response)
-
-def abort(msg):
-    log.info(msg, file=sys.stderr)
-    sys.exit(1)
+        self.url = None
+        self.headers = None
+        self._total_request_size = 0

 def serve(sanic, host, port, debug=False, on_start=None, on_stop=None):
     # Create Event Loop
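
The timeout machinery added above reduces to one asyncio primitive: loop.call_later schedules connection_timeout, and connection_lost cancels the handle. A self-contained sketch of the same pattern, independent of Sanic (not part of the commit). As in the diff, the timer is armed once in connection_made and never rescheduled, so a keep-alive connection is closed a fixed interval after it opens rather than after its last request:

import asyncio

class IdleTimeoutProtocol(asyncio.Protocol):
    # Sketch of the call_later timeout pattern used by HttpProtocol above.
    timeout = 60  # seconds, mirrors KEEP_ALIVE_TIMEOUT

    def connection_made(self, transport):
        self.transport = transport
        loop = asyncio.get_event_loop()
        self._timeout_handler = loop.call_later(self.timeout, self._on_timeout)

    def _on_timeout(self):
        self.transport.close()  # fires only if the handle was never cancelled

    def connection_lost(self, exc):
        self._timeout_handler.cancel()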
@@ -165,36 +157,31 @@ def serve(sanic, host, port, debug=False, on_start=None, on_stop=None):
     asyncio.set_event_loop(loop)
     loop.set_debug(debug)

     # Add signal handlers
     def ask_exit(signame):
         log.debug("Exiting, received signal %s" % signame)
         loop.stop()

     for signame in ('SIGINT', 'SIGTERM'):
         loop.add_signal_handler(getattr(signal, signame), functools.partial(ask_exit, signame))

     if debug:
         log.setLevel(logging.DEBUG)
-    log.debug(LOGO)
+    log.debug(sanic.config.LOGO)

     # Serve
     log.info('Goin\' Fast @ {}:{}'.format(host, port))

     # Run the on_start function if provided
     if on_start:
-        print("start1")
         result = on_start(sanic, loop)
         if isawaitable(result):
-            print("start2")
             loop.run_until_complete(result)

     server_coroutine = loop.create_server(lambda: HttpProtocol(loop=loop, sanic=sanic), host, port)
+    #connection_timeout_coroutine =
     server_loop = loop.run_until_complete(server_coroutine)
     try:
         loop.run_forever()
     finally:
         # Run the on_stop function if provided
         if on_stop:
             result = on_stop(sanic, loop)
             if isawaitable(result):
                 loop.run_until_complete(result)

         # Wait for event loop to finish and all connections to drain
         server_loop.close()
         loop.close()
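
The request-size protection in this commit works at two levels: on_header rejects a declared Content-Length over REQUEST_MAX_SIZE before the body arrives, and data_received counts raw bytes so an oversized request cannot slip through in small chunks. A standalone sketch of the cumulative check (not part of the commit):

REQUEST_MAX_SIZE = 100000000  # same default the commit adds to Config

class SizeGuard:
    # Sketch of the byte-counting in data_received; not Sanic code.
    def __init__(self):
        self._total_request_size = 0

    def feed(self, data):
        self._total_request_size += len(data)
        return self._total_request_size <= REQUEST_MAX_SIZE

guard = SizeGuard()
print(guard.feed(b'x' * 1000))              # True: under the limit
print(guard.feed(b'x' * REQUEST_MAX_SIZE))  # False: connection would be bailed out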