Performance degraded — added request-too-large check and keep-alive timeout

This commit is contained in:
Channel Cat 2016-10-08 17:27:10 -07:00
parent fa980b1445
commit b0d38f8a04
5 changed files with 60 additions and 60 deletions

View File

@@ -1,4 +1,5 @@
LOGO = """ class Config:
LOGO = """
_______________ _______________
/ \\ / \\
@@ -18,4 +19,6 @@ LOGO = """
""" """
REQUEST_MAX_SIZE = 100000000 # 100 megabytes
KEEP_ALIVE_TIMEOUT = 60 # 60 seconds

View File

@@ -6,6 +6,7 @@ class Request:
__slots__ = ('url', 'headers', 'version', 'method', 'query_string', 'body', 'parsed_json', 'parsed_args') __slots__ = ('url', 'headers', 'version', 'method', 'query_string', 'body', 'parsed_json', 'parsed_args')
def __init__(self, url_bytes, headers, version, method): def __init__(self, url_bytes, headers, version, method):
# TODO: Content-Encoding detection
url_parsed = parse_url(url_bytes) url_parsed = parse_url(url_bytes)
self.url = url_parsed.path.decode('utf-8') self.url = url_parsed.path.decode('utf-8')
self.headers = headers self.headers = headers
@@ -37,4 +38,6 @@ class Request:
else: else:
self.parsed_args = {} self.parsed_args = {}
return self.parsed_args return self.parsed_args
# TODO: Files

View File

@@ -31,14 +31,19 @@ class HTTPResponse:
self.status = status self.status = status
def output(self, version="1.1", keep_alive=False): def output(self, version="1.1", keep_alive=False, keep_alive_timeout=None):
# This is all returned in a kind-of funky way # This is all returned in a kind-of funky way
# We tried to make this as fast as possible in pure python # We tried to make this as fast as possible in pure python
additional_headers = []
if keep_alive and not keep_alive_timeout is None:
additional_headers = [b'Keep-Alive: timeout=', str(keep_alive_timeout).encode(), b's\r\n']
return b''.join([ return b''.join([
'HTTP/{} {} {}\r\n'.format(version, self.status, STATUS_CODES.get(self.status, 'FAIL')).encode(), 'HTTP/{} {} {}\r\n'.format(version, self.status, STATUS_CODES.get(self.status, 'FAIL')).encode(),
b'Content-Type: ', self.content_type.encode(), b'\r\n', b'Content-Type: ', self.content_type.encode(), b'\r\n',
b'Content-Length: ', str(len(self.body)).encode(), b'\r\n', b'Content-Length: ', str(len(self.body)).encode(), b'\r\n',
b'Connection: ', ('keep-alive' if keep_alive else 'close').encode(), b'\r\n', b'Connection: ', ('keep-alive' if keep_alive else 'close').encode(), b'\r\n',
] + additional_headers + [
b'\r\n', b'\r\n',
self.body, self.body,
]) ])

View File

@@ -1,8 +1,9 @@
from .router import Router from .config import Config
from .exceptions import Handler from .exceptions import Handler
from .response import HTTPResponse
from .server import serve
from .log import log from .log import log
from .response import HTTPResponse
from .router import Router
from .server import serve
class Sanic: class Sanic:
name = None name = None
@@ -15,6 +16,7 @@ class Sanic:
self.name = name self.name = name
self.router = router or Router() self.router = router or Router()
self.error_handler = error_handler or Handler() self.error_handler = error_handler or Handler()
self.config = Config()
def route(self, uri): def route(self, uri):
def response(handler): def response(handler):

View File

@@ -8,6 +8,7 @@ import logging
from inspect import isawaitable from inspect import isawaitable
from ujson import loads as json_loads from ujson import loads as json_loads
from traceback import format_exc from traceback import format_exc
from time import time
import httptools import httptools
try: try:
@@ -18,18 +19,16 @@ except:
from socket import * from socket import *
from .log import log from .log import log
from .config import LOGO
from .exceptions import ServerError from .exceptions import ServerError
from .response import HTTPResponse from .response import HTTPResponse
from .request import Request from .request import Request
class HttpProtocol(asyncio.Protocol): class HttpProtocol(asyncio.Protocol):
__slots__ = ('loop', __slots__ = ('loop', 'transport', # event loop, connection
'transport', 'request', 'parser', 'parser', 'request', 'url', 'headers', # request params
'url', 'headers', 'sanic', 'sanic', # router and config object
'_total_body_size') '_total_body_size', '_timeout_handler') # connection management
def __init__(self, *, sanic, loop): def __init__(self, *, sanic, loop):
self.loop = loop self.loop = loop
@@ -39,51 +38,53 @@ class HttpProtocol(asyncio.Protocol):
self.url = None self.url = None
self.headers = None self.headers = None
self.sanic = sanic self.sanic = sanic
self._total_request_size = 0
# -------------------------------------------- # # -------------------------------------------- #
# Connection # Connection
# -------------------------------------------- # # -------------------------------------------- #
def connection_made(self, transport): def connection_made(self, transport):
self._timeout_handler = self.loop.call_later(self.sanic.config.KEEP_ALIVE_TIMEOUT, self.connection_timeout)
self.transport = transport self.transport = transport
#TODO: handle connection timeout #TODO: handle connection timeout
# TCP Nodelay
# I have no evidence to support this makes anything faster
# So I'll leave it commented out for now
# sock = transport.get_extra_info('socket')
# try:
# sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
# except (OSError, NameError):
# pass
def connection_lost(self, exc): def connection_lost(self, exc):
self.request = self.parser = None self._timeout_handler.cancel()
self.cleanup()
def connection_timeout(self):
self.bail_out("Request timed out, connection closed")
# -------------------------------------------- # # -------------------------------------------- #
# Parsing # Parsing
# -------------------------------------------- # # -------------------------------------------- #
def data_received(self, data): def data_received(self, data):
#TODO: handle body too large # Check for the request itself getting too large and exceeding memory limits
self._total_request_size += len(data)
if self._total_request_size > self.sanic.config.REQUEST_MAX_SIZE:
return self.bail_out("Request too large ({}), connection closed".format(self._total_request_size))
# Create parser if this is the first time we're receiving data
if self.parser is None: if self.parser is None:
assert self.request is None assert self.request is None
self.headers = [] self.headers = []
self.parser = httptools.HttpRequestParser(self) self.parser = httptools.HttpRequestParser(self)
# Parse request chunk or close connection
try: try:
#print(data)
self.parser.feed_data(data) self.parser.feed_data(data)
except httptools.parser.errors.HttpParserError as e: except httptools.parser.errors.HttpParserError as e:
log.error("Invalid request data, connection closed ({})".format(e)) self.bail_out("Invalid request data, connection closed ({})".format(e))
self.transport.close()
def on_url(self, url): def on_url(self, url):
self.url = url self.url = url
def on_header(self, name, value): def on_header(self, name, value):
if name == 'Content-Length' and int(value) > self.sanic.config.REQUEST_MAX_SIZE:
return self.bail_out("Request body too large ({}), connection closed".format(value))
self.headers.append((name, value.decode('utf-8'))) self.headers.append((name, value.decode('utf-8')))
def on_headers_complete(self): def on_headers_complete(self):
@@ -93,7 +94,6 @@ class HttpProtocol(asyncio.Protocol):
version=self.parser.get_http_version(), version=self.parser.get_http_version(),
method=self.parser.get_method().decode() method=self.parser.get_method().decode()
) )
#print("res {} - {}".format(n, self.request))
def on_body(self, body): def on_body(self, body):
self.request.body = body self.request.body = body
@@ -131,33 +131,25 @@ class HttpProtocol(asyncio.Protocol):
#print("response - {} - {}".format(self.n, self.request)) #print("response - {} - {}".format(self.n, self.request))
try: try:
keep_alive = self.parser.should_keep_alive() keep_alive = self.parser.should_keep_alive()
self.transport.write(response.output(self.request.version, keep_alive)) self.transport.write(response.output(self.request.version, keep_alive, self.sanic.config.KEEP_ALIVE_TIMEOUT))
#print("KA - {}".format(self.parser.should_keep_alive())) #print("KA - {}".format(self.parser.should_keep_alive()))
if not keep_alive: if not keep_alive:
self.transport.close() self.transport.close()
else:
self.cleanup()
except Exception as e: except Exception as e:
log.error("Writing request failed, connection closed {}".format(e)) self.bail_out("Writing request failed, connection closed {}".format(e))
self.transport.close()
def bail_out(self, error):
log.error(error)
self.transport.close()
def cleanup(self):
self.parser = None self.parser = None
self.request = None self.request = None
self.url = None
# -------------------------------------------- # self.headers = None
# Async self._total_body_size = 0
# -------------------------------------------- #
async def handle_response(self, future, handler, request):
response = await handler(request)
future.set_result((request, response))
def handle_result(self, future):
request, response = future.result()
self.write_response(request, response)
def abort(msg):
log.info(msg, file=sys.stderr)
sys.exit(1)
def serve(sanic, host, port, debug=False, on_start=None, on_stop=None): def serve(sanic, host, port, debug=False, on_start=None, on_stop=None):
# Create Event Loop # Create Event Loop
@@ -165,36 +157,31 @@ def serve(sanic, host, port, debug=False, on_start=None, on_stop=None):
asyncio.set_event_loop(loop) asyncio.set_event_loop(loop)
loop.set_debug(debug) loop.set_debug(debug)
# Add signal handlers
def ask_exit(signame):
log.debug("Exiting, received signal %s" % signame)
loop.stop()
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(getattr(signal, signame), functools.partial(ask_exit, signame))
if debug: if debug:
log.setLevel(logging.DEBUG) log.setLevel(logging.DEBUG)
log.debug(LOGO) log.debug(sanic.config.LOGO)
# Serve # Serve
log.info('Goin\' Fast @ {}:{}'.format(host, port)) log.info('Goin\' Fast @ {}:{}'.format(host, port))
# Run the on_start function if provided
if on_start: if on_start:
print("start1")
result = on_start(sanic, loop) result = on_start(sanic, loop)
if isawaitable(result): if isawaitable(result):
print("start2")
loop.run_until_complete(result) loop.run_until_complete(result)
server_coroutine = loop.create_server(lambda: HttpProtocol(loop=loop, sanic=sanic), host, port) server_coroutine = loop.create_server(lambda: HttpProtocol(loop=loop, sanic=sanic), host, port)
#connection_timeout_coroutine =
server_loop = loop.run_until_complete(server_coroutine) server_loop = loop.run_until_complete(server_coroutine)
try: try:
loop.run_forever() loop.run_forever()
finally: finally:
# Run the on_stop function if provided
if on_stop: if on_stop:
result = on_stop(sanic, loop) result = on_stop(sanic, loop)
if isawaitable(result): if isawaitable(result):
loop.run_until_complete(result) loop.run_until_complete(result)
# Wait for event loop to finish and all connections to drain
server_loop.close() server_loop.close()
loop.close() loop.close()