Compare commits

2 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 33db2c01b4 |  |
|  | 26addb2f7b |  |
@@ -119,8 +119,12 @@ async def watch(req, ws):
         # Send updates
         while True:
             await ws.send(await q.get())
+    except RuntimeError as e:
+        if str(e) == "cannot schedule new futures after shutdown":
+            return  # Server shutting down, drop the WebSocket
+        raise
     finally:
-        del watching.pubsub[uuid]
+        watching.pubsub.pop(uuid, None)  # Remove whether it got added yet or not
 
 
 def subscribe(uuid, ws):
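A note on the cleanup change in this hunk: `dict.pop(uuid, None)` tolerates the case where the handler bails out before the subscription was ever registered, which `del` does not. Below is a minimal sketch of that pattern, assuming a dict-of-queues pubsub as the diff suggests; `watch_sketch`, `send`, and the queue type are illustrative placeholders, not project code.

```python
import asyncio
import uuid as uuid_module

pubsub: dict[str, asyncio.Queue] = {}

async def watch_sketch(send):
    uuid = uuid_module.uuid4().hex
    try:
        # An exception can hit before this registration line ever runs.
        q = pubsub[uuid] = asyncio.Queue()
        while True:
            await send(await q.get())
    except RuntimeError as e:
        # Raised by the executor/event loop during shutdown; just drop the socket.
        if str(e) == "cannot schedule new futures after shutdown":
            return
        raise
    finally:
        # pop() is a no-op if uuid never made it into pubsub,
        # whereas `del pubsub[uuid]` would raise KeyError here.
        pubsub.pop(uuid, None)
```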
@@ -43,14 +43,16 @@ async def main_start(app, loop):
     app.ctx.threadexec = ThreadPoolExecutor(
         max_workers=workers, thread_name_prefix="cista-ioworker"
     )
-    await watching.start(app, loop)
+    watching.start(app, loop)
 
 
-@app.after_server_stop
+# Sanic sometimes fails to execute after_server_stop, so we do it before instead (potentially interrupting handlers)
+@app.before_server_stop
 async def main_stop(app, loop):
     quit.set()
-    await watching.stop(app, loop)
+    watching.stop(app)
     app.ctx.threadexec.shutdown()
+    logger.debug("Cista worker threads all finished")
 
 
 @app.on_request
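For reference, the two Sanic lifecycle hooks swapped above look like this in isolation. This is a standalone sketch of the stock Sanic listener API, not the project's app module; the app and listener names are made up.

```python
from sanic import Sanic

app = Sanic("LifecycleDemo")

@app.before_server_stop
async def drain(app, loop):
    # Fires when shutdown begins, while the loop is still fully alive;
    # a safe point to set quit flags and join worker threads.
    ...

@app.after_server_stop
async def cleanup(app, loop):
    # Fires after the server has stopped serving; per the comment in the
    # diff, this is the hook that reportedly does not always run.
    ...
```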
@@ -24,6 +24,17 @@ pillow_heif.register_heif_opener()
 
 bp = Blueprint("preview", url_prefix="/preview")
 
+# Map EXIF Orientation value to a corresponding PIL transpose
+EXIF_ORI = {
+    2: Image.Transpose.FLIP_LEFT_RIGHT,
+    3: Image.Transpose.ROTATE_180,
+    4: Image.Transpose.FLIP_TOP_BOTTOM,
+    5: Image.Transpose.TRANSPOSE,
+    6: Image.Transpose.ROTATE_270,
+    7: Image.Transpose.TRANSVERSE,
+    8: Image.Transpose.ROTATE_90,
+}
+
 
 @bp.get("/<path:path>")
 async def preview(req, path):
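The mapping added here mirrors what Pillow ships as `ImageOps.exif_transpose()`. Below is a rough equivalence sketch, assuming a current Pillow (the `Image.Transpose` enum requires 9.1+); the `upright()` helper is hypothetical and not part of the diff.

```python
from PIL import Image, ImageOps

EXIF_ORI = {
    2: Image.Transpose.FLIP_LEFT_RIGHT,
    3: Image.Transpose.ROTATE_180,
    4: Image.Transpose.FLIP_TOP_BOTTOM,
    5: Image.Transpose.TRANSPOSE,
    6: Image.Transpose.ROTATE_270,
    7: Image.Transpose.TRANSVERSE,
    8: Image.Transpose.ROTATE_90,
}

def upright(img: Image.Image) -> Image.Image:
    # 274 is the EXIF Orientation tag; 1 means "already upright".
    orientation = img.getexif().get(274, 1)
    return img.transpose(EXIF_ORI[orientation]) if orientation in EXIF_ORI else img

# For orientations 1-8 the pixel result should match ImageOps.exif_transpose(img).
```

Doing it inline with an explicit table avoids a second image copy for the common orientation 1 and keeps the transpose inside the existing `with` block.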
@@ -69,34 +80,35 @@ def dispatch(path, quality, maxsize, maxzoom):
 
 
 def process_image(path, *, maxsize, quality):
-    t_load_start = perf_counter()
-    img = Image.open(path)
-    # Force decode to include I/O in load timing
-    img.load()
-    t_load_end = perf_counter()
-    # Resize
-    orig_w, orig_h = img.size
-    t_proc_start = perf_counter()
-    img.thumbnail((min(orig_w, maxsize), min(orig_h, maxsize)))
-    t_proc_end = perf_counter()
-    # Save as AVIF
-    imgdata = io.BytesIO()
-    t_save_start = perf_counter()
-    img.save(imgdata, format="avif", quality=quality, speed=10, max_threads=1)
-    t_save_end = perf_counter()
+    t_load = perf_counter()
+    with Image.open(path) as img:
+        # Force decode to include I/O in load timing
+        img.load()
+        t_proc = perf_counter()
+        # Resize
+        w, h = img.size
+        img.thumbnail((min(w, maxsize), min(h, maxsize)))
+        # Transpose pixels according to EXIF Orientation
+        orientation = img.getexif().get(274, 1)
+        if orientation in EXIF_ORI:
+            img = img.transpose(EXIF_ORI[orientation])
+        # Save as AVIF
+        imgdata = io.BytesIO()
+        t_save = perf_counter()
+        img.save(imgdata, format="avif", quality=quality, speed=10, max_threads=1)
 
+    t_end = perf_counter()
     ret = imgdata.getvalue()
 
-    load_ms = (t_load_end - t_load_start) * 1000
-    proc_ms = (t_proc_end - t_proc_start) * 1000
-    save_ms = (t_save_end - t_save_start) * 1000
+    load_ms = (t_proc - t_load) * 1000
+    proc_ms = (t_save - t_proc) * 1000
+    save_ms = (t_end - t_save) * 1000
     logger.debug(
-        "Preview image %s: load=%.1fms process=%.1fms save=%.1fms out=%.1fKB",
+        "Preview image %s: load=%.1fms process=%.1fms save=%.1fms",
         path.name,
         load_ms,
         proc_ms,
         save_ms,
-        len(ret) / 1024,
     )
 
     return ret
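Since `process_image()` is CPU-bound, it is presumably invoked off the event loop. Here is a sketch of how such a helper is typically dispatched from an async handler via the `threadexec` pool created in `main_start`; the route shape, the `maxsize`/`quality` values, and the response handling are assumptions, and the project's real dispatching lives in the `dispatch()` seen in the hunk header above.

```python
import asyncio
from pathlib import Path

from sanic.response import raw

async def preview_sketch(req, path: str):
    # Hand the blocking decode/resize/encode to the worker thread pool
    # so the event loop keeps serving other requests.
    loop = asyncio.get_running_loop()
    data = await loop.run_in_executor(
        req.app.ctx.threadexec,
        lambda: process_image(Path(path), maxsize=1024, quality=40),
    )
    return raw(data, content_type="image/avif")
```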
@@ -1,6 +1,5 @@
 import os
 import re
-import signal
 from pathlib import Path
 
 from sanic import Sanic
@@ -12,14 +11,6 @@ def run(*, dev=False):
     """Run Sanic main process that spawns worker processes to serve HTTP requests."""
     from .app import app
 
-    # Set up immediate exit on Ctrl+C for faster termination
-    def signal_handler(signum, frame):
-        print("\nReceived interrupt signal, exiting immediately...")
-        os._exit(0)
-
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-
     url, opts = parse_listen(config.config.listen)
     # Silence Sanic's warning about running in production rather than debug
     os.environ["SANIC_IGNORE_PRODUCTION_WARNING"] = "1"
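Background on why this handler could be dropped: `os._exit()` terminates the process without running `finally` blocks, `atexit` hooks, or the stop listeners added elsewhere in this change, whereas a normal `SystemExit`/`KeyboardInterrupt` unwind runs them all. A standalone illustration, unrelated to the project code:

```python
import atexit
import os

atexit.register(lambda: print("atexit ran"))

def hard_exit():
    try:
        os._exit(0)          # terminates immediately: no finally, no atexit
    finally:
        print("never reached")

def soft_exit():
    raise SystemExit(0)      # unwinds normally: finally blocks and atexit run
```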
@@ -440,7 +440,7 @@ def watcher_poll(loop):
         quit.wait(0.1 + 8 * dur)
 
 
-async def start(app, loop):
+def start(app, loop):
     global rootpath
     config.load_config()
     rootpath = config.config.path
@@ -454,6 +454,6 @@ async def start(app, loop):
     app.ctx.watcher.start()
 
 
-async def stop(app, loop):
+def stop(app):
     quit.set()
     app.ctx.watcher.join()
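`start()`/`stop()` now run synchronously, which fits a plain worker thread signalled by a `threading.Event`. A minimal sketch of that lifecycle under that assumption; the poll body and thread name are placeholders, and only `quit.set()` and `watcher.join()` come from the diff.

```python
import threading

quit = threading.Event()

def watcher_poll():
    # One poll pass per iteration; quit.wait() doubles as the sleep and the exit check.
    while not quit.is_set():
        quit.wait(0.1)

def start(app, loop):
    app.ctx.watcher = threading.Thread(target=watcher_poll, name="cista-watcher")
    app.ctx.watcher.start()

def stop(app):
    quit.set()
    app.ctx.watcher.join()  # returns promptly once the Event is set
```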