Frontend created and rewritten a few times, with some backend fixes (#1)
The software is fully operational. Reviewed-on: #1
This commit is contained in:
0
cista/__init__.py
Executable file → Normal file
0
cista/__init__.py
Executable file → Normal file
6
cista/__main__.py
Executable file → Normal file
6
cista/__main__.py
Executable file → Normal file
@@ -67,14 +67,14 @@ def _main():
|
||||
# Maybe run without arguments
|
||||
print(doc)
|
||||
print(
|
||||
"No config file found! Get started with:\n cista -l :8000 /path/to/files, or\n cista -l example.com --import-droppy # Uses Droppy files\n"
|
||||
"No config file found! Get started with:\n cista -l :8000 /path/to/files, or\n cista -l example.com --import-droppy # Uses Droppy files\n",
|
||||
)
|
||||
return 1
|
||||
settings = {}
|
||||
if import_droppy:
|
||||
if exists:
|
||||
raise ValueError(
|
||||
f"Importing Droppy: First remove the existing configuration:\n rm {config.conffile}"
|
||||
f"Importing Droppy: First remove the existing configuration:\n rm {config.conffile}",
|
||||
)
|
||||
settings = droppy.readconf()
|
||||
if path:
|
||||
@@ -95,6 +95,8 @@ def _main():
|
||||
print(f"Serving {config.config.path} at {url}{extra}")
|
||||
# Run the server
|
||||
serve.run(dev=dev)
|
||||
return 0
|
||||
|
||||
|
||||
|
||||
def _confdir(args):
|
||||
|
||||
42
cista/api.py
42
cista/api.py
@@ -1,12 +1,13 @@
|
||||
import asyncio
|
||||
import typing
|
||||
from secrets import token_bytes
|
||||
|
||||
import msgspec
|
||||
from sanic import Blueprint
|
||||
|
||||
from cista import watching
|
||||
from cista import __version__, config, watching
|
||||
from cista.fileio import FileServer
|
||||
from cista.protocol import ControlBase, FileRange, StatusMsg
|
||||
from cista.protocol import ControlTypes, FileRange, StatusMsg
|
||||
from cista.util.apphelpers import asend, websocket_wrapper
|
||||
|
||||
bp = Blueprint("api", url_prefix="/api")
|
||||
@@ -32,7 +33,7 @@ async def upload(req, ws):
|
||||
text = await ws.recv()
|
||||
if not isinstance(text, str):
|
||||
raise ValueError(
|
||||
f"Expected JSON control, got binary len(data) = {len(text)}"
|
||||
f"Expected JSON control, got binary len(data) = {len(text)}",
|
||||
)
|
||||
req = msgspec.json.decode(text, type=FileRange)
|
||||
pos = req.start
|
||||
@@ -45,8 +46,8 @@ async def upload(req, ws):
|
||||
raise ValueError(f"Expected {req.end - pos} more bytes, got {d}")
|
||||
# Report success
|
||||
res = StatusMsg(status="ack", req=req)
|
||||
print("ack", res)
|
||||
await asend(ws, res)
|
||||
# await ws.drain()
|
||||
|
||||
|
||||
@bp.websocket("download")
|
||||
@@ -58,7 +59,7 @@ async def download(req, ws):
|
||||
text = await ws.recv()
|
||||
if not isinstance(text, str):
|
||||
raise ValueError(
|
||||
f"Expected JSON control, got binary len(data) = {len(text)}"
|
||||
f"Expected JSON control, got binary len(data) = {len(text)}",
|
||||
)
|
||||
req = msgspec.json.decode(text, type=FileRange)
|
||||
pos = req.start
|
||||
@@ -70,23 +71,42 @@ async def download(req, ws):
|
||||
# Report success
|
||||
res = StatusMsg(status="ack", req=req)
|
||||
await asend(ws, res)
|
||||
# await ws.drain()
|
||||
|
||||
|
||||
@bp.websocket("control")
|
||||
@websocket_wrapper
|
||||
async def control(req, ws):
|
||||
cmd = msgspec.json.decode(await ws.recv(), type=ControlBase)
|
||||
await asyncio.to_thread(cmd)
|
||||
await asend(ws, StatusMsg(status="ack", req=cmd))
|
||||
while True:
|
||||
cmd = msgspec.json.decode(await ws.recv(), type=ControlTypes)
|
||||
await asyncio.to_thread(cmd)
|
||||
await asend(ws, StatusMsg(status="ack", req=cmd))
|
||||
|
||||
|
||||
|
||||
@bp.websocket("watch")
|
||||
@websocket_wrapper
|
||||
async def watch(req, ws):
|
||||
await ws.send(
|
||||
msgspec.json.encode(
|
||||
{
|
||||
"server": {
|
||||
"name": "Cista", # Should be configurable
|
||||
"version": __version__,
|
||||
"public": config.config.public,
|
||||
},
|
||||
"user": {
|
||||
"username": req.ctx.username,
|
||||
"privileged": req.ctx.user.privileged,
|
||||
}
|
||||
if req.ctx.user
|
||||
else None,
|
||||
}
|
||||
).decode()
|
||||
)
|
||||
uuid = token_bytes(16)
|
||||
try:
|
||||
with watching.tree_lock:
|
||||
q = watching.pubsub[ws] = asyncio.Queue()
|
||||
q = watching.pubsub[uuid] = asyncio.Queue()
|
||||
# Init with disk usage and full tree
|
||||
await ws.send(watching.format_du())
|
||||
await ws.send(watching.format_tree())
|
||||
@@ -94,4 +114,4 @@ async def watch(req, ws):
|
||||
while True:
|
||||
await ws.send(await q.get())
|
||||
finally:
|
||||
del watching.pubsub[ws]
|
||||
del watching.pubsub[uuid]
|
||||
|
||||
195
cista/app.py
Executable file → Normal file
195
cista/app.py
Executable file → Normal file
@@ -1,15 +1,31 @@
|
||||
import asyncio
|
||||
import datetime
|
||||
import mimetypes
|
||||
from collections import deque
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from importlib.resources import files
|
||||
from pathlib import Path
|
||||
from stat import S_IFDIR, S_IFREG
|
||||
from urllib.parse import unquote
|
||||
from wsgiref.handlers import format_date_time
|
||||
|
||||
from sanic import Blueprint, Sanic, raw
|
||||
import brotli
|
||||
import sanic.helpers
|
||||
from blake3 import blake3
|
||||
from natsort import natsorted, ns
|
||||
from sanic import Blueprint, Sanic, empty, raw
|
||||
from sanic.exceptions import Forbidden, NotFound
|
||||
from sanic.log import logging
|
||||
from stream_zip import ZIP_AUTO, stream_zip
|
||||
|
||||
from cista import auth, config, session, watching
|
||||
from cista.api import bp
|
||||
from cista.util import filename
|
||||
from cista.protocol import DirEntry
|
||||
from cista.util.apphelpers import handle_sanic_exception
|
||||
|
||||
# Workaround until Sanic PR #2824 is merged
|
||||
sanic.helpers._ENTITY_HEADERS = frozenset()
|
||||
|
||||
app = Sanic("cista", strict_slashes=True)
|
||||
app.blueprint(auth.bp)
|
||||
app.blueprint(bp)
|
||||
@@ -20,19 +36,25 @@ app.exception(Exception)(handle_sanic_exception)
|
||||
async def main_start(app, loop):
|
||||
config.load_config()
|
||||
await watching.start(app, loop)
|
||||
app.ctx.threadexec = ThreadPoolExecutor(max_workers=8)
|
||||
|
||||
|
||||
|
||||
@app.after_server_stop
|
||||
async def main_stop(app, loop):
|
||||
await watching.stop(app, loop)
|
||||
app.ctx.threadexec.shutdown()
|
||||
|
||||
|
||||
|
||||
@app.on_request
|
||||
async def use_session(req):
|
||||
req.ctx.session = session.get(req)
|
||||
try:
|
||||
req.ctx.username = req.ctx.session["username"]
|
||||
req.ctx.user = config.config.users[req.ctx.session["username"]] # type: ignore
|
||||
except (AttributeError, KeyError, TypeError):
|
||||
req.ctx.username = None
|
||||
req.ctx.user = None
|
||||
# CSRF protection
|
||||
if req.method == "GET" and req.headers.upgrade != "websocket":
|
||||
@@ -58,15 +80,168 @@ def http_fileserver(app, _):
|
||||
app.blueprint(bp)
|
||||
|
||||
|
||||
@app.get("/<path:path>", static=True)
|
||||
www = {}
|
||||
|
||||
|
||||
@app.before_server_start
|
||||
async def load_wwwroot(*_ignored):
|
||||
global www
|
||||
www = await asyncio.get_event_loop().run_in_executor(None, _load_wwwroot, www)
|
||||
|
||||
|
||||
def _load_wwwroot(www):
|
||||
wwwnew = {}
|
||||
base = files("cista") / "wwwroot"
|
||||
paths = ["."]
|
||||
while paths:
|
||||
path = paths.pop(0)
|
||||
current = base / path
|
||||
for p in current.iterdir():
|
||||
if p.is_dir():
|
||||
paths.append(current / p.parts[-1])
|
||||
continue
|
||||
name = p.relative_to(base).as_posix()
|
||||
mime = mimetypes.guess_type(name)[0] or "application/octet-stream"
|
||||
mtime = p.stat().st_mtime
|
||||
data = p.read_bytes()
|
||||
etag = blake3(data).hexdigest(length=8)
|
||||
if name == "index.html":
|
||||
name = ""
|
||||
# Use old data if not changed
|
||||
if name in www and www[name][2]["etag"] == etag:
|
||||
wwwnew[name] = www[name]
|
||||
continue
|
||||
# Add charset definition
|
||||
if mime.startswith("text/"):
|
||||
mime = f"{mime}; charset=UTF-8"
|
||||
# Asset files names will change whenever the content changes
|
||||
cached = name.startswith("assets/")
|
||||
headers = {
|
||||
"etag": etag,
|
||||
"last-modified": format_date_time(mtime),
|
||||
"cache-control": "max-age=31536000, immutable"
|
||||
if cached
|
||||
else "no-cache",
|
||||
"content-type": mime,
|
||||
}
|
||||
# Precompress with Brotli
|
||||
br = brotli.compress(data)
|
||||
if len(br) >= len(data):
|
||||
br = False
|
||||
wwwnew[name] = data, br, headers
|
||||
return wwwnew
|
||||
|
||||
|
||||
@app.add_task
|
||||
async def refresh_wwwroot():
|
||||
while True:
|
||||
try:
|
||||
wwwold = www
|
||||
await load_wwwroot()
|
||||
changes = ""
|
||||
for name in sorted(www):
|
||||
attr = www[name]
|
||||
if wwwold.get(name) == attr:
|
||||
continue
|
||||
headers = attr[2]
|
||||
changes += f"{headers['last-modified']} {headers['etag']} /{name}\n"
|
||||
for name in sorted(set(wwwold) - set(www)):
|
||||
changes += f"Deleted /{name}\n"
|
||||
if changes:
|
||||
print(f"Updated wwwroot:\n{changes}", end="", flush=True)
|
||||
except Exception as e:
|
||||
print("Error loading wwwroot", e)
|
||||
if not app.debug:
|
||||
return
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
|
||||
@app.route("/<path:path>", methods=["GET", "HEAD"])
|
||||
async def wwwroot(req, path=""):
|
||||
"""Frontend files only"""
|
||||
name = filename.sanitize(unquote(path)) if path else "index.html"
|
||||
try:
|
||||
index = files("cista").joinpath("wwwroot", name).read_bytes()
|
||||
except OSError as e:
|
||||
name = unquote(path)
|
||||
if name not in www:
|
||||
raise NotFound(f"File not found: /{path}", extra={"name": name})
|
||||
data, br, headers = www[name]
|
||||
if req.headers.if_none_match == headers["etag"]:
|
||||
# The client has it cached, respond 304 Not Modified
|
||||
return empty(304, headers=headers)
|
||||
# Brotli compressed?
|
||||
if br and "br" in req.headers.accept_encoding.split(", "):
|
||||
headers = {
|
||||
**headers,
|
||||
"content-encoding": "br",
|
||||
}
|
||||
data = br
|
||||
return raw(data, headers=headers)
|
||||
|
||||
|
||||
@app.get("/zip/<keys>/<zipfile:ext=zip>")
|
||||
async def zip_download(req, keys, zipfile, ext):
|
||||
"""Download a zip archive of the given keys"""
|
||||
wanted = set(keys.split("+"))
|
||||
with watching.tree_lock:
|
||||
q = deque([([], None, watching.tree[""].dir)])
|
||||
files = []
|
||||
while q:
|
||||
locpar, relpar, d = q.pop()
|
||||
for name, attr in d.items():
|
||||
loc = [*locpar, name]
|
||||
rel = None
|
||||
if relpar or attr.key in wanted:
|
||||
rel = [*relpar, name] if relpar else [name]
|
||||
wanted.discard(attr.key)
|
||||
isdir = isinstance(attr, DirEntry)
|
||||
if isdir:
|
||||
q.append((loc, rel, attr.dir))
|
||||
if rel:
|
||||
files.append(
|
||||
("/".join(rel), Path(watching.rootpath.joinpath(*loc)))
|
||||
)
|
||||
|
||||
if not files:
|
||||
raise NotFound(
|
||||
f"File not found: /{path}", extra={"name": name, "exception": repr(e)}
|
||||
"No files found",
|
||||
context={"keys": keys, "zipfile": zipfile, "wanted": wanted},
|
||||
)
|
||||
mime = mimetypes.guess_type(name)[0] or "application/octet-stream"
|
||||
return raw(index, content_type=mime)
|
||||
if wanted:
|
||||
raise NotFound("Files not found", context={"missing": wanted})
|
||||
|
||||
files = natsorted(files, key=lambda f: f[0], alg=ns.IGNORECASE)
|
||||
|
||||
def local_files(files):
|
||||
for rel, p in files:
|
||||
s = p.stat()
|
||||
size = s.st_size
|
||||
modified = datetime.datetime.fromtimestamp(s.st_mtime, datetime.UTC)
|
||||
if p.is_dir():
|
||||
yield rel, modified, S_IFDIR | 0o755, ZIP_AUTO(size), b""
|
||||
else:
|
||||
yield rel, modified, S_IFREG | 0o644, ZIP_AUTO(size), contents(p)
|
||||
|
||||
def contents(name):
|
||||
with name.open("rb") as f:
|
||||
while chunk := f.read(65536):
|
||||
yield chunk
|
||||
|
||||
def worker():
|
||||
try:
|
||||
for chunk in stream_zip(local_files(files)):
|
||||
asyncio.run_coroutine_threadsafe(queue.put(chunk), loop)
|
||||
except Exception:
|
||||
logging.exception("Error streaming ZIP")
|
||||
raise
|
||||
finally:
|
||||
asyncio.run_coroutine_threadsafe(queue.put(None), loop)
|
||||
|
||||
# Don't block the event loop: run in a thread
|
||||
queue = asyncio.Queue(maxsize=1)
|
||||
loop = asyncio.get_event_loop()
|
||||
thread = loop.run_in_executor(app.ctx.threadexec, worker)
|
||||
|
||||
# Stream the response
|
||||
res = await req.respond(content_type="application/zip")
|
||||
while chunk := await queue.get():
|
||||
await res.send(chunk)
|
||||
|
||||
await thread # If it raises, the response will fail download
|
||||
|
||||
16
cista/auth.py
Executable file → Normal file
16
cista/auth.py
Executable file → Normal file
@@ -25,7 +25,7 @@ def login(username: str, password: str):
|
||||
try:
|
||||
u = config.config.users[un.decode()]
|
||||
except KeyError:
|
||||
raise ValueError("Invalid username")
|
||||
raise ValueError("Invalid username") from None
|
||||
# Verify password
|
||||
need_rehash = False
|
||||
if not u.hash:
|
||||
@@ -41,7 +41,7 @@ def login(username: str, password: str):
|
||||
try:
|
||||
_argon.verify(u.hash, pw)
|
||||
except Exception:
|
||||
raise ValueError("Invalid password")
|
||||
raise ValueError("Invalid password") from None
|
||||
if _argon.check_needs_rehash(u.hash):
|
||||
need_rehash = True
|
||||
# Login successful
|
||||
@@ -62,7 +62,7 @@ class LoginResponse(msgspec.Struct):
|
||||
error: str = ""
|
||||
|
||||
|
||||
def verify(request, privileged=False):
|
||||
def verify(request, *, privileged=False):
|
||||
"""Raise Unauthorized or Forbidden if the request is not authorized"""
|
||||
if privileged:
|
||||
if request.ctx.user:
|
||||
@@ -71,7 +71,8 @@ def verify(request, privileged=False):
|
||||
raise Forbidden("Access Forbidden: Only for privileged users")
|
||||
elif config.config.public or request.ctx.user:
|
||||
return
|
||||
raise Unauthorized("Login required", "cookie", context={"redirect": "/login"})
|
||||
raise Unauthorized("Login required", "cookie")
|
||||
|
||||
|
||||
|
||||
bp = Blueprint("auth")
|
||||
@@ -130,11 +131,14 @@ async def login_post(request):
|
||||
if not username or not password:
|
||||
raise KeyError
|
||||
except KeyError:
|
||||
raise BadRequest("Missing username or password", context={"redirect": "/login"})
|
||||
raise BadRequest(
|
||||
"Missing username or password",
|
||||
context={"redirect": "/login"},
|
||||
) from None
|
||||
try:
|
||||
user = login(username, password)
|
||||
except ValueError as e:
|
||||
raise Forbidden(str(e), context={"redirect": "/login"})
|
||||
raise Forbidden(str(e), context={"redirect": "/login"}) from e
|
||||
|
||||
if "text/html" in request.headers.accept:
|
||||
res = redirect("/")
|
||||
|
||||
3
cista/config.py
Executable file → Normal file
3
cista/config.py
Executable file → Normal file
@@ -21,7 +21,8 @@ class Config(msgspec.Struct):
|
||||
class User(msgspec.Struct, omit_defaults=True):
|
||||
privileged: bool = False
|
||||
hash: str = ""
|
||||
lastSeen: int = 0
|
||||
lastSeen: int = 0 # noqa: N815
|
||||
|
||||
|
||||
|
||||
class Link(msgspec.Struct, omit_defaults=True):
|
||||
|
||||
6
cista/droppy.py
Executable file → Normal file
6
cista/droppy.py
Executable file → Normal file
@@ -30,10 +30,12 @@ def _droppy_listeners(cf):
|
||||
host = listener["host"]
|
||||
if isinstance(host, list):
|
||||
host = host[0]
|
||||
except (KeyError, IndexError):
|
||||
continue
|
||||
else:
|
||||
if host in ("127.0.0.1", "::", "localhost"):
|
||||
return f":{port}"
|
||||
return f"{host}:{port}"
|
||||
except (KeyError, IndexError):
|
||||
continue
|
||||
|
||||
# If none matched, fallback to Droppy default
|
||||
return "0.0.0.0:8989"
|
||||
|
||||
4
cista/fileio.py
Executable file → Normal file
4
cista/fileio.py
Executable file → Normal file
@@ -62,7 +62,9 @@ class FileServer:
|
||||
async def start(self):
|
||||
self.alink = AsyncLink()
|
||||
self.worker = asyncio.get_event_loop().run_in_executor(
|
||||
None, self.worker_thread, self.alink.to_sync
|
||||
None,
|
||||
self.worker_thread,
|
||||
self.alink.to_sync,
|
||||
)
|
||||
self.cache = LRUCache(File, capacity=10, maxage=5.0)
|
||||
|
||||
|
||||
37
cista/protocol.py
Executable file → Normal file
37
cista/protocol.py
Executable file → Normal file
@@ -22,7 +22,8 @@ class MkDir(ControlBase):
|
||||
|
||||
def __call__(self):
|
||||
path = config.config.path / filename.sanitize(self.path)
|
||||
path.mkdir(parents=False, exist_ok=False)
|
||||
path.mkdir(parents=True, exist_ok=False)
|
||||
|
||||
|
||||
|
||||
class Rename(ControlBase):
|
||||
@@ -44,7 +45,11 @@ class Rm(ControlBase):
|
||||
root = config.config.path
|
||||
sel = [root / filename.sanitize(p) for p in self.sel]
|
||||
for p in sel:
|
||||
shutil.rmtree(p, ignore_errors=True)
|
||||
if p.is_dir():
|
||||
shutil.rmtree(p)
|
||||
else:
|
||||
p.unlink()
|
||||
|
||||
|
||||
|
||||
class Mv(ControlBase):
|
||||
@@ -72,10 +77,19 @@ class Cp(ControlBase):
|
||||
if not dst.is_dir():
|
||||
raise BadRequest("The destination must be a directory")
|
||||
for p in sel:
|
||||
# Note: copies as dst rather than in dst unless name is appended.
|
||||
shutil.copytree(
|
||||
p, dst / p.name, dirs_exist_ok=True, ignore_dangling_symlinks=True
|
||||
)
|
||||
if p.is_dir():
|
||||
# Note: copies as dst rather than in dst unless name is appended.
|
||||
shutil.copytree(
|
||||
p,
|
||||
dst / p.name,
|
||||
dirs_exist_ok=True,
|
||||
ignore_dangling_symlinks=True,
|
||||
)
|
||||
else:
|
||||
shutil.copy2(p, dst)
|
||||
|
||||
|
||||
ControlTypes = MkDir | Rename | Rm | Mv | Cp
|
||||
|
||||
|
||||
## File uploads and downloads
|
||||
@@ -101,11 +115,13 @@ class ErrorMsg(msgspec.Struct):
|
||||
|
||||
|
||||
class FileEntry(msgspec.Struct):
|
||||
key: str
|
||||
size: int
|
||||
mtime: int
|
||||
|
||||
|
||||
class DirEntry(msgspec.Struct):
|
||||
key: str
|
||||
size: int
|
||||
mtime: int
|
||||
dir: DirList
|
||||
@@ -133,7 +149,8 @@ DirList = dict[str, FileEntry | DirEntry]
|
||||
class UpdateEntry(msgspec.Struct, omit_defaults=True):
|
||||
"""Updates the named entry in the tree. Fields that are set replace old values. A list of entries recurses directories."""
|
||||
|
||||
name: str = ""
|
||||
name: str
|
||||
key: str
|
||||
deleted: bool = False
|
||||
size: int | None = None
|
||||
mtime: int | None = None
|
||||
@@ -141,12 +158,12 @@ class UpdateEntry(msgspec.Struct, omit_defaults=True):
|
||||
|
||||
|
||||
def make_dir_data(root):
|
||||
if len(root) == 2:
|
||||
if len(root) == 3:
|
||||
return FileEntry(*root)
|
||||
size, mtime, listing = root
|
||||
id_, size, mtime, listing = root
|
||||
converted = {}
|
||||
for name, data in listing.items():
|
||||
converted[name] = make_dir_data(data)
|
||||
sz = sum(x.size for x in converted.values())
|
||||
mt = max(x.mtime for x in converted.values())
|
||||
return DirEntry(sz, max(mt, mtime), converted)
|
||||
return DirEntry(id_, sz, max(mt, mtime), converted)
|
||||
|
||||
27
cista/serve.py
Executable file → Normal file
27
cista/serve.py
Executable file → Normal file
@@ -7,7 +7,7 @@ from sanic import Sanic
|
||||
from cista import config, server80
|
||||
|
||||
|
||||
def run(dev=False):
|
||||
def run(*, dev=False):
|
||||
"""Run Sanic main process that spawns worker processes to serve HTTP requests."""
|
||||
from .app import app
|
||||
|
||||
@@ -30,7 +30,11 @@ def run(dev=False):
|
||||
reload_dir={confdir, wwwroot},
|
||||
access_log=True,
|
||||
) # type: ignore
|
||||
Sanic.serve()
|
||||
if dev:
|
||||
Sanic.serve()
|
||||
else:
|
||||
Sanic.serve_single()
|
||||
|
||||
|
||||
|
||||
def check_cert(certdir, domain):
|
||||
@@ -38,7 +42,7 @@ def check_cert(certdir, domain):
|
||||
return
|
||||
# TODO: Use certbot to fetch a cert
|
||||
raise ValueError(
|
||||
f"TLS certificate files privkey.pem and fullchain.pem needed in {certdir}"
|
||||
f"TLS certificate files privkey.pem and fullchain.pem needed in {certdir}",
|
||||
)
|
||||
|
||||
|
||||
@@ -47,15 +51,14 @@ def parse_listen(listen):
|
||||
unix = Path(listen).resolve()
|
||||
if not unix.parent.exists():
|
||||
raise ValueError(
|
||||
f"Directory for unix socket does not exist: {unix.parent}/"
|
||||
f"Directory for unix socket does not exist: {unix.parent}/",
|
||||
)
|
||||
return "http://localhost", {"unix": unix}
|
||||
elif re.fullmatch(r"(\w+(-\w+)*\.)+\w{2,}", listen, re.UNICODE):
|
||||
if re.fullmatch(r"(\w+(-\w+)*\.)+\w{2,}", listen, re.UNICODE):
|
||||
return f"https://{listen}", {"host": listen, "port": 443, "ssl": True}
|
||||
else:
|
||||
try:
|
||||
addr, _port = listen.split(":", 1)
|
||||
port = int(_port)
|
||||
except Exception:
|
||||
raise ValueError(f"Invalid listen address: {listen}")
|
||||
return f"http://localhost:{port}", {"host": addr, "port": port}
|
||||
try:
|
||||
addr, _port = listen.split(":", 1)
|
||||
port = int(_port)
|
||||
except Exception:
|
||||
raise ValueError(f"Invalid listen address: {listen}") from None
|
||||
return f"http://localhost:{port}", {"host": addr, "port": port}
|
||||
|
||||
0
cista/session.py
Executable file → Normal file
0
cista/session.py
Executable file → Normal file
0
cista/util/__init__.py
Normal file
0
cista/util/__init__.py
Normal file
@@ -33,7 +33,8 @@ async def handle_sanic_exception(request, e):
|
||||
# Non-browsers get JSON errors
|
||||
if "text/html" not in request.headers.accept:
|
||||
return jres(
|
||||
ErrorMsg({"code": code, "message": message, **context}), status=code
|
||||
ErrorMsg({"code": code, "message": message, **context}),
|
||||
status=code,
|
||||
)
|
||||
# Redirections flash the error message via cookies
|
||||
if "redirect" in context:
|
||||
|
||||
3
cista/util/asynclink.py
Executable file → Normal file
3
cista/util/asynclink.py
Executable file → Normal file
@@ -80,8 +80,9 @@ class SyncRequest:
|
||||
if exc:
|
||||
self.set_exception(exc)
|
||||
return True
|
||||
elif not self.done:
|
||||
if not self.done:
|
||||
self.set_result(None)
|
||||
return None
|
||||
|
||||
def set_result(self, value):
|
||||
"""Set result value; mark as done."""
|
||||
|
||||
@@ -10,4 +10,7 @@ def sanitize(filename: str) -> str:
|
||||
filename = filename.replace("\\", "-")
|
||||
filename = sanitize_filepath(filename)
|
||||
filename = filename.strip("/")
|
||||
return PurePosixPath(filename).as_posix()
|
||||
p = PurePosixPath(filename)
|
||||
if any(n.startswith(".") for n in p.parts):
|
||||
raise ValueError("Filenames starting with dot are not allowed")
|
||||
return p.as_posix()
|
||||
|
||||
2
cista/util/lrucache.py
Executable file → Normal file
2
cista/util/lrucache.py
Executable file → Normal file
@@ -41,7 +41,7 @@ class LRUCache:
|
||||
The corresponding item's handle.
|
||||
"""
|
||||
# Take from cache or open a new one
|
||||
for i, (k, f, ts) in enumerate(self.cache):
|
||||
for i, (k, f, _ts) in enumerate(self.cache): # noqa: B007
|
||||
if k == key:
|
||||
self.cache.pop(i)
|
||||
break
|
||||
|
||||
77
cista/watching.py
Executable file → Normal file
77
cista/watching.py
Executable file → Normal file
@@ -1,13 +1,15 @@
|
||||
import asyncio
|
||||
import shutil
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path, PurePosixPath
|
||||
|
||||
import inotify.adapters
|
||||
import msgspec
|
||||
from sanic.log import logging
|
||||
|
||||
from cista import config
|
||||
from cista.fileio import fuid
|
||||
from cista.protocol import DirEntry, FileEntry, UpdateEntry
|
||||
|
||||
pubsub = {}
|
||||
@@ -28,7 +30,8 @@ disk_usage = None
|
||||
|
||||
|
||||
def watcher_thread(loop):
|
||||
global disk_usage
|
||||
global disk_usage, rootpath
|
||||
import inotify.adapters
|
||||
|
||||
while True:
|
||||
rootpath = config.config.path
|
||||
@@ -66,11 +69,33 @@ def watcher_thread(loop):
|
||||
try:
|
||||
update(path.relative_to(rootpath), loop)
|
||||
except Exception as e:
|
||||
print("Watching error", e)
|
||||
break
|
||||
print("Watching error", e, path, rootpath)
|
||||
raise
|
||||
i = None # Free the inotify object
|
||||
|
||||
|
||||
def watcher_thread_poll(loop):
|
||||
global disk_usage, rootpath
|
||||
|
||||
while not quit:
|
||||
rootpath = config.config.path
|
||||
old = format_tree() if tree[""] else None
|
||||
with tree_lock:
|
||||
# Initialize the tree from filesystem
|
||||
tree[""] = walk(rootpath)
|
||||
msg = format_tree()
|
||||
if msg != old:
|
||||
asyncio.run_coroutine_threadsafe(broadcast(msg), loop)
|
||||
|
||||
# Disk usage update
|
||||
du = shutil.disk_usage(rootpath)
|
||||
if du != disk_usage:
|
||||
disk_usage = du
|
||||
asyncio.run_coroutine_threadsafe(broadcast(format_du()), loop)
|
||||
|
||||
time.sleep(1.0)
|
||||
|
||||
|
||||
def format_du():
|
||||
return msgspec.json.encode(
|
||||
{
|
||||
@@ -79,24 +104,24 @@ def format_du():
|
||||
"used": disk_usage.used,
|
||||
"free": disk_usage.free,
|
||||
"storage": tree[""].size,
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
).decode()
|
||||
|
||||
|
||||
def format_tree():
|
||||
root = tree[""]
|
||||
return msgspec.json.encode(
|
||||
{"update": [UpdateEntry(size=root.size, mtime=root.mtime, dir=root.dir)]}
|
||||
).decode()
|
||||
return msgspec.json.encode({"root": root}).decode()
|
||||
|
||||
|
||||
def walk(path: Path) -> DirEntry | FileEntry | None:
|
||||
try:
|
||||
s = path.stat()
|
||||
key = fuid(s)
|
||||
assert key, repr(key)
|
||||
mtime = int(s.st_mtime)
|
||||
if path.is_file():
|
||||
return FileEntry(s.st_size, mtime)
|
||||
return FileEntry(key, s.st_size, mtime)
|
||||
|
||||
tree = {
|
||||
p.name: v
|
||||
@@ -106,10 +131,10 @@ def walk(path: Path) -> DirEntry | FileEntry | None:
|
||||
}
|
||||
if tree:
|
||||
size = sum(v.size for v in tree.values())
|
||||
mtime = max(mtime, max(v.mtime for v in tree.values()))
|
||||
mtime = max(mtime, *(v.mtime for v in tree.values()))
|
||||
else:
|
||||
size = 0
|
||||
return DirEntry(size, mtime, tree)
|
||||
return DirEntry(key, size, mtime, tree)
|
||||
except FileNotFoundError:
|
||||
return None
|
||||
except OSError as e:
|
||||
@@ -119,6 +144,8 @@ def walk(path: Path) -> DirEntry | FileEntry | None:
|
||||
|
||||
def update(relpath: PurePosixPath, loop):
|
||||
"""Called by inotify updates, check the filesystem and broadcast any changes."""
|
||||
if rootpath is None or relpath is None:
|
||||
print("ERROR", rootpath, relpath)
|
||||
new = walk(rootpath / relpath)
|
||||
with tree_lock:
|
||||
update = update_internal(relpath, new)
|
||||
@@ -129,7 +156,8 @@ def update(relpath: PurePosixPath, loop):
|
||||
|
||||
|
||||
def update_internal(
|
||||
relpath: PurePosixPath, new: DirEntry | FileEntry | None
|
||||
relpath: PurePosixPath,
|
||||
new: DirEntry | FileEntry | None,
|
||||
) -> list[UpdateEntry]:
|
||||
path = "", *relpath.parts
|
||||
old = tree
|
||||
@@ -158,7 +186,7 @@ def update_internal(
|
||||
# Update parents
|
||||
update = []
|
||||
for name, entry in elems[:-1]:
|
||||
u = UpdateEntry(name)
|
||||
u = UpdateEntry(name, entry.key)
|
||||
if szdiff:
|
||||
entry.size += szdiff
|
||||
u.size = entry.size
|
||||
@@ -168,16 +196,15 @@ def update_internal(
|
||||
# The last element is the one that changed
|
||||
name, entry = elems[-1]
|
||||
parent = elems[-2][1] if len(elems) > 1 else tree
|
||||
u = UpdateEntry(name)
|
||||
u = UpdateEntry(name, new.key if new else entry.key)
|
||||
if new:
|
||||
parent[name] = new
|
||||
if u.size != new.size:
|
||||
u.size = new.size
|
||||
if u.mtime != new.mtime:
|
||||
u.mtime = new.mtime
|
||||
if isinstance(new, DirEntry):
|
||||
if u.dir == new.dir:
|
||||
u.dir = new.dir
|
||||
if isinstance(new, DirEntry) and u.dir != new.dir:
|
||||
u.dir = new.dir
|
||||
else:
|
||||
del parent[name]
|
||||
u.deleted = True
|
||||
@@ -186,13 +213,21 @@ def update_internal(
|
||||
|
||||
|
||||
async def broadcast(msg):
|
||||
for queue in pubsub.values():
|
||||
await queue.put_nowait(msg)
|
||||
try:
|
||||
for queue in pubsub.values():
|
||||
queue.put_nowait(msg)
|
||||
except Exception:
|
||||
# Log because asyncio would silently eat the error
|
||||
logging.exception("Broadcast error")
|
||||
|
||||
|
||||
|
||||
async def start(app, loop):
|
||||
config.load_config()
|
||||
app.ctx.watcher = threading.Thread(target=watcher_thread, args=[loop])
|
||||
app.ctx.watcher = threading.Thread(
|
||||
target=watcher_thread if sys.platform == "linux" else watcher_thread_poll,
|
||||
args=[loop],
|
||||
)
|
||||
app.ctx.watcher.start()
|
||||
|
||||
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
@media (min-width: 1024px){.about{min-height:100vh;display:flex;align-items:center}}
|
||||
@@ -1 +0,0 @@
|
||||
import{_ as e,o as t,c as o,a as s}from"./index-689b26c8.js";const _={},c={class:"about"},a=s("h1",null,"This is an about page",-1),n=[a];function i(r,u){return t(),o("div",c,n)}const l=e(_,[["render",i]]);export{l as default};
|
||||
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
|
||||
:root{--vt-c-white: #ffffff;--vt-c-white-soft: #f8f8f8;--vt-c-white-mute: #f2f2f2;--vt-c-black: #181818;--vt-c-black-soft: #222222;--vt-c-black-mute: #282828;--vt-c-indigo: #2c3e50;--vt-c-divider-light-1: rgba(60, 60, 60, .29);--vt-c-divider-light-2: rgba(60, 60, 60, .12);--vt-c-divider-dark-1: rgba(84, 84, 84, .65);--vt-c-divider-dark-2: rgba(84, 84, 84, .48);--vt-c-text-light-1: var(--vt-c-indigo);--vt-c-text-light-2: rgba(60, 60, 60, .66);--vt-c-text-dark-1: var(--vt-c-white);--vt-c-text-dark-2: rgba(235, 235, 235, .64)}:root{--color-background: var(--vt-c-white);--color-background-soft: var(--vt-c-white-soft);--color-background-mute: var(--vt-c-white-mute);--color-border: var(--vt-c-divider-light-2);--color-border-hover: var(--vt-c-divider-light-1);--color-heading: var(--vt-c-text-light-1);--color-text: var(--vt-c-text-light-1);--section-gap: 160px}@media (prefers-color-scheme: dark){:root{--color-background: var(--vt-c-black);--color-background-soft: var(--vt-c-black-soft);--color-background-mute: var(--vt-c-black-mute);--color-border: var(--vt-c-divider-dark-2);--color-border-hover: var(--vt-c-divider-dark-1);--color-heading: var(--vt-c-text-dark-1);--color-text: var(--vt-c-text-dark-2)}}*,*:before,*:after{box-sizing:border-box;margin:0;font-weight:400}body{min-height:100vh;color:var(--color-text);background:var(--color-background);transition:color .5s,background-color .5s;line-height:1.6;font-family:Inter,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;font-size:15px;text-rendering:optimizeLegibility;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}#app{max-width:1280px;margin:0 auto;padding:2rem;font-weight:400}a,.green{text-decoration:none;color:#00bd7e;transition:.4s}@media (hover: hover){a:hover{background-color:#00bd7e33}}@media (min-width: 1024px){body{display:flex;place-items:center}#app{display:grid;grid-template-columns:1fr 1fr;padding:0 
2rem}}h1[data-v-a47c673d]{font-weight:500;font-size:2.6rem;position:relative;top:-10px}h3[data-v-a47c673d]{font-size:1.2rem}.greetings h1[data-v-a47c673d],.greetings h3[data-v-a47c673d]{text-align:center}@media (min-width: 1024px){.greetings h1[data-v-a47c673d],.greetings h3[data-v-a47c673d]{text-align:left}}header[data-v-85852c48]{line-height:1.5;max-height:100vh}.logo[data-v-85852c48]{display:block;margin:0 auto 2rem}nav[data-v-85852c48]{width:100%;font-size:12px;text-align:center;margin-top:2rem}nav a.router-link-exact-active[data-v-85852c48]{color:var(--color-text)}nav a.router-link-exact-active[data-v-85852c48]:hover{background-color:transparent}nav a[data-v-85852c48]{display:inline-block;padding:0 1rem;border-left:1px solid var(--color-border)}nav a[data-v-85852c48]:first-of-type{border:0}@media (min-width: 1024px){header[data-v-85852c48]{display:flex;place-items:center;padding-right:calc(var(--section-gap) / 2)}.logo[data-v-85852c48]{margin:0 2rem 0 0}header .wrapper[data-v-85852c48]{display:flex;place-items:flex-start;flex-wrap:wrap}nav[data-v-85852c48]{text-align:left;margin-left:-1rem;font-size:1rem;padding:1rem 0;margin-top:1rem}}.item[data-v-fd0742eb]{margin-top:2rem;display:flex;position:relative}.details[data-v-fd0742eb]{flex:1;margin-left:1rem}i[data-v-fd0742eb]{display:flex;place-items:center;place-content:center;width:32px;height:32px;color:var(--color-text)}h3[data-v-fd0742eb]{font-size:1.2rem;font-weight:500;margin-bottom:.4rem;color:var(--color-heading)}@media (min-width: 1024px){.item[data-v-fd0742eb]{margin-top:0;padding:.4rem 0 1rem calc(var(--section-gap) / 2)}i[data-v-fd0742eb]{top:calc(50% - 25px);left:-26px;position:absolute;border:1px solid var(--color-border);background:var(--color-background);border-radius:8px;width:50px;height:50px}.item[data-v-fd0742eb]:before{content:" ";border-left:1px solid var(--color-border);position:absolute;left:0;bottom:calc(50% + 25px);height:calc(50% - 25px)}.item[data-v-fd0742eb]:after{content:" 
";border-left:1px solid var(--color-border);position:absolute;left:0;top:calc(50% + 25px);height:calc(50% - 25px)}.item[data-v-fd0742eb]:first-of-type:before{display:none}.item[data-v-fd0742eb]:last-of-type:after{display:none}}
|
||||
@@ -1 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 261.76 226.69"><path d="M161.096.001l-30.225 52.351L100.647.001H-.005l130.877 226.688L261.749.001z" fill="#41b883"/><path d="M161.096.001l-30.225 52.351L100.647.001H52.346l78.526 136.01L209.398.001z" fill="#34495e"/></svg>
|
||||
|
Before Width: | Height: | Size: 276 B |
Binary file not shown.
|
Before Width: | Height: | Size: 4.2 KiB |
@@ -1,15 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<link rel="icon" href="/favicon.ico">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Vite App</title>
|
||||
<script type="module" crossorigin src="/assets/index-689b26c8.js"></script>
|
||||
<link rel="stylesheet" href="/assets/index-9f680dd7.css">
|
||||
</head>
|
||||
<body>
|
||||
<div id="app"></div>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,241 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<title>Storage</title>
|
||||
<style>
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
max-width: 100ch;
|
||||
margin: 0 auto;
|
||||
padding: 1em;
|
||||
background-color: #333;
|
||||
color: #eee;
|
||||
}
|
||||
td {
|
||||
text-align: right;
|
||||
padding: .5em;
|
||||
}
|
||||
td:first-child {
|
||||
text-align: left;
|
||||
}
|
||||
a {
|
||||
color: inherit;
|
||||
text-decoration: none;
|
||||
}
|
||||
</style>
|
||||
<div>
|
||||
<h2>Quick file upload</h2>
|
||||
<p>Uses parallel WebSocket connections for increased bandwidth /api/upload</p>
|
||||
<input type=file id=fileInput>
|
||||
<progress id=progressBar value=0 max=1></progress>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<h2>Files</h2>
|
||||
<ul id=file_list></ul>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// Client-side mirror of the server's file tree, plus a flat
// path -> node index rebuilt on every update.
let files = {}
let flatfiles = {}

// Open the /api/watch WebSocket and apply incoming tree updates,
// re-rendering the file listing after each one.
function createWatchSocket() {
  const wsurl = new URL("/api/watch", location.href.replace(/^http/, 'ws'))
  const ws = new WebSocket(wsurl)
  ws.onmessage = event => {
    // BUG FIX: `msg` was an implicit global (ReferenceError in strict
    // mode, and shared mutable state otherwise); declare it locally.
    const msg = JSON.parse(event.data)
    if (msg.update) {
      tree_update(msg.update)
      file_list(files)
    } else {
      // BUG FIX: corrected "Unkonwn" typo in the log message.
      console.log("Unknown message from watch socket", msg)
    }
  }
}

createWatchSocket()
|
||||
|
||||
// Apply one path-walk update message to the tree, then rebuild every
// node's .path and the flat path -> node index from scratch.
function tree_update(msg) {
  console.log("Tree update", msg)
  let node = files
  for (const elem of msg) {
    if (elem.deleted) {
      // A delete ends the walk: drop the child and its flat entry.
      const gonePath = node.dir[elem.name].path
      delete node.dir[elem.name]
      delete flatfiles[gonePath]
      break
    }
    if (elem.name !== undefined) node = node.dir[elem.name] ||= {}
    if (elem.size !== undefined) node.size = elem.size
    if (elem.mtime !== undefined) node.mtime = elem.mtime
    if (elem.dir !== undefined) node.dir = elem.dir
  }
  // Depth-first sweep assigning paths and repopulating flatfiles.
  files.path = "/"
  flatfiles = {}
  const pending = [files]
  let current
  while (current = pending.pop()) {
    flatfiles[current.path] = current
    if (current.dir === undefined) continue
    for (const name of Object.keys(current.dir)) {
      const child = current.dir[name]
      // Directories get a trailing slash; plain files do not.
      child.path = current.path + name + (child.dir === undefined ? "" : "/")
      pending.push(child)
    }
  }
}
|
||||
|
||||
// Natural-order, case-insensitive comparator for path strings
// ("file2" sorts before "file10").
var collator = new Intl.Collator(undefined, {numeric: true, sensitivity: 'base'});

const compare_path = (a, b) => collator.compare(a.path, b.path)
// BUG FIX: an Array.sort comparator must return a number. The old
// `a.mtime > b.mtime` returned a boolean (coerced to 0 or 1, never
// negative), producing an unreliable ordering. Use the numeric
// difference for a proper ascending-by-mtime comparison.
const compare_time = (a, b) => a.mtime - b.mtime
|
||||
|
||||
// Render the flat file index into #file_list, ordered by compare_time.
// The `files` parameter is accepted for call compatibility, but the
// listing is actually built from the module-level `flatfiles` index.
function file_list(files) {
  const listing = document.getElementById("file_list")
  const entries = Object.values(flatfiles).sort(compare_time)
  listing.innerHTML = ""
  for (const entry of entries) {
    const row = document.createElement("tr")
    const nameCell = document.createElement("td")
    const sizeCell = document.createElement("td")
    const timeCell = document.createElement("td")
    const link = document.createElement("a")
    listing.appendChild(row)
    row.appendChild(nameCell)
    row.appendChild(sizeCell)
    row.appendChild(timeCell)
    nameCell.appendChild(link)
    sizeCell.textContent = entry.size
    timeCell.textContent = formatUnixDate(entry.mtime)
    link.textContent = entry.path
    link.href = `/files${entry.path}`
    /* Optional WebSocket download hook, kept disabled as in the original:
    link.onclick = event => {
      if (window.showSaveFilePicker) {
        event.preventDefault()
        download_ws(name, size)
      }
    }
    link.download = "" */
  }
}
|
||||
|
||||
// Format a unix timestamp (seconds) as a relative English time for
// recent values ("5 minutes ago", "now"), falling back to a plain
// locale date for anything more than a week away.
function formatUnixDate(t) {
  const then = new Date(t * 1000)
  const delta = then - new Date()  // milliseconds; negative for the past
  const rel = new Intl.RelativeTimeFormat('en', { numeric: 'auto' })
  // (threshold ms, divisor ms, unit) from finest to coarsest — same
  // cutoffs as the original if/return cascade.
  const scales = [
    [60000, 1000, 'second'],
    [3600000, 60000, 'minute'],
    [86400000, 3600000, 'hour'],
    [604800000, 86400000, 'day'],
  ]
  for (const [limit, divisor, unit] of scales) {
    if (Math.abs(delta) <= limit) {
      return rel.format(Math.round(delta / divisor), unit)
    }
  }
  return then.toLocaleDateString()
}
|
||||
|
||||
// Stream a file from /api/download over a WebSocket into a local file
// chosen via showSaveFilePicker. On abort the output is truncated to
// the bytes actually received so it is not padded with stale data.
async function download_ws(name, size) {
  const handle = await window.showSaveFilePicker({
    suggestedName: name,
  })
  const writer = await handle.createWritable()
  writer.truncate(size)
  const endpoint = new URL("/api/download", location.href.replace(/^http/, 'ws'))
  const ws = new WebSocket(endpoint)
  let received = 0
  ws.onopen = () => {
    console.log("Downloading over WebSocket", name, size)
    ws.send(JSON.stringify({name, start: 0, end: size, size}))
  }
  ws.onmessage = event => {
    if (typeof event.data === 'string') {
      // A text frame is the server's final status message.
      const msg = JSON.parse(event.data)
      console.log("Download finished", msg)
      ws.close()
      return
    }
    console.log("Received chunk", name, received, received + event.data.size)
    received += event.data.size
    writer.write(event.data)
  }
  ws.onclose = () => {
    if (received < size) {
      console.log("Download aborted", name, received)
      writer.truncate(received)
    }
    writer.close()
  }
}
|
||||
|
||||
// Upload UI elements and tuning knobs.
const fileInput = document.getElementById("fileInput")
const progress = document.getElementById("progressBar")
const numConnections = 2         // parallel upload sockets (when enabled)
const chunkSize = 1 << 20        // 1 MiB per WebSocket message
const wsConnections = new Set()  // currently-open upload sockets

// Upload sockets are not opened automatically; kept disabled as in the
// original source:
//for (let i = 0; i < numConnections; i++) createUploadWS()
|
||||
|
||||
// Open one /api/upload WebSocket. It registers itself in wsConnections
// on open, removes itself on close, and reconnects after 1 second.
function createUploadWS() {
  const wsurl = new URL("/api/upload", location.href.replace(/^http/, 'ws'))
  const ws = new WebSocket(wsurl)
  ws.binaryType = 'arraybuffer'
  ws.onopen = () => {
    wsConnections.add(ws)
    console.log("Upload socket connected")
  }
  ws.onmessage = event => {
    // BUG FIX: `msg` was an implicit global (ReferenceError in strict
    // mode, cross-handler clobbering otherwise); declare it locally.
    const msg = JSON.parse(event.data)
    // Server acks with {written: n} per chunk; anything else is an error.
    if (msg.written) progress.value += +msg.written
    else console.log(`Error: ${msg.error}`)
  }
  ws.onclose = () => {
    wsConnections.delete(ws)
    console.log("Upload socket disconnected, reconnecting...")
    setTimeout(createUploadWS, 1000)
  }
}
|
||||
|
||||
// Read the byte range [start, end) of a File/Blob as an ArrayBuffer.
async function load(file, start, end) {
  // Blob.arrayBuffer() replaces the FileReader + hand-rolled promise
  // wrapper (which also shadowed the function's own name with a local
  // `load` binding); the result is identical.
  return file.slice(start, end).arrayBuffer()
}
|
||||
|
||||
// Send one chunk over the given upload socket: a JSON header frame
// describing the byte range, followed by the raw bytes.
async function sendChunk(file, start, end, ws) {
  const bytes = await load(file, start, end)
  const header = {
    name: file.name,
    size: file.size,
    start: start,
    end: end
  }
  ws.send(JSON.stringify(header))
  ws.send(bytes)
}
|
||||
|
||||
// On file selection, upload the chosen file in chunkSize pieces,
// distributed round-robin across the open upload sockets; the progress
// bar is advanced by the server acks handled in createUploadWS.
fileInput.addEventListener("change", async function() {
  const file = this.files[0]
  const chunkCount = Math.ceil(file.size / chunkSize)
  progress.value = 0
  progress.max = file.size

  console.log(wsConnections)
  // NOTE(review): if no upload socket is open, wsConnections.size is 0
  // and the modulo below yields NaN — unchanged from the original.
  for (let i = 0; i < chunkCount; i++) {
    const ws = Array.from(wsConnections)[i % wsConnections.size]
    const begin = i * chunkSize
    const finish = Math.min(file.size, begin + chunkSize)
    await sendChunk(file, begin, finish, ws)
  }
})
|
||||
|
||||
</script>
|
||||
Reference in New Issue
Block a user