Update the project to run with modern uv/bun (or python/nodejs) environment #7
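The hunks below are essentially a mechanical formatting pass over the watcher module and its tests: over-long single-line `logger.debug(...)`/`logger.error(...)` calls, `PurePosixPath(...)` constructions, and assert messages are wrapped across lines. A minimal sketch of the wrapping pattern, assuming ruff-format/black-style formatting at the default 88-column limit (the logger name, variables, and messages here are placeholders, not taken from the project):

```python
# Illustrative sketch only: the line-wrapping pattern applied throughout the diff.
# Assumes an 88-column formatter such as `ruff format` or black; names and values
# below are placeholders.
import logging

logger = logging.getLogger(__name__)
iteration_count, i = 2000, 17

# Before: a single call that exceeds the column limit
logger.debug(f"DEBUG: treeget iteration {iteration_count}, i={i} (a long message that overflows the line)")

# After: the call is split so the f-string gets its own indented line
logger.debug(
    f"DEBUG: treeget iteration {iteration_count}, i={i} (a long message that overflows the line)"
)

# Long assert messages get parenthesized so they can wrap the same way
duration = 0.25
assert duration < 3.0, (
    f"Nested directory rename took too long: {duration}s - possible hang"
)
```

Apart from this wrapping, the visible edits are limited to spacing inside f-string expressions (`i+1` becomes `i + 1`) and quote normalization, neither of which changes behavior.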
@@ -53,8 +53,12 @@ def treeget(rootmod: list[FileEntry], path: PurePosixPath):
 
     for i, relpath, entry in treeiter(rootmod):
         iteration_count += 1
-        if iteration_count % 1000 == 0: # Log every 1000 iterations to detect infinite loops
-            logger.debug(f"DEBUG: treeget iteration {iteration_count}, i={i}, relpath={relpath}, entry.name={entry.name}")
+        if (
+            iteration_count % 1000 == 0
+        ): # Log every 1000 iterations to detect infinite loops
+            logger.debug(
+                f"DEBUG: treeget iteration {iteration_count}, i={i}, relpath={relpath}, entry.name={entry.name}"
+            )
 
         if begin is None:
             if relpath == path:
@@ -63,18 +67,24 @@ def treeget(rootmod: list[FileEntry], path: PurePosixPath):
                 ret.append(entry)
             continue
         if entry.level <= len(path.parts):
-            logger.debug(f"DEBUG: treeget BREAK: entry.level={entry.level} <= path.parts_len={len(path.parts)}")
+            logger.debug(
+                f"DEBUG: treeget BREAK: entry.level={entry.level} <= path.parts_len={len(path.parts)}"
+            )
             break
         ret.append(entry)
 
-    logger.debug(f"DEBUG: treeget EXIT: path={path}, begin={begin}, ret_len={len(ret)}, iterations={iteration_count}")
+    logger.debug(
+        f"DEBUG: treeget EXIT: path={path}, begin={begin}, ret_len={len(ret)}, iterations={iteration_count}"
+    )
     return begin, ret
 
 
 def treeinspos(rootmod: list[FileEntry], relpath: PurePosixPath, relfile: int):
     # Find the first entry greater than the new one
     # precondition: the new entry doesn't exist
-    logger.debug(f"DEBUG: treeinspos ENTRY: relpath={relpath}, relfile={relfile}, rootmod_len={len(rootmod)}")
+    logger.debug(
+        f"DEBUG: treeinspos ENTRY: relpath={relpath}, relfile={relfile}, rootmod_len={len(rootmod)}"
+    )
 
     isfile = 0
     level = 0
@@ -86,19 +96,27 @@ def treeinspos(rootmod: list[FileEntry], relpath: PurePosixPath, relfile: int):
 
         # Detect potential infinite loops in treeinspos
         if iteration_count % 1000 == 0:
-            logger.debug(f"DEBUG: treeinspos iteration {iteration_count}, i={i}, rel={rel}, entry.name={entry.name}, level={level}, entry.level={entry.level}")
+            logger.debug(
+                f"DEBUG: treeinspos iteration {iteration_count}, i={i}, rel={rel}, entry.name={entry.name}, level={level}, entry.level={entry.level}"
+            )
 
         if iteration_count > 10000: # Emergency brake for infinite loops
-            logger.error(f"ERROR: treeinspos potential infinite loop! iteration={iteration_count}, relpath={relpath}, i={i}, level={level}")
+            logger.error(
+                f"ERROR: treeinspos potential infinite loop! iteration={iteration_count}, relpath={relpath}, i={i}, level={level}"
+            )
             break
 
         if entry.level > level:
             # We haven't found item at level, skip subdirectories
-            logger.debug(f"DEBUG: treeinspos SKIP: entry.level={entry.level} > level={level}")
+            logger.debug(
+                f"DEBUG: treeinspos SKIP: entry.level={entry.level} > level={level}"
+            )
             continue
         if entry.level < level:
             # We have passed the level, so the new item is the first
-            logger.debug(f"DEBUG: treeinspos RETURN_EARLY: entry.level={entry.level} < level={level}, returning i={i}")
+            logger.debug(
+                f"DEBUG: treeinspos RETURN_EARLY: entry.level={entry.level} < level={level}, returning i={i}"
+            )
             return i
         if level == 0:
             # root
@@ -108,13 +126,17 @@ def treeinspos(rootmod: list[FileEntry], relpath: PurePosixPath, relfile: int):
 
         ename = rel.parts[level - 1]
         name = relpath.parts[level - 1]
-        logger.debug(f"DEBUG: treeinspos COMPARE: ename='{ename}', name='{name}', level={level}")
+        logger.debug(
+            f"DEBUG: treeinspos COMPARE: ename='{ename}', name='{name}', level={level}"
+        )
 
         esort = sortkey(ename)
         nsort = sortkey(name)
         # Non-leaf are always folders, only use relfile at leaf
         isfile = relfile if len(relpath.parts) == level else 0
-        logger.debug(f"DEBUG: treeinspos SORT: esort={esort}, nsort={nsort}, isfile={isfile}, entry.isfile={entry.isfile}")
+        logger.debug(
+            f"DEBUG: treeinspos SORT: esort={esort}, nsort={nsort}, isfile={isfile}, entry.isfile={entry.isfile}"
+        )
 
         # First compare by isfile, then by sorting order and if that too matches then case sensitive
         cmp = (
@@ -134,13 +156,17 @@ def treeinspos(rootmod: list[FileEntry], relpath: PurePosixPath, relfile: int):
         logger.debug(f"DEBUG: treeinspos INCREMENT_LEVEL: level {level} -> {level + 1}")
         level += 1
         if level > len(relpath.parts):
-            logger.error(f"ERROR: insertpos level overflow: relpath={relpath}, i={i}, entry.name={entry.name}, entry.level={entry.level}, level={level}")
+            logger.error(
+                f"ERROR: insertpos level overflow: relpath={relpath}, i={i}, entry.name={entry.name}, entry.level={entry.level}, level={level}"
+            )
             break
     else:
         logger.debug(f"DEBUG: treeinspos FOR_ELSE: incrementing i from {i} to {i + 1}")
         i += 1
 
-    logger.debug(f"DEBUG: treeinspos EXIT: returning i={i}, iterations={iteration_count}")
+    logger.debug(
+        f"DEBUG: treeinspos EXIT: returning i={i}, iterations={iteration_count}"
+    )
     return i
 
 
@@ -219,19 +245,25 @@ def update_root(loop):
 
 def update_path(rootmod: list[FileEntry], relpath: PurePosixPath, loop):
     """Called on FS updates, check the filesystem and broadcast any changes."""
-    logger.debug(f"DEBUG: update_path ENTRY: path={relpath}, rootmod_len={len(rootmod)}")
+    logger.debug(
+        f"DEBUG: update_path ENTRY: path={relpath}, rootmod_len={len(rootmod)}"
+    )
 
     # Add timing for walk operation
     walk_start = time.perf_counter()
     new = walk(relpath)
     walk_end = time.perf_counter()
-    logger.debug(f"DEBUG: walk({relpath}) took {walk_end - walk_start:.4f}s, returned {len(new)} entries")
+    logger.debug(
+        f"DEBUG: walk({relpath}) took {walk_end - walk_start:.4f}s, returned {len(new)} entries"
+    )
 
     # Add timing for treeget operation
     treeget_start = time.perf_counter()
     obegin, old = treeget(rootmod, relpath)
     treeget_end = time.perf_counter()
-    logger.debug(f"DEBUG: treeget({relpath}) took {treeget_end - treeget_start:.4f}s, obegin={obegin}, old_len={len(old) if old else 0}")
+    logger.debug(
+        f"DEBUG: treeget({relpath}) took {treeget_end - treeget_start:.4f}s, obegin={obegin}, old_len={len(old) if old else 0}"
+    )
 
     if old == new:
         logger.debug(
@@ -244,7 +276,9 @@ def update_path(rootmod: list[FileEntry], relpath: PurePosixPath, loop):
 
     # Debug the deletion operation
     if obegin is not None:
-        logger.debug(f"DEBUG: DELETING entries from rootmod[{obegin}:{obegin + len(old)}] for path {relpath}")
+        logger.debug(
+            f"DEBUG: DELETING entries from rootmod[{obegin}:{obegin + len(old)}] for path {relpath}"
+        )
         del rootmod[obegin : obegin + len(old)]
         logger.debug(f"DEBUG: DELETED entries, rootmod_len now {len(rootmod)}")
 
@@ -255,15 +289,21 @@ def update_path(rootmod: list[FileEntry], relpath: PurePosixPath, loop):
         inspos_start = time.perf_counter()
         i = treeinspos(rootmod, relpath, new[0].isfile)
         inspos_end = time.perf_counter()
-        logger.debug(f"DEBUG: treeinspos({relpath}) took {inspos_end - inspos_start:.4f}s, returned index={i}")
+        logger.debug(
+            f"DEBUG: treeinspos({relpath}) took {inspos_end - inspos_start:.4f}s, returned index={i}"
+        )
 
-        logger.debug(f"DEBUG: INSERTING {len(new)} entries at position {i} for path {relpath}")
+        logger.debug(
+            f"DEBUG: INSERTING {len(new)} entries at position {i} for path {relpath}"
+        )
         rootmod[i:i] = new
         logger.debug(f"DEBUG: INSERTED entries, rootmod_len now {len(rootmod)}")
     else:
         logger.debug(f"Watch: Removed {relpath}")
 
-    logger.debug(f"DEBUG: update_path EXIT: path={relpath}, final_rootmod_len={len(rootmod)}")
+    logger.debug(
+        f"DEBUG: update_path EXIT: path={relpath}, final_rootmod_len={len(rootmod)}"
+    )
 
 
 def update_space(loop):
@@ -297,18 +337,26 @@ def format_update(old, new):
 
         # Log every 1000 iterations to detect infinite loops
        if iteration_count % 1000 == 0:
-            logger.debug(f"DEBUG: format_update iteration {iteration_count}, oidx={oidx}/{len(old)}, nidx={nidx}/{len(new)}")
+            logger.debug(
+                f"DEBUG: format_update iteration {iteration_count}, oidx={oidx}/{len(old)}, nidx={nidx}/{len(new)}"
+            )
 
         # Emergency brake for potential infinite loops
         if iteration_count > 50000:
-            logger.error(f"ERROR: format_update potential infinite loop! iteration={iteration_count}, oidx={oidx}, nidx={nidx}")
-            raise Exception(f"format_update infinite loop detected at iteration {iteration_count}")
+            logger.error(
+                f"ERROR: format_update potential infinite loop! iteration={iteration_count}, oidx={oidx}, nidx={nidx}"
+            )
+            raise Exception(
+                f"format_update infinite loop detected at iteration {iteration_count}"
+            )
 
         modified = False
         # Matching entries are kept
         if old[oidx] == new[nidx]:
             entry = old[oidx]
-            logger.debug(f"DEBUG: format_update MATCH: entry={entry.name}, oidx={oidx}, nidx={nidx}")
+            logger.debug(
+                f"DEBUG: format_update MATCH: entry={entry.name}, oidx={oidx}, nidx={nidx}"
+            )
             oremain.remove(entry)
             nremain.remove(entry)
             keep_count += 1
@@ -326,12 +374,16 @@ def format_update(old, new):
         del_count = 0
         del_start_oidx = oidx
         while oidx < len(old) and old[oidx] not in nremain:
-            logger.debug(f"DEBUG: format_update DELETE: removing old[{oidx}]={old[oidx].name}")
+            logger.debug(
+                f"DEBUG: format_update DELETE: removing old[{oidx}]={old[oidx].name}"
+            )
             oremain.remove(old[oidx])
             del_count += 1
             oidx += 1
         if del_count:
-            logger.debug(f"DEBUG: format_update DEL: adding UpdDel({del_count}), oidx {del_start_oidx}->{oidx}")
+            logger.debug(
+                f"DEBUG: format_update DEL: adding UpdDel({del_count}), oidx {del_start_oidx}->{oidx}"
+            )
             update.append(UpdDel(del_count))
             continue
 
@@ -340,19 +392,29 @@ def format_update(old, new):
         ins_start_nidx = nidx
         while nidx < len(new) and new[nidx] not in oremain:
             entry = new[nidx]
-            logger.debug(f"DEBUG: format_update INSERT: adding new[{nidx}]={entry.name}")
+            logger.debug(
+                f"DEBUG: format_update INSERT: adding new[{nidx}]={entry.name}"
+            )
             nremain.remove(entry)
             insert_items.append(entry)
             nidx += 1
         if insert_items:
-            logger.debug(f"DEBUG: format_update INS: adding UpdIns({len(insert_items)} items), nidx {ins_start_nidx}->{nidx}")
+            logger.debug(
+                f"DEBUG: format_update INS: adding UpdIns({len(insert_items)} items), nidx {ins_start_nidx}->{nidx}"
+            )
             modified = True
             update.append(UpdIns(insert_items))
 
         if not modified:
-            logger.error(f"ERROR: format_update INFINITE_LOOP: nidx={nidx}, oidx={oidx}, old_len={len(old)}, new_len={len(new)}")
-            logger.error(f"ERROR: old[oidx]={old[oidx].name if oidx < len(old) else 'OUT_OF_BOUNDS'}")
-            logger.error(f"ERROR: new[nidx]={new[nidx].name if nidx < len(new) else 'OUT_OF_BOUNDS'}")
+            logger.error(
+                f"ERROR: format_update INFINITE_LOOP: nidx={nidx}, oidx={oidx}, old_len={len(old)}, new_len={len(new)}"
+            )
+            logger.error(
+                f"ERROR: old[oidx]={old[oidx].name if oidx < len(old) else 'OUT_OF_BOUNDS'}"
+            )
+            logger.error(
+                f"ERROR: new[nidx]={new[nidx].name if nidx < len(new) else 'OUT_OF_BOUNDS'}"
+            )
             raise Exception(
                 f"Infinite loop in diff {nidx=} {oidx=} {len(old)=} {len(new)=}"
             )
@@ -362,13 +424,19 @@ def format_update(old, new):
         logger.debug(f"DEBUG: format_update FINAL_KEEP: adding UpdKeep({keep_count})")
         update.append(UpdKeep(keep_count))
     if oremain:
-        logger.debug(f"DEBUG: format_update FINAL_DEL: adding UpdDel({len(oremain)}) for remaining old items")
+        logger.debug(
+            f"DEBUG: format_update FINAL_DEL: adding UpdDel({len(oremain)}) for remaining old items"
+        )
         update.append(UpdDel(len(oremain)))
     elif nremain:
-        logger.debug(f"DEBUG: format_update FINAL_INS: adding UpdIns({len(new[nidx:])}) for remaining new items")
+        logger.debug(
+            f"DEBUG: format_update FINAL_INS: adding UpdIns({len(new[nidx:])}) for remaining new items"
+        )
         update.append(UpdIns(new[nidx:]))
 
-    logger.debug(f"DEBUG: format_update EXIT: generated {len(update)} operations, iterations={iteration_count}")
+    logger.debug(
+        f"DEBUG: format_update EXIT: generated {len(update)} operations, iterations={iteration_count}"
+    )
     return msgspec.json.encode({"update": update}).decode()
 
 
@@ -440,16 +508,24 @@ def watcher_inotify(loop):
             logger.debug(f"Watch: {interesting=} {event=}")
             if interesting:
                 # Update modified path
-                logger.debug(f"DEBUG: inotify PROCESSING: event={event}, path={event[2]}/{event[3]}")
+                logger.debug(
+                    f"DEBUG: inotify PROCESSING: event={event}, path={event[2]}/{event[3]}"
+                )
                 t0 = time.perf_counter()
                 path = PurePosixPath(event[2]) / event[3]
                 try:
                     rel_path = path.relative_to(rootpath)
-                    logger.debug(f"DEBUG: inotify CALLING update_path: rel_path={rel_path}")
+                    logger.debug(
+                        f"DEBUG: inotify CALLING update_path: rel_path={rel_path}"
+                    )
                     update_path(rootmod, rel_path, loop)
-                    logger.debug(f"DEBUG: inotify update_path COMPLETED: rel_path={rel_path}")
+                    logger.debug(
+                        f"DEBUG: inotify update_path COMPLETED: rel_path={rel_path}"
+                    )
                 except Exception as e:
-                    logger.error(f"ERROR: inotify update_path FAILED: path={path}, error={e}")
+                    logger.error(
+                        f"ERROR: inotify update_path FAILED: path={path}, error={e}"
+                    )
                     raise
                 t1 = time.perf_counter()
                 logger.debug(f"Watch: Update {event[3]} took {t1 - t0:.1f}s")
@@ -461,7 +537,9 @@ def watcher_inotify(loop):
                 logger.debug("DEBUG: inotify TIMEOUT: breaking due to 0.5s timeout")
                 break
         if dirty and state.root != rootmod:
-            logger.debug(f"DEBUG: inotify BATCH_UPDATE: state.root_len={len(state.root)}, rootmod_len={len(rootmod)}")
+            logger.debug(
+                f"DEBUG: inotify BATCH_UPDATE: state.root_len={len(state.root)}, rootmod_len={len(rootmod)}"
+            )
             t0 = time.perf_counter()
             logger.debug("DEBUG: inotify CALLING format_update")
             update = format_update(state.root, rootmod)
@@ -106,9 +106,13 @@ def test_nested_directory_rename_causes_hang(setup_watcher):
     watching.state.root = initial_root
 
     # Verify the nested structure exists
-    target_path = PurePosixPath("parent_folder/child_folder/grandchild_folder/target_to_rename")
+    target_path = PurePosixPath(
+        "parent_folder/child_folder/grandchild_folder/target_to_rename"
+    )
     initial_begin, initial_entries = watching.treeget(initial_root, target_path)
-    assert initial_begin is not None, "Target directory should be found in initial state"
+    assert initial_begin is not None, (
+        "Target directory should be found in initial state"
+    )
     assert len(initial_entries) > 1, "Target directory should contain files"
 
     # Now rename the deeply nested directory
@@ -119,8 +123,12 @@ def test_nested_directory_rename_causes_hang(setup_watcher):
     working_state = watching.state.root[:]
 
     # This is where the hang likely occurs - updating a deeply nested path
-    old_nested_path = PurePosixPath("parent_folder/child_folder/grandchild_folder/target_to_rename")
-    new_nested_path = PurePosixPath("parent_folder/child_folder/grandchild_folder/renamed_target")
+    old_nested_path = PurePosixPath(
+        "parent_folder/child_folder/grandchild_folder/target_to_rename"
+    )
+    new_nested_path = PurePosixPath(
+        "parent_folder/child_folder/grandchild_folder/renamed_target"
+    )
 
     start_time = time.time()
 
@@ -134,7 +142,9 @@ def test_nested_directory_rename_causes_hang(setup_watcher):
 
     # Check for hang - nested operations should still be fast
     duration = end_time - start_time
-    assert duration < 3.0, f"Nested directory rename took too long: {duration}s - possible hang"
+    assert duration < 3.0, (
+        f"Nested directory rename took too long: {duration}s - possible hang"
+    )
 
     # Verify the old nested path is gone
     old_begin, old_entries = watching.treeget(working_state, old_nested_path)
@@ -190,7 +200,9 @@ def test_move_directory_across_nested_parents(setup_watcher):
 
     # These paths represent the complex nested move operation
     old_path = PurePosixPath("source_area/source_child/movable_directory")
-    new_path = PurePosixPath("destination_area/dest_child/dest_grandchild/moved_directory")
+    new_path = PurePosixPath(
+        "destination_area/dest_child/dest_grandchild/moved_directory"
+    )
 
     start_time = time.time()
 
@@ -269,7 +281,9 @@ def test_rapid_nested_directory_operations_cause_corruption(setup_watcher):
             watching.update_path(working_state, old_path, loop)
             watching.update_path(working_state, new_path, loop)
         except Exception as e:
-            pytest.fail(f"Rapid nested operations failed for {old_path} -> {new_path}: {e}")
+            pytest.fail(
+                f"Rapid nested operations failed for {old_path} -> {new_path}: {e}"
+            )
 
     end_time = time.time()
     duration = end_time - start_time
@@ -356,7 +370,10 @@ def test_nested_directory_treeget_corruption(setup_watcher):
     assert len(new_entries) >= 1, "New nested path should have entries"
 
     # Verify that other paths are still accessible (no corruption)
-    for path in [PurePosixPath("root_1/mid_1/leaf_1"), PurePosixPath("root_2/mid_0/leaf_1")]:
+    for path in [
+        PurePosixPath("root_1/mid_1/leaf_1"),
+        PurePosixPath("root_2/mid_0/leaf_1"),
+    ]:
         begin, entries = watching.treeget(working_state, path)
         assert begin is not None, f"Other paths should remain accessible: {path}"
 
@@ -436,7 +453,9 @@ def test_format_update_infinite_loop_with_complex_nested_changes(setup_watcher):
         duration = end_time - start_time
 
         # Even complex diffs should complete quickly
-        assert duration < 8.0, f"format_update took {duration}s - possible infinite loop"
+        assert duration < 8.0, (
+            f"format_update took {duration}s - possible infinite loop"
+        )
 
         # Verify the result is valid
         assert update_msg, "format_update should return a message"
@@ -445,7 +464,9 @@ def test_format_update_infinite_loop_with_complex_nested_changes(setup_watcher):
 
     except TimeoutError:
         signal.alarm(0)
-        pytest.fail("format_update hung/infinite loop detected with complex nested changes")
+        pytest.fail(
+            "format_update hung/infinite loop detected with complex nested changes"
+        )
 
     except Exception as e:
         signal.alarm(0)
@@ -652,7 +673,9 @@ def test_simulate_real_inotify_event_sequence(setup_watcher):
     end_time = time.time()
 
     duration = end_time - start_time
-    assert duration < 5.0, f"format_update took too long with nested changes: {duration}s"
+    assert duration < 5.0, (
+        f"format_update took too long with nested changes: {duration}s"
+    )
 
     # Verify the update message is valid
     assert update_msg, "format_update should return valid message"
@@ -793,7 +816,9 @@ def test_concurrent_inotify_events_simulation(setup_watcher):
 
     # Check that old names are gone
     for dir_name in dirs:
-        assert dir_name not in final_names, f"Old directory {dir_name} should be removed"
+        assert dir_name not in final_names, (
+            f"Old directory {dir_name} should be removed"
+        )
 
     # Check that new names are present
     for i, dir_name in enumerate(dirs):
@@ -916,7 +941,6 @@ def test_threaded_watcher_simulation(setup_watcher):
 
     # Patch broadcast to track changes
     with patch("cista.watching.broadcast", side_effect=tracking_broadcast):
-
         # Simulate rapid directory operations
         start_time = time.time()
 
@@ -945,12 +969,16 @@ def test_threaded_watcher_simulation(setup_watcher):
         # Old names should be gone
         for i in range(5):
             old_name = f"thread_test_dir_{i}"
-            assert old_name not in final_names, f"Old directory {old_name} should be removed"
+            assert old_name not in final_names, (
+                f"Old directory {old_name} should be removed"
+            )
 
         # New names should be present
         for i in range(5):
             new_name = f"renamed_thread_test_dir_{i}"
-            assert new_name in final_names, f"New directory {new_name} should be present"
+            assert new_name in final_names, (
+                f"New directory {new_name} should be present"
+            )
 
 
 def test_directory_rename_with_nested_structure(setup_watcher):
@@ -1064,7 +1092,7 @@ def test_concurrent_directory_operations(setup_watcher):
     # Rename multiple directories "simultaneously"
     renamed_dirs = []
     for i, dir_path in enumerate(created_dirs):
-        renamed_path = temp_dir / f"renamed_dir{i+1}"
+        renamed_path = temp_dir / f"renamed_dir{i + 1}"
         dir_path.rename(renamed_path)
         renamed_dirs.append(renamed_path)
 
@@ -1077,7 +1105,9 @@ def test_concurrent_directory_operations(setup_watcher):
 
     # Update for all new paths (should add them)
     for i in range(len(renamed_dirs)):
-        watching.update_path(watching.state.root, PurePosixPath(f"renamed_dir{i+1}"), loop)
+        watching.update_path(
+            watching.state.root, PurePosixPath(f"renamed_dir{i + 1}"), loop
+        )
 
     # Verify final state
     final_root = watching.state.root
@@ -1089,7 +1119,7 @@ def test_concurrent_directory_operations(setup_watcher):
 
     # New names should be present
     for i in range(len(renamed_dirs)):
-        assert f"renamed_dir{i+1}" in final_names
+        assert f"renamed_dir{i + 1}" in final_names
 
 
 @pytest.mark.slow
@@ -1109,8 +1139,7 @@ def test_watcher_doesnt_hang_on_directory_rename(setup_watcher):
 
     # Mock the inotify events to simulate what happens during a rename
     # This simulates the problematic scenario described in the bug report
-    with patch('time.monotonic', side_effect=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]):
-
+    with patch("time.monotonic", side_effect=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]):
         # Simulate the rename operation
         renamed_subdir = temp_dir / "renamed_test_subdir"
         subdir.rename(renamed_subdir)
@@ -1123,12 +1152,16 @@ def test_watcher_doesnt_hang_on_directory_rename(setup_watcher):
 
         # Update the path - this is where the hang might occur
         watching.update_path(watching.state.root, PurePosixPath("test_subdir"), loop)
-        watching.update_path(watching.state.root, PurePosixPath("renamed_test_subdir"), loop)
+        watching.update_path(
+            watching.state.root, PurePosixPath("renamed_test_subdir"), loop
+        )
 
         end_time = time.time()
 
         # The operation should complete quickly (within 5 seconds)
-        assert end_time - start_time < 5.0, "Directory rename operation took too long, possible hang detected"
+        assert end_time - start_time < 5.0, (
+            "Directory rename operation took too long, possible hang detected"
+        )
 
         # Verify the state is consistent
         final_names = [entry.name for entry in watching.state.root]
@@ -1141,7 +1174,9 @@ def test_watcher_doesnt_hang_on_directory_rename(setup_watcher):
         another_dir.mkdir()
 
         # This should work without issues
-        watching.update_path(watching.state.root, PurePosixPath("post_rename_dir"), loop)
+        watching.update_path(
+            watching.state.root, PurePosixPath("post_rename_dir"), loop
+        )
         final_names_after = [entry.name for entry in watching.state.root]
         assert "post_rename_dir" in final_names_after
 