fix: more fixing of fixes

CJ van den Berg 2026-03-15 00:27:52 +01:00
parent 190290e96c
commit 4339c122eb
Signed by: neurocyte
GPG key ID: 8EB1E1BB660E3FB9
3 changed files with 30 additions and 11 deletions

View file

@@ -209,8 +209,8 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
     }
     // Diff against snapshot under the lock; collect events to emit after releasing it.
-    // to_create / to_delete hold borrowed pointers into the snapshot (which uses
-    // allocator, not tmp); only the list metadata itself uses tmp.
+    // to_create / to_delete / new_dirs all use tmp (arena) so they are independent
+    // of the snapshot and remain valid after the mutex is released.
     var to_create: std.ArrayListUnmanaged([]const u8) = .empty;
     var to_delete: std.ArrayListUnmanaged([]const u8) = .empty;
     var new_dirs: std.ArrayListUnmanaged([]const u8) = .empty;

@@ -220,8 +220,12 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
     var to_delete_owned = false;
     defer if (to_delete_owned) for (to_delete.items) |name| allocator.free(name);

+    // Use a boolean flag rather than errdefer to unlock the mutex. An errdefer
+    // scoped to the whole function would fire again after the explicit unlock below,
+    // double-unlocking the mutex (UB) if handler.change() later returns an error.
     self.snapshots_mutex.lock();
-    errdefer self.snapshots_mutex.unlock();
+    var snapshots_locked = true;
+    defer if (snapshots_locked) self.snapshots_mutex.unlock();
     {
         for (current_dirs.items) |name| {
             var path_buf: [std.fs.max_path_bytes]u8 = undefined;
@@ -254,7 +258,10 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
                 allocator.free(owned);
                 return e;
             };
-            try to_create.append(tmp, owned);
+            // Dupe into tmp so to_create holds arena-owned strings that remain
+            // valid after the mutex is released, even if remove_watch() frees
+            // the snapshot entry concurrently.
+            try to_create.append(tmp, try tmp.dupe(u8, owned));
         }

         var sit = snapshot.iterator();
@@ -265,6 +272,7 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
         for (to_delete.items) |name| _ = snapshot.fetchRemove(name);
         to_delete_owned = true; // ownership transferred; defer will free all items
     }
+    snapshots_locked = false;
     self.snapshots_mutex.unlock();

     // Emit all events outside the lock so handlers may safely call watch()/unwatch().
@@ -449,7 +457,10 @@ fn take_snapshot(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
     for (names.items) |name| {
         if (snapshot.contains(name)) continue;
         const owned = try allocator.dupe(u8, name);
-        try snapshot.put(allocator, owned, {});
+        snapshot.put(allocator, owned, {}) catch |e| {
+            allocator.free(owned);
+            return e;
+        };
     }
     self.snapshots_mutex.unlock();
     // Register a kqueue watch for each existing file so writes are detected.
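
Note on the locking change above: the flag-guarded defer, not the explicit unlock, is what prevents the double unlock. A minimal standalone sketch of the pattern, with a hypothetical notify_handler standing in for handler.change():

const std = @import("std");

var mutex: std.Thread.Mutex = .{};

fn notify_handler() !void {
    // Stand-in for handler.change(); may fail after the lock is released.
    return error.HandlerFailed;
}

fn scan() !void {
    mutex.lock();
    var locked = true;
    // Runs only if we return while still holding the lock; once the explicit
    // unlock below clears the flag, this defer becomes a no-op.
    defer if (locked) mutex.unlock();

    // ... diff against the snapshot here, under the lock ...

    locked = false;
    mutex.unlock();

    // With a function-scoped `errdefer mutex.unlock()` instead of the flag,
    // an error returned here would unlock the already-unlocked mutex.
    try notify_handler();
}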

View file

@@ -196,8 +196,8 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
     }
     // Diff against snapshot under the lock; collect events to emit after releasing it.
-    // to_create / to_delete / to_modify borrow pointers from the snapshot (allocator),
-    // only list metadata uses tmp.
+    // to_create / to_delete / to_modify / new_dirs all use tmp (arena) so they are
+    // independent of the snapshot and remain valid after the mutex is released.
     var to_create: std.ArrayListUnmanaged([]const u8) = .empty;
     var to_delete: std.ArrayListUnmanaged([]const u8) = .empty;
     var to_modify: std.ArrayListUnmanaged([]const u8) = .empty;
     var new_dirs: std.ArrayListUnmanaged([]const u8) = .empty;
@@ -208,8 +208,12 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
     var to_delete_owned = false;
     defer if (to_delete_owned) for (to_delete.items) |name| allocator.free(name);

+    // Use a boolean flag rather than errdefer to unlock the mutex. An errdefer
+    // scoped to the whole function would fire again after the explicit unlock below,
+    // double-unlocking the mutex (UB) if handler.change() later returns an error.
     self.snapshots_mutex.lock();
-    errdefer self.snapshots_mutex.unlock();
+    var snapshots_locked = true;
+    defer if (snapshots_locked) self.snapshots_mutex.unlock();
     {
         for (current_dirs.items) |name| {
             var path_buf: [std.fs.max_path_bytes]u8 = undefined;
@@ -238,7 +242,7 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
             // File exists in both; check for modification via mtime change.
             if (stored_mtime.* != entry.value_ptr.*) {
                 stored_mtime.* = entry.value_ptr.*;
-                try to_modify.append(tmp, entry.key_ptr.*); // borrow from current (tmp)
+                try to_modify.append(tmp, entry.key_ptr.*); // from current_files (tmp)
             }
         } else {
             // New file; add to snapshot and to_create list.
@@ -249,7 +253,10 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
                 allocator.free(owned);
                 return e;
             };
-            try to_create.append(tmp, owned); // borrow from snapshot
+            // Dupe into tmp so to_create holds arena-owned strings that remain
+            // valid after the mutex is released, even if remove_watch() frees
+            // the snapshot entry concurrently.
+            try to_create.append(tmp, try tmp.dupe(u8, owned));
         }
     }

@@ -261,6 +268,7 @@ fn scan_dir(self: *@This(), allocator: std.mem.Allocator, dir_path: []const u8)
         for (to_delete.items) |name| _ = snapshot.fetchRemove(name);
         to_delete_owned = true; // ownership transferred; defer will free all items
     }
+    snapshots_locked = false;
    self.snapshots_mutex.unlock();

     // Emit all events outside the lock so handlers may safely call watch()/unwatch().
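
Both backends now follow the same ownership split: snapshot keys belong to allocator, while anything queued for emission after the unlock is duplicated into the per-scan arena tmp. A minimal sketch of that split, assuming Zig 0.14-style unmanaged containers (matching the diff's .empty usage); collect_one and its names are hypothetical reductions of the scan loop:

const std = @import("std");

fn collect_one(gpa: std.mem.Allocator, name: []const u8) !void {
    var arena = std.heap.ArenaAllocator.init(gpa);
    defer arena.deinit(); // frees every tmp.dupe() in one shot after events are emitted
    const tmp = arena.allocator();

    var snapshot: std.StringHashMapUnmanaged(void) = .empty;
    defer {
        var it = snapshot.keyIterator();
        while (it.next()) |key| gpa.free(key.*);
        snapshot.deinit(gpa);
    }

    var to_create: std.ArrayListUnmanaged([]const u8) = .empty;

    const owned = try gpa.dupe(u8, name);
    snapshot.put(gpa, owned, {}) catch |e| {
        // put() failed before taking ownership of the key; free it to avoid a leak.
        gpa.free(owned);
        return e;
    };
    // The arena copy, not the snapshot key, goes into the event list: even if
    // another thread frees the snapshot entry once the mutex is dropped, the
    // queued name stays valid until arena.deinit().
    try to_create.append(tmp, try tmp.dupe(u8, owned));
}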

View file

@@ -172,7 +172,7 @@ pub fn Create(comptime variant: Variant) type {
         // Collapse any . and .. segments without touching the filesystem so that
         // relative inputs like "../sibling" or "./sub" produce the same watch key
         // and event-path prefix as an equivalent absolute path would.
-        const norm = std.fs.path.resolve(self.allocator, &.{abs_path}) catch return error.WatchFailed;
+        const norm = try std.fs.path.resolve(self.allocator, &.{abs_path});
         defer self.allocator.free(norm);
         try self.interceptor.backend.add_watch(self.allocator, norm);
         if (!Backend.watches_recursively) {
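
The third file stops masking resolve's real error (such as OutOfMemory) behind a generic error.WatchFailed and lets try propagate it. As the surrounding comment notes, std.fs.path.resolve normalizes purely lexically. A small illustration under assumptions: the path is made up and POSIX separators are assumed, so the expected string differs on Windows:

const std = @import("std");

test "resolve collapses . and .. lexically" {
    const gpa = std.testing.allocator;
    // An absolute input is normalized without consulting the filesystem or cwd.
    const norm = try std.fs.path.resolve(gpa, &.{"/home/user/project/./src/../sub"});
    defer gpa.free(norm);
    try std.testing.expectEqualStrings("/home/user/project/sub", norm);
}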