refactor: load persisted projects before file system scan
This should improve usability for very large projects.
This commit is contained in:
parent
3eba052c15
commit
07571ef363
3 changed files with 58 additions and 21 deletions
|
@ -12,6 +12,7 @@ const LSP = @import("LSP.zig");
|
||||||
a: std.mem.Allocator,
|
a: std.mem.Allocator,
|
||||||
name: []const u8,
|
name: []const u8,
|
||||||
files: std.ArrayList(File),
|
files: std.ArrayList(File),
|
||||||
|
pending: std.ArrayList(File),
|
||||||
open_time: i64,
|
open_time: i64,
|
||||||
language_servers: std.StringHashMap(LSP),
|
language_servers: std.StringHashMap(LSP),
|
||||||
file_language_server: std.StringHashMap(LSP),
|
file_language_server: std.StringHashMap(LSP),
|
||||||
|
@ -31,6 +32,7 @@ pub fn init(a: std.mem.Allocator, name: []const u8) error{OutOfMemory}!Self {
|
||||||
.a = a,
|
.a = a,
|
||||||
.name = try a.dupe(u8, name),
|
.name = try a.dupe(u8, name),
|
||||||
.files = std.ArrayList(File).init(a),
|
.files = std.ArrayList(File).init(a),
|
||||||
|
.pending = std.ArrayList(File).init(a),
|
||||||
.open_time = std.time.milliTimestamp(),
|
.open_time = std.time.milliTimestamp(),
|
||||||
.language_servers = std.StringHashMap(LSP).init(a),
|
.language_servers = std.StringHashMap(LSP).init(a),
|
||||||
.file_language_server = std.StringHashMap(LSP).init(a),
|
.file_language_server = std.StringHashMap(LSP).init(a),
|
||||||
|
@ -64,18 +66,21 @@ pub fn write_state(self: *Self, writer: anytype) !void {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Restore previously persisted MRU file entries from CBOR-encoded `data`.
/// Each record is a (path, mtime, row, col) tuple fed back through
/// `update_mru_internal`. The file list is re-sorted by mtime on exit.
pub fn restore_state(self: *Self, data: []const u8) !void {
    defer self.sort_files_by_mtime();
    var path: []const u8 = undefined;
    var mtime: i128 = undefined;
    var row: usize = undefined;
    var col: usize = undefined;
    var iter: []const u8 = data;
    while (cbor.matchValue(&iter, .{
        tp.extract(&path),
        tp.extract(&mtime),
        tp.extract(&row),
        tp.extract(&col),
    }) catch |e| switch (e) {
        // A truncated trailing record just ends the restore; keeping the
        // entries parsed so far is acceptable partial state.
        error.CborTooShort => return,
        else => return e,
    }) {
        try self.update_mru_internal(path, mtime, row, col);
    }
}
|
||||||
|
@ -109,10 +114,6 @@ fn make_URI(self: *Self, file_path: ?[]const u8) ![]const u8 {
|
||||||
return buf.toOwnedSlice();
|
return buf.toOwnedSlice();
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add_file(self: *Self, path: []const u8, mtime: i128) error{OutOfMemory}!void {
|
|
||||||
(try self.files.addOne()).* = .{ .path = try self.a.dupe(u8, path), .mtime = mtime };
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn sort_files_by_mtime(self: *Self) void {
|
pub fn sort_files_by_mtime(self: *Self) void {
|
||||||
const less_fn = struct {
|
const less_fn = struct {
|
||||||
fn less_fn(_: void, lhs: File, rhs: File) bool {
|
fn less_fn(_: void, lhs: File, rhs: File) bool {
|
||||||
|
@ -192,13 +193,28 @@ pub fn query_recent_files(self: *Self, from: tp.pid_ref, max: usize, query: []co
|
||||||
return @min(max, matches.items.len);
|
return @min(max, matches.items.len);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Queue a file discovered by the file system walker. Pending entries are
/// merged into the main list via `merge_pending_files` once the walk is done.
/// The path is duplicated; this struct owns the copy.
pub fn add_pending_file(self: *Self, file_path: []const u8, mtime: i128) error{OutOfMemory}!void {
    const entry = try self.pending.addOne();
    entry.* = .{ .path = try self.a.dupe(u8, file_path), .mtime = mtime };
}
|
||||||
|
|
||||||
|
/// Swap in the freshly scanned `pending` list as the active file list, then
/// fold the previously known entries (with their persisted MRU positions)
/// back in so cursor state survives a rescan. Re-sorts by mtime on exit.
///
/// Fix: the original `catch {}` silently swallowed every error from
/// `update_mru_internal`, including `error.OutOfMemory`, which this function's
/// signature promises to report. OOM is now propagated after all owned path
/// buffers have been freed; other per-entry errors are still ignored so one
/// bad record cannot abort the merge.
pub fn merge_pending_files(self: *Self) error{OutOfMemory}!void {
    defer self.sort_files_by_mtime();
    const previous = try self.files.toOwnedSlice();
    defer self.a.free(previous);
    self.files = self.pending;
    self.pending = std.ArrayList(File).init(self.a);
    var oom: ?error{OutOfMemory} = null;
    for (previous) |*file| {
        if (oom == null) {
            self.update_mru_internal(file.path, file.mtime, file.row, file.col) catch |e| switch (e) {
                error.OutOfMemory => oom = error.OutOfMemory,
                else => {}, // best-effort: drop MRU state for this entry only
            };
        }
        // Always release the duplicated path, even after an OOM was recorded.
        self.a.free(file.path);
    }
    if (oom) |e| return e;
}
||||||
|
|
||||||
/// Record that `file_path` was just visited at (row, col), stamping the entry
/// with the current wall-clock time and re-sorting the MRU list.
pub fn update_mru(self: *Self, file_path: []const u8, row: usize, col: usize) !void {
    defer self.sort_files_by_mtime();
    const now = std.time.nanoTimestamp();
    try self.update_mru_internal(file_path, now, row, col);
}
|
||||||
|
|
||||||
fn update_mru_internal(self: *Self, file_path: []const u8, mtime: i128, row: usize, col: usize) !void {
|
fn update_mru_internal(self: *Self, file_path: []const u8, mtime: i128, row: usize, col: usize) !void {
|
||||||
defer self.sort_files_by_mtime();
|
|
||||||
for (self.files.items) |*file| {
|
for (self.files.items) |*file| {
|
||||||
if (!std.mem.eql(u8, file.path, file_path)) continue;
|
if (!std.mem.eql(u8, file.path, file_path)) continue;
|
||||||
file.mtime = mtime;
|
file.mtime = mtime;
|
||||||
|
@ -209,7 +225,20 @@ fn update_mru_internal(self: *Self, file_path: []const u8, mtime: i128, row: usi
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
return self.add_file(file_path, std.time.nanoTimestamp());
|
if (row != 0) {
|
||||||
|
(try self.files.addOne()).* = .{
|
||||||
|
.path = try self.a.dupe(u8, file_path),
|
||||||
|
.mtime = mtime,
|
||||||
|
.row = row,
|
||||||
|
.col = col,
|
||||||
|
.visited = true,
|
||||||
|
};
|
||||||
|
} else {
|
||||||
|
(try self.files.addOne()).* = .{
|
||||||
|
.path = try self.a.dupe(u8, file_path),
|
||||||
|
.mtime = mtime,
|
||||||
|
};
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn get_mru_position(self: *Self, from: tp.pid_ref, file_path: []const u8) !void {
|
pub fn get_mru_position(self: *Self, from: tp.pid_ref, file_path: []const u8) !void {
|
||||||
|
|
|
@ -173,19 +173,14 @@ const Process = struct {
|
||||||
if (try m.match(.{ "walk_tree_entry", tp.extract(&project_directory), tp.extract(&path), tp.extract(&high), tp.extract(&low) })) {
|
if (try m.match(.{ "walk_tree_entry", tp.extract(&project_directory), tp.extract(&path), tp.extract(&high), tp.extract(&low) })) {
|
||||||
const mtime = (@as(i128, @intCast(high)) << 64) | @as(i128, @intCast(low));
|
const mtime = (@as(i128, @intCast(high)) << 64) | @as(i128, @intCast(low));
|
||||||
if (self.projects.get(project_directory)) |project|
|
if (self.projects.get(project_directory)) |project|
|
||||||
project.add_file(path, mtime) catch |e| self.logger.err("walk_tree_entry", e);
|
project.add_pending_file(
|
||||||
// self.logger.print("file: {s}", .{path});
|
path,
|
||||||
|
mtime,
|
||||||
|
) catch |e| self.logger.err("walk_tree_entry", e);
|
||||||
} else if (try m.match(.{ "walk_tree_done", tp.extract(&project_directory) })) {
|
} else if (try m.match(.{ "walk_tree_done", tp.extract(&project_directory) })) {
|
||||||
if (self.walker) |pid| pid.deinit();
|
if (self.walker) |pid| pid.deinit();
|
||||||
self.walker = null;
|
self.walker = null;
|
||||||
const project = self.projects.get(project_directory) orelse return;
|
self.loaded(project_directory) catch |e| return from.forward_error(e);
|
||||||
self.restore_project(project) catch {};
|
|
||||||
project.sort_files_by_mtime();
|
|
||||||
self.logger.print("opened: {s} with {d} files in {d} ms", .{
|
|
||||||
project_directory,
|
|
||||||
project.files.items.len,
|
|
||||||
std.time.milliTimestamp() - project.open_time,
|
|
||||||
});
|
|
||||||
} else if (try m.match(.{ "update_mru", tp.extract(&project_directory), tp.extract(&path), tp.extract(&row), tp.extract(&col) })) {
|
} else if (try m.match(.{ "update_mru", tp.extract(&project_directory), tp.extract(&path), tp.extract(&row), tp.extract(&col) })) {
|
||||||
self.update_mru(project_directory, path, row, col) catch |e| return from.forward_error(e);
|
self.update_mru(project_directory, path, row, col) catch |e| return from.forward_error(e);
|
||||||
} else if (try m.match(.{ "open", tp.extract(&project_directory) })) {
|
} else if (try m.match(.{ "open", tp.extract(&project_directory) })) {
|
||||||
|
@ -226,9 +221,21 @@ const Process = struct {
|
||||||
project.* = try Project.init(self.a, project_directory);
|
project.* = try Project.init(self.a, project_directory);
|
||||||
try self.projects.put(try self.a.dupe(u8, project_directory), project);
|
try self.projects.put(try self.a.dupe(u8, project_directory), project);
|
||||||
self.walker = try walk_tree_async(self.a, project_directory);
|
self.walker = try walk_tree_async(self.a, project_directory);
|
||||||
|
self.restore_project(project) catch |e| self.logger.err("restore_project", e);
|
||||||
|
project.sort_files_by_mtime();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Finalize project loading after the file system walk completes: merge the
/// walker's pending entries with any restored persisted state, then report
/// how many files were found and how long the open took.
fn loaded(self: *Process, project_directory: []const u8) error{ OutOfMemory, Exit }!void {
    // Unknown directory: nothing to finalize.
    const project = self.projects.get(project_directory) orelse return;
    try project.merge_pending_files();
    const elapsed_ms = std.time.milliTimestamp() - project.open_time;
    self.logger.print("opened: {s} with {d} files in {d} ms", .{
        project_directory,
        project.files.items.len,
        elapsed_ms,
    });
}
|
||||||
|
|
||||||
fn request_recent_files(self: *Process, from: tp.pid_ref, project_directory: []const u8, max: usize) error{ OutOfMemory, Exit }!void {
|
fn request_recent_files(self: *Process, from: tp.pid_ref, project_directory: []const u8, max: usize) error{ OutOfMemory, Exit }!void {
|
||||||
const project = if (self.projects.get(project_directory)) |p| p else return tp.exit("No project");
|
const project = if (self.projects.get(project_directory)) |p| p else return tp.exit("No project");
|
||||||
project.sort_files_by_mtime();
|
project.sort_files_by_mtime();
|
||||||
|
@ -303,12 +310,11 @@ const Process = struct {
|
||||||
var file = try std.fs.createFileAbsolute(file_name, .{ .truncate = true });
|
var file = try std.fs.createFileAbsolute(file_name, .{ .truncate = true });
|
||||||
defer file.close();
|
defer file.close();
|
||||||
var buffer = std.io.bufferedWriter(file.writer());
|
var buffer = std.io.bufferedWriter(file.writer());
|
||||||
|
defer buffer.flush() catch {};
|
||||||
try project.write_state(buffer.writer());
|
try project.write_state(buffer.writer());
|
||||||
return buffer.flush();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn restore_project(self: *Process, project: *Project) !void {
|
fn restore_project(self: *Process, project: *Project) !void {
|
||||||
self.logger.print("restoring: {s}", .{project.name});
|
|
||||||
const file_name = try get_project_cache_file_path(self.a, project);
|
const file_name = try get_project_cache_file_path(self.a, project);
|
||||||
defer self.a.free(file_name);
|
defer self.a.free(file_name);
|
||||||
var file = std.fs.openFileAbsolute(file_name, .{ .mode = .read_only }) catch |e| switch (e) {
|
var file = std.fs.openFileAbsolute(file_name, .{ .mode = .read_only }) catch |e| switch (e) {
|
||||||
|
|
|
@ -238,6 +238,7 @@ fn receive_safe(self: *Self, from: tp.pid_ref, m: tp.message) tp.result {
|
||||||
}
|
}
|
||||||
if (try m.match(.{"quit"})) {
|
if (try m.match(.{"quit"})) {
|
||||||
project_manager.shutdown();
|
project_manager.shutdown();
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
if (try m.match(.{ "project_manager", "shutdown" })) {
|
if (try m.match(.{ "project_manager", "shutdown" })) {
|
||||||
return tp.exit(self.final_exit);
|
return tp.exit(self.final_exit);
|
||||||
|
@ -247,6 +248,7 @@ fn receive_safe(self: *Self, from: tp.pid_ref, m: tp.message) tp.result {
|
||||||
_ = try self.mainview.msg(.{"write_restore_info"});
|
_ = try self.mainview.msg(.{"write_restore_info"});
|
||||||
project_manager.shutdown();
|
project_manager.shutdown();
|
||||||
self.final_exit = "restart";
|
self.final_exit = "restart";
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (try m.match(.{"sigwinch"})) {
|
if (try m.match(.{"sigwinch"})) {
|
||||||
|
|
Loading…
Add table
Reference in a new issue