Added error set and enum completion deduplication

commit 4e753338af
parent 360c437d6a
Author: Alexandros Naskos
Date: 2021-03-29 14:02:58 +03:00
GPG Key ID: 02BF2E72B0EA32D2 (no known key found for this signature in database)

2 changed files with 78 additions and 37 deletions

src/analysis.zig

@@ -2443,10 +2443,29 @@ pub fn lookupSymbolContainer(
     return try lookupSymbolContainerInternal(store, arena, container_handle, symbol, instance_access, &use_trail);
 }
 
+fn eqlCompletionItem(a: types.CompletionItem, b: types.CompletionItem) bool {
+    return std.mem.eql(u8, a.label, b.label);
+}
+
+fn hashCompletionItem(completion_item: types.CompletionItem) u32 {
+    return @truncate(u32, std.hash.Wyhash.hash(0, completion_item.label));
+}
+
+pub const CompletionSet = std.ArrayHashMapUnmanaged(
+    types.CompletionItem,
+    void,
+    hashCompletionItem,
+    eqlCompletionItem,
+    false,
+);
+
+comptime {
+    std.debug.assert(@sizeOf(types.CompletionItem) == @sizeOf(CompletionSet.Entry));
+}
+
 pub const DocumentScope = struct {
     scopes: []Scope,
-    error_completions: []types.CompletionItem,
-    enum_completions: []types.CompletionItem,
+    error_completions: CompletionSet,
+    enum_completions: CompletionSet,
 
     pub fn debugPrint(self: DocumentScope) void {
         for (self.scopes) |scope| {
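The new CompletionSet keys completion items purely by label: hashCompletionItem runs Wyhash over the label and eqlCompletionItem compares labels, so inserting a second item with an already-seen label leaves the set unchanged and duplicates never accumulate. A minimal self-contained sketch of the same pattern, assuming the 2021-era (Zig 0.7/0.8) std.ArrayHashMapUnmanaged API this commit targets; Item is an illustrative stand-in for types.CompletionItem:

const std = @import("std");

const Item = struct { label: []const u8 };

fn hashItem(item: Item) u32 {
    // Same scheme as the commit: 64-bit Wyhash truncated to the map's u32 hash.
    return @truncate(u32, std.hash.Wyhash.hash(0, item.label));
}

fn eqlItem(a: Item, b: Item) bool {
    return std.mem.eql(u8, a.label, b.label);
}

const ItemSet = std.ArrayHashMapUnmanaged(Item, void, hashItem, eqlItem, false);

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var set = ItemSet{};
    defer set.deinit(&gpa.allocator);

    // Re-inserting an identical label leaves a single entry behind.
    try set.put(&gpa.allocator, .{ .label = "OutOfMemory" }, {});
    try set.put(&gpa.allocator, .{ .label = "OutOfMemory" }, {});
    std.debug.assert(set.count() == 1);
}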
@@ -2472,17 +2491,21 @@ pub const DocumentScope = struct {
         }
     }
 
-    pub fn deinit(self: DocumentScope, allocator: *std.mem.Allocator) void {
+    pub fn deinit(self: *DocumentScope, allocator: *std.mem.Allocator) void {
         for (self.scopes) |*scope| {
             scope.decls.deinit();
             allocator.free(scope.uses);
             allocator.free(scope.tests);
         }
         allocator.free(self.scopes);
-        for (self.error_completions) |item| if (item.documentation) |doc| allocator.free(doc.value);
-        allocator.free(self.error_completions);
-        for (self.enum_completions) |item| if (item.documentation) |doc| allocator.free(doc.value);
-        allocator.free(self.enum_completions);
+        for (self.error_completions.entries.items) |entry| {
+            if (entry.key.documentation) |doc| allocator.free(doc.value);
+        }
+        self.error_completions.deinit(allocator);
+        for (self.enum_completions.entries.items) |entry| {
+            if (entry.key.documentation) |doc| allocator.free(doc.value);
+        }
+        self.enum_completions.deinit(allocator);
     }
 };
@@ -2504,22 +2527,26 @@ pub const Scope = struct {
 pub fn makeDocumentScope(allocator: *std.mem.Allocator, tree: ast.Tree) !DocumentScope {
     var scopes = std.ArrayListUnmanaged(Scope){};
-    var error_completions = std.ArrayListUnmanaged(types.CompletionItem){};
-    var enum_completions = std.ArrayListUnmanaged(types.CompletionItem){};
+    var error_completions = CompletionSet{};
+    var enum_completions = CompletionSet{};
 
     errdefer {
         scopes.deinit(allocator);
-        for (error_completions.items) |item| if (item.documentation) |doc| allocator.free(doc.value);
+        for (error_completions.entries.items) |entry| {
+            if (entry.key.documentation) |doc| allocator.free(doc.value);
+        }
         error_completions.deinit(allocator);
-        for (enum_completions.items) |item| if (item.documentation) |doc| allocator.free(doc.value);
+        for (enum_completions.entries.items) |entry| {
+            if (entry.key.documentation) |doc| allocator.free(doc.value);
+        }
         enum_completions.deinit(allocator);
     }
 
     // pass root node index ('0')
     try makeScopeInternal(allocator, &scopes, &error_completions, &enum_completions, tree, 0);
     return DocumentScope{
         .scopes = scopes.toOwnedSlice(allocator),
-        .error_completions = error_completions.toOwnedSlice(allocator),
-        .enum_completions = enum_completions.toOwnedSlice(allocator),
+        .error_completions = error_completions,
+        .enum_completions = enum_completions,
     };
 }
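Two call-site consequences follow from this hunk: the sets are now returned by value with no toOwnedSlice step, and since deinit takes *DocumentScope, the result must be bound to a var (the document_store.zig hunk below makes exactly that change). A hypothetical caller sketched against the signatures shown in this diff; countUniqueErrors is illustrative and not part of the codebase:

const std = @import("std");
const analysis = @import("analysis.zig");

fn countUniqueErrors(allocator: *std.mem.Allocator, source: []const u8) !usize {
    var tree = try std.zig.parse(allocator, source);
    defer tree.deinit(allocator);

    // Must be `var`, not `const`: deinit mutates through *DocumentScope.
    var document_scope = try analysis.makeDocumentScope(allocator, tree);
    defer document_scope.deinit(allocator);

    // Error names that recur in the file are counted once.
    return document_scope.error_completions.count();
}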
@@ -2588,8 +2615,8 @@ pub fn varDecl(tree: ast.Tree, node_idx: ast.Node.Index) ?ast.full.VarDecl {
 fn makeScopeInternal(
     allocator: *std.mem.Allocator,
     scopes: *std.ArrayListUnmanaged(Scope),
-    error_completions: *std.ArrayListUnmanaged(types.CompletionItem),
-    enum_completions: *std.ArrayListUnmanaged(types.CompletionItem),
+    error_completions: *CompletionSet,
+    enum_completions: *CompletionSet,
     tree: ast.Tree,
     node_idx: ast.Node.Index,
 ) error{OutOfMemory}!void {
@@ -2625,12 +2652,12 @@ fn makeScopeInternal(
             var i = main_tokens[node_idx];
             while (i < data[node_idx].rhs) : (i += 1) {
                 if (token_tags[i] == .identifier) {
-                    (try error_completions.addOne(allocator)).* = .{
+                    try error_completions.put(allocator, .{
                         .label = tree.tokenSlice(i),
                         .kind = .Constant,
                         .insertTextFormat = .PlainText,
                         .insertText = tree.tokenSlice(i),
-                    };
+                    }, {});
                 }
             }
         }
@@ -2694,14 +2721,14 @@ fn makeScopeInternal(
                     }
 
                     if (!std.mem.eql(u8, name, "_")) {
-                        (try enum_completions.addOne(allocator)).* = .{
+                        try enum_completions.put(allocator, .{
                             .label = name,
                             .kind = .Constant,
                             .documentation = if (try getDocComments(allocator, tree, decl, .Markdown)) |docs| .{
                                 .kind = .Markdown,
                                 .value = docs,
                             } else null,
-                        };
+                        }, {});
                     }
                 }
             }
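The comptime assert introduced at the top of this file is what licenses the slice reinterpretation in document_store.zig below: with a void value type and store_hash == false, a CompletionSet.Entry holds nothing beyond its key, so the entry array and a []types.CompletionItem occupy identical bytes. A reduced sketch of that size-equality argument; Payload and Entry are illustrative stand-ins, not names from the codebase:

const std = @import("std");

const Payload = struct { label: []const u8 };

// With a zero-bit value field, the entry is layout-wise just the key.
const Entry = struct { key: Payload, value: void };

comptime {
    // Mirrors the commit's guard: if this ever fails, the cast below is invalid.
    std.debug.assert(@sizeOf(Payload) == @sizeOf(Entry));
}

pub fn main() void {
    var entries = [_]Entry{
        .{ .key = .{ .label = "A" }, .value = {} },
        .{ .key = .{ .label = "B" }, .value = {} },
    };
    // Reinterpret the entry slice as a payload slice without copying.
    const payloads = std.mem.bytesAsSlice(Payload, std.mem.sliceAsBytes(entries[0..]));
    std.debug.assert(std.mem.eql(u8, payloads[1].label, "B"));
}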

src/document_store.zig

@@ -146,7 +146,7 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: []u8) anyerror!*Handle
     var tree = try std.zig.parse(self.allocator, text);
     errdefer tree.deinit(self.allocator);
 
-    const document_scope = try analysis.makeDocumentScope(self.allocator, tree);
+    var document_scope = try analysis.makeDocumentScope(self.allocator, tree);
     errdefer document_scope.deinit(self.allocator);
 
     handle.* = Handle{
@@ -381,7 +381,7 @@ fn refreshDocument(self: *DocumentStore, handle: *Handle, zig_lib_path: ?[]const u8)
     const std_uri = try stdUriFromLibPath(&arena.allocator, zig_lib_path);
     for (import_strs.items) |str| {
         const uri = (try self.uriFromImportStr(&arena.allocator, handle.*, str)) orelse continue;
 
         exists_loop: for (still_exist) |*does_still_exist, i| {
             if (does_still_exist.*) continue;
@@ -643,31 +643,45 @@ pub fn deinit(self: *DocumentStore) void {
     self.build_files.deinit(self.allocator);
 }
 
-fn tagStoreCompletionItems(self: DocumentStore, arena: *std.heap.ArenaAllocator, base: *DocumentStore.Handle, comptime name: []const u8) ![]types.CompletionItem {
+fn tagStoreCompletionItems(
+    self: DocumentStore,
+    arena: *std.heap.ArenaAllocator,
+    base: *DocumentStore.Handle,
+    comptime name: []const u8,
+) ![]types.CompletionItem {
     // TODO Better solution for deciding what tags to include
-    var handle_arr = try arena.allocator.alloc(*DocumentStore.Handle, base.import_uris.items.len + 1);
-    handle_arr[0] = base;
-    var len: usize = @field(base.document_scope, name).len;
-    for (base.import_uris.items) |uri, idx| {
-        handle_arr[idx + 1] = self.handles.get(uri).?;
-        len += @field(handle_arr[idx + 1].document_scope, name).len;
+    var max_len: usize = @field(base.document_scope, name).count();
+    for (base.import_uris.items) |uri| {
+        max_len += @field(self.handles.get(uri).?.document_scope, name).count();
     }
-    var result = try arena.allocator.alloc(types.CompletionItem, len);
-    var res_idx: usize = 0;
-    for (handle_arr) |handle| {
-        for (@field(handle.document_scope, name)) |item| {
-            result[res_idx] = item;
-            res_idx += 1;
+
+    var result_set = analysis.CompletionSet{};
+    try result_set.ensureCapacity(&arena.allocator, max_len);
+    result_set.entries.appendSliceAssumeCapacity(@field(base.document_scope, name).entries.items);
+    try result_set.reIndex(&arena.allocator);
+
+    for (base.import_uris.items) |uri| {
+        const curr_set = &@field(self.handles.get(uri).?.document_scope, name);
+        for (curr_set.entries.items) |entry| {
+            result_set.putAssumeCapacity(entry.key, {});
         }
     }
-    return result;
+
+    // This is safe to do because CompletionSet.Entry == struct { value: types.CompletionItem }
+    return std.mem.bytesAsSlice(types.CompletionItem, std.mem.sliceAsBytes(result_set.entries.items));
 }
 
-pub fn errorCompletionItems(self: DocumentStore, arena: *std.heap.ArenaAllocator, base: *DocumentStore.Handle) ![]types.CompletionItem {
+pub fn errorCompletionItems(
+    self: DocumentStore,
+    arena: *std.heap.ArenaAllocator,
+    base: *DocumentStore.Handle,
+) ![]types.CompletionItem {
     return try self.tagStoreCompletionItems(arena, base, "error_completions");
 }
 
-pub fn enumCompletionItems(self: DocumentStore, arena: *std.heap.ArenaAllocator, base: *DocumentStore.Handle) ![]types.CompletionItem {
+pub fn enumCompletionItems(
+    self: DocumentStore,
+    arena: *std.heap.ArenaAllocator,
+    base: *DocumentStore.Handle,
+) ![]types.CompletionItem {
     return try self.tagStoreCompletionItems(arena, base, "enum_completions");
 }
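tagStoreCompletionItems now merges per-file sets rather than concatenating slices, so max_len is only an upper bound: duplicate labels across the base file and its imports collapse into one entry. The base handle's entries are bulk-appended and reIndex rebuilds the hash index once; each imported handle's entries then go through putAssumeCapacity, which leaves already-present labels alone. A condensed sketch of that merge, assuming the same-era ArrayHashMapUnmanaged internals the diff relies on (entries exposed as an ArrayListUnmanaged); mergeSets and Item are illustrative, not codebase names:

const std = @import("std");

const Item = struct { label: []const u8 };

fn hashItem(item: Item) u32 {
    return @truncate(u32, std.hash.Wyhash.hash(0, item.label));
}

fn eqlItem(a: Item, b: Item) bool {
    return std.mem.eql(u8, a.label, b.label);
}

const ItemSet = std.ArrayHashMapUnmanaged(Item, void, hashItem, eqlItem, false);

// Merge `other` into a fresh set seeded from `base`, mirroring the diff:
// reserve the worst-case capacity up front, bulk-append the base entries,
// rebuild the index once, then insert the rest with capacity guaranteed.
fn mergeSets(allocator: *std.mem.Allocator, base: ItemSet, other: ItemSet) !ItemSet {
    var result = ItemSet{};
    errdefer result.deinit(allocator);

    try result.ensureCapacity(allocator, base.count() + other.count());
    result.entries.appendSliceAssumeCapacity(base.entries.items);
    try result.reIndex(allocator);

    for (other.entries.items) |entry| {
        // Duplicate labels collapse here instead of appearing twice.
        result.putAssumeCapacity(entry.key, {});
    }
    return result;
}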