From c4f3cd0efabe8898880ab6adfa43b40f8f216df3 Mon Sep 17 00:00:00 2001
From: Gregory Anders
Date: Wed, 10 Aug 2022 16:03:00 -0600
Subject: [PATCH 01/18] Only return highlights for symbols in current document

This seems to only occur when highlighting "field access" symbols, e.g.
"bar" in an expression such as `foo.bar`. This is because the
`symbolReferencesInternal` function finds the reference to the field in
the container even when the container is not in the current document.
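For illustration, a hypothetical two-file setup: with the cursor on `bar`
in b.zig, a documentHighlight request previously also returned the
location of the field declaration in a.zig, outside the current file.

    // a.zig (hypothetical)
    pub const Foo = struct { bar: u32 };

    // b.zig (hypothetical)
    const a = @import("a.zig");
    fn baz(foo: a.Foo) u32 {
        return foo.bar; // highlight requested here
    }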
---
 src/Server.zig | 33 +++++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)

diff --git a/src/Server.zig b/src/Server.zig
index bcedadf..c5f5bbb 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -1144,11 +1144,14 @@ fn referencesDefinitionGlobal(
     const result: types.ResponseParams = if (highlight) result: {
         var highlights = try std.ArrayList(types.DocumentHighlight).initCapacity(server.arena.allocator(), locs.items.len);
+        const uri = handle.uri();
         for (locs.items) |loc| {
-            highlights.appendAssumeCapacity(.{
-                .range = loc.range,
-                .kind = .Text,
-            });
+            if (std.mem.eql(u8, loc.uri, uri)) {
+                highlights.appendAssumeCapacity(.{
+                    .range = loc.range,
+                    .kind = .Text,
+                });
+            }
         }
         break :result .{ .DocumentHighlight = highlights.items };
     } else .{ .Locations = locs.items };
@@ -1187,11 +1190,14 @@ fn referencesDefinitionFieldAccess(
     );
     const result: types.ResponseParams = if (highlight) result: {
         var highlights = try std.ArrayList(types.DocumentHighlight).initCapacity(server.arena.allocator(), locs.items.len);
+        const uri = handle.uri();
         for (locs.items) |loc| {
-            highlights.appendAssumeCapacity(.{
-                .range = loc.range,
-                .kind = .Text,
-            });
+            if (std.mem.eql(u8, loc.uri, uri)) {
+                highlights.appendAssumeCapacity(.{
+                    .range = loc.range,
+                    .kind = .Text,
+                });
+            }
         }
         break :result .{ .DocumentHighlight = highlights.items };
     } else .{ .Locations = locs.items };
@@ -1218,11 +1224,14 @@ fn referencesDefinitionLabel(
     try references.labelReferences(&server.arena, decl, server.offset_encoding, include_decl, &locs, std.ArrayList(types.Location).append);
     const result: types.ResponseParams = if (highlight) result: {
         var highlights = try std.ArrayList(types.DocumentHighlight).initCapacity(server.arena.allocator(), locs.items.len);
+        const uri = handle.uri();
         for (locs.items) |loc| {
-            highlights.appendAssumeCapacity(.{
-                .range = loc.range,
-                .kind = .Text,
-            });
+            if (std.mem.eql(u8, loc.uri, uri)) {
+                highlights.appendAssumeCapacity(.{
+                    .range = loc.range,
+                    .kind = .Text,
+                });
+            }
         }
         break :result .{ .DocumentHighlight = highlights.items };
     } else .{ .Locations = locs.items };

From 673b4b35059f3efe17cafff1e0aeadf6c03da7f7 Mon Sep 17 00:00:00 2001
From: Lee Cannon
Date: Sun, 14 Aug 2022 22:45:05 +0100
Subject: [PATCH 02/18] trigger completion after `]`

---
 src/Server.zig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Server.zig b/src/Server.zig
index c5f5bbb..7825cd8 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -1786,7 +1786,7 @@ fn initializeHandler(server: *Server, writer: anytype, id: types.RequestId, req:
         },
         .textDocumentSync = .Full,
         .renameProvider = true,
-        .completionProvider = .{ .resolveProvider = false, .triggerCharacters = &[_][]const u8{ ".", ":", "@" }, .completionItem = .{ .labelDetailsSupport = true } },
+        .completionProvider = .{ .resolveProvider = false, .triggerCharacters = &[_][]const u8{ ".", ":", "@", "]" }, .completionItem = .{ .labelDetailsSupport = true } },
         .documentHighlightProvider = true,
         .hoverProvider = true,
         .codeActionProvider = false,

From 29690f4e0b7cee1fb2d7770176e76059238bc69f Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Thu, 18 Aug 2022 00:52:21 +0200
Subject: [PATCH 03/18] Refactor Ast helper functions
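The refactor funnels many open-coded switches through shared helpers in
src/ast.zig that all take a caller-provided two-slot buffer. A minimal
usage sketch (hypothetical `tree` and `node` values):

    var buffer: [2]Ast.Node.Index = undefined;
    // null when `node` is not a builtin call node
    const params = ast.builtinCallParams(tree, node, &buffer) orelse return;
    for (params) |param| {
        // visit each parameter node
    }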
---
 src/Server.zig | 13 +----
 src/analysis.zig | 124 +++++----------------------------
 src/ast.zig | 78 +++++++++++++++++++++++++
 src/inlay_hints.zig | 38 +++---------
 src/references.zig | 41 +++---------
 src/semantic_tokens.zig | 47 +++-------------
 6 files changed, 118 insertions(+), 223 deletions(-)

diff --git a/src/Server.zig b/src/Server.zig
index 7825cd8..cb7df43 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -299,17 +299,8 @@ fn publishDiagnostics(server: *Server, writer: anytype, handle: DocumentStore.Ha
 
             if (!std.mem.eql(u8, call_name, "@import")) continue;
 
-            const node_data = tree.nodes.items(.data)[node];
-            const params = switch (tree.nodes.items(.tag)[node]) {
-                .builtin_call, .builtin_call_comma => tree.extra_data[node_data.lhs..node_data.rhs],
-                .builtin_call_two, .builtin_call_two_comma => if (node_data.lhs == 0)
-                    &[_]Ast.Node.Index{}
-                else if (node_data.rhs == 0)
-                    &[_]Ast.Node.Index{node_data.lhs}
-                else
-                    &[_]Ast.Node.Index{ node_data.lhs, node_data.rhs },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const params = ast.builtinCallParams(tree, node, &buffer).?;
 
             if (params.len != 1) continue;

diff --git a/src/analysis.zig b/src/analysis.zig
index 235604c..c9c99f6 100644
--- a/src/analysis.zig
+++ b/src/analysis.zig
@@ -203,7 +203,7 @@ pub fn hasSelfParam(arena: *std.heap.ArenaAllocator, document_store: *DocumentSt
         return true;
     }
 
-    if (isPtrType(tree, param.type_expr)) {
+    if (ast.isPtrType(tree, param.type_expr)) {
         if (try resolveTypeOfNode(document_store, arena, .{
             .node = token_data[param.type_expr].rhs,
             .handle = handle,
@@ -311,13 +311,6 @@ fn getDeclName(tree: Ast, node: Ast.Node.Index) ?[]const u8 {
     };
 }
 
-fn isContainerDecl(decl_handle: DeclWithHandle) bool {
-    return switch (decl_handle.decl.*) {
-        .ast_node => |inner_node| ast.isContainer(decl_handle.handle.tree.nodes.items(.tag)[inner_node]),
-        else => false,
-    };
-}
-
 fn resolveVarDeclAliasInternal(store: *DocumentStore, arena: *std.heap.ArenaAllocator, node_handle: NodeWithHandle, root: bool) error{OutOfMemory}!?DeclWithHandle {
     _ = root;
     const handle = node_handle.handle;
@@ -394,39 +387,14 @@ pub fn resolveVarDeclAlias(store: *DocumentStore, arena: *std.heap.ArenaAllocato
     return null;
 }
 
-fn isBlock(tree: Ast, node: Ast.Node.Index) bool {
-    return switch (tree.nodes.items(.tag)[node]) {
-        .block,
-        .block_semicolon,
-        .block_two,
-        .block_two_semicolon,
-        => true,
-        else => false,
-    };
-}
-
 fn findReturnStatementInternal(tree: Ast, fn_decl: Ast.full.FnProto, body: Ast.Node.Index, already_found: *bool) ?Ast.Node.Index {
     var result: ?Ast.Node.Index = null;
 
     const node_tags = tree.nodes.items(.tag);
     const datas = tree.nodes.items(.data);
 
-    if (!isBlock(tree, body)) return null;
-
-    const statements: []const Ast.Node.Index = switch (node_tags[body]) {
-        .block, .block_semicolon => tree.extra_data[datas[body].lhs..datas[body].rhs],
-        .block_two, .block_two_semicolon => blk: {
-            const statements = &[_]Ast.Node.Index{ datas[body].lhs, datas[body].rhs };
-            const len: usize = if (datas[body].lhs == 0)
-                @as(usize, 0)
-            else if (datas[body].rhs == 0)
-                @as(usize, 1)
-            else
-                @as(usize, 2);
-            break :blk statements[0..len];
-        },
-        else => unreachable,
-    };
+    var buffer: [2]Ast.Node.Index = undefined;
+    const statements = ast.blockStatements(tree, body, &buffer) orelse return null;
 
     for (statements) |child_idx| {
         if (node_tags[child_idx] == .@"return") {
@@ -532,17 +500,6 @@ fn resolveUnwrapErrorType(store: *DocumentStore, arena: *std.heap.ArenaAllocator
     return null;
 }
 
-pub fn isPtrType(tree: Ast, node: Ast.Node.Index) bool {
-    return switch (tree.nodes.items(.tag)[node]) {
-        .ptr_type,
-        .ptr_type_aligned,
-        .ptr_type_bit_range,
-        .ptr_type_sentinel,
-        => true,
-        else => false,
-    };
-}
-
 /// Resolves the child type of a deref type
 fn resolveDerefType(store: *DocumentStore, arena: *std.heap.ArenaAllocator, deref: TypeWithHandle, bound_type_params: *BoundTypeParams) !?TypeWithHandle {
     const deref_node = switch (deref.type.data) {
@@ -560,7 +517,7 @@ fn resolveDerefType(store: *DocumentStore, arena: *std.heap.ArenaAllocator, dere
     const main_token = tree.nodes.items(.main_token)[deref_node];
     const token_tag = tree.tokens.items(.tag)[main_token];
 
-    if (isPtrType(tree, deref_node)) {
+    if (ast.isPtrType(tree, deref_node)) {
         const ptr_type = ast.ptrType(tree, deref_node).?;
         switch (token_tag) {
             .asterisk => {
@@ -900,17 +857,8 @@ pub fn resolveTypeOfNodeInternal(store: *DocumentStore, arena: *std.heap.ArenaAl
         .builtin_call_two,
         .builtin_call_two_comma,
         => {
-            const data = datas[node];
-            const params = switch (node_tags[node]) {
-                .builtin_call, .builtin_call_comma => tree.extra_data[data.lhs..data.rhs],
-                .builtin_call_two, .builtin_call_two_comma => if (data.lhs == 0)
-                    &[_]Ast.Node.Index{}
-                else if (data.rhs == 0)
-                    &[_]Ast.Node.Index{data.lhs}
-                else
-                    &[_]Ast.Node.Index{ data.lhs, data.rhs },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const params = ast.builtinCallParams(tree, node, &buffer).?;
 
             const call_name = tree.tokenSlice(main_tokens[node]);
             if (std.mem.eql(u8, call_name, "@This")) {
@@ -1399,17 +1347,9 @@ pub fn getImportStr(tree: Ast, node: Ast.Node.Index, source_index: usize) ?[]con
     const call_name = tree.tokenSlice(builtin_token);
 
     if (!std.mem.eql(u8, call_name, "@import")) return null;
-    const data = tree.nodes.items(.data)[node];
-    const params = switch (node_tags[node]) {
-        .builtin_call, .builtin_call_comma => tree.extra_data[data.lhs..data.rhs],
-        .builtin_call_two, .builtin_call_two_comma => if (data.lhs == 0)
-            &[_]Ast.Node.Index{}
-        else if (data.rhs == 0)
-            &[_]Ast.Node.Index{data.lhs}
-        else
-            &[_]Ast.Node.Index{ data.lhs, data.rhs },
-        else => unreachable,
-    };
+
+    var buffer: [2]Ast.Node.Index = undefined;
+    const params = ast.builtinCallParams(tree, node, &buffer).?;
 
     if (params.len != 1) return null;
@@ -2522,21 +2462,8 @@ fn makeInnerScope(allocator: std.mem.Allocator, context: ScopeContext, node_idx:
         }
     }
 
-    const container_decl = switch (node_tag) {
-        .container_decl, .container_decl_trailing => tree.containerDecl(node_idx),
-        .container_decl_arg, .container_decl_arg_trailing => tree.containerDeclArg(node_idx),
-        .container_decl_two, .container_decl_two_trailing => blk: {
-            var buffer: [2]Ast.Node.Index = undefined;
-            break :blk tree.containerDeclTwo(&buffer, node_idx);
-        },
-        .tagged_union, .tagged_union_trailing => tree.taggedUnion(node_idx),
-        .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => tree.taggedUnionEnumTag(node_idx),
-        .tagged_union_two, .tagged_union_two_trailing => blk: {
-            var buffer: [2]Ast.Node.Index = undefined;
-            break :blk tree.taggedUnionTwo(&buffer, node_idx);
-        },
-        else => null,
-    };
+    var buffer: [2]Ast.Node.Index = undefined;
+    const container_decl = ast.containerDecl(tree, node_idx, &buffer);
 
     // Only tagged unions and enums should pass this
     const can_have_enum_completions = if (container_decl) |container| blk: {
@@ -2717,20 +2644,8 @@ fn makeScopeInternal(allocator: std.mem.Allocator, context: ScopeContext, node_i
                 uses.deinit();
             }
 
-            const statements: []const Ast.Node.Index = switch (node_tag) {
-                .block, .block_semicolon => tree.extra_data[data[node_idx].lhs..data[node_idx].rhs],
-                .block_two, .block_two_semicolon => blk: {
-                    const statements = &[_]Ast.Node.Index{ data[node_idx].lhs, data[node_idx].rhs };
-                    const len: usize = if (data[node_idx].lhs == 0)
-                        @as(usize, 0)
-                    else if (data[node_idx].rhs == 0)
-                        @as(usize, 1)
-                    else
-                        @as(usize, 2);
-                    break :blk statements[0..len];
-                },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const statements = ast.blockStatements(tree, node_idx, &buffer).?;
 
             for (statements) |idx| {
                 if (tags[idx] == .@"usingnamespace") {
@@ -3058,17 +2973,8 @@ fn makeScopeInternal(allocator: std.mem.Allocator, context: ScopeContext, node_i
         .builtin_call_two,
         .builtin_call_two_comma,
         => {
-            const b_data = data[node_idx];
-            const params = switch (node_tag) {
-                .builtin_call, .builtin_call_comma => tree.extra_data[b_data.lhs..b_data.rhs],
-                .builtin_call_two, .builtin_call_two_comma => if (b_data.lhs == 0)
-                    &[_]Ast.Node.Index{}
-                else if (b_data.rhs == 0)
-                    &[_]Ast.Node.Index{b_data.lhs}
-                else
-                    &[_]Ast.Node.Index{ b_data.lhs, b_data.rhs },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const params = ast.builtinCallParams(tree, node_idx, &buffer).?;
 
             for (params) |param| {
                 try makeScopeInternal(allocator, context, param);

diff --git a/src/ast.zig b/src/ast.zig
index 727a289..b5b66ce 100644
--- a/src/ast.zig
+++ b/src/ast.zig
@@ -948,6 +948,18 @@ pub fn isContainer(tree: Ast, node: Ast.Node.Index) bool {
     };
 }
 
+pub fn containerDecl(tree: Ast, node_idx: Ast.Node.Index, buffer: *[2]Ast.Node.Index) ?full.ContainerDecl {
+    return switch (tree.nodes.items(.tag)[node_idx]) {
+        .container_decl, .container_decl_trailing => tree.containerDecl(node_idx),
+        .container_decl_arg, .container_decl_arg_trailing => tree.containerDeclArg(node_idx),
+        .container_decl_two, .container_decl_two_trailing => tree.containerDeclTwo(buffer, node_idx),
+        .tagged_union, .tagged_union_trailing => tree.taggedUnion(node_idx),
+        .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => tree.taggedUnionEnumTag(node_idx),
+        .tagged_union_two, .tagged_union_two_trailing => tree.taggedUnionTwo(buffer, node_idx),
+        else => null,
+    };
+}
+
 /// Returns the member indices of a given declaration container.
 /// Asserts given `tag` is a container node
 pub fn declMembers(tree: Ast, node_idx: Ast.Node.Index, buffer: *[2]Ast.Node.Index) []const Ast.Node.Index {
@@ -977,6 +989,17 @@ pub fn varDecl(tree: Ast, node_idx: Ast.Node.Index) ?Ast.full.VarDecl {
     };
 }
 
+pub fn isPtrType(tree: Ast, node: Ast.Node.Index) bool {
+    return switch (tree.nodes.items(.tag)[node]) {
+        .ptr_type,
+        .ptr_type_aligned,
+        .ptr_type_bit_range,
+        .ptr_type_sentinel,
+        => true,
+        else => false,
+    };
+}
+
 pub fn isBuiltinCall(tree: Ast, node: Ast.Node.Index) bool {
     return switch (tree.nodes.items(.tag)[node]) {
         .builtin_call,
@@ -1003,6 +1026,17 @@ pub fn isCall(tree: Ast, node: Ast.Node.Index) bool {
     };
 }
 
+pub fn isBlock(tree: Ast, node: Ast.Node.Index) bool {
+    return switch (tree.nodes.items(.tag)[node]) {
+        .block_two,
+        .block_two_semicolon,
+        .block,
+        .block_semicolon,
+        => true,
+        else => false,
+    };
+}
+
 pub fn fnProto(tree: Ast, node: Ast.Node.Index, buf: *[1]Ast.Node.Index) ?Ast.full.FnProto {
     return switch (tree.nodes.items(.tag)[node]) {
         .fn_proto => tree.fnProto(node),
@@ -1029,3 +1063,47 @@ pub fn callFull(tree: Ast, node: Ast.Node.Index, buf: *[1]Ast.Node.Index) ?Ast.f
         else => null,
     };
 }
+
+/// returns a list of parameters
+pub fn builtinCallParams(tree: Ast, node: Ast.Node.Index, buf: *[2]Ast.Node.Index) ?[]const Node.Index {
+    const node_data = tree.nodes.items(.data);
+    return switch (tree.nodes.items(.tag)[node]) {
+        .builtin_call_two, .builtin_call_two_comma => {
+            buf[0] = node_data[node].lhs;
+            buf[1] = node_data[node].rhs;
+            if (node_data[node].lhs == 0) {
+                return buf[0..0];
+            } else if (node_data[node].rhs == 0) {
+                return buf[0..1];
+            } else {
+                return buf[0..2];
+            }
+        },
+        .builtin_call,
+        .builtin_call_comma,
+        => tree.extra_data[node_data[node].lhs..node_data[node].rhs],
+        else => return null,
+    };
+}
+
+/// returns a list of statements
+pub fn blockStatements(tree: Ast, node: Ast.Node.Index, buf: *[2]Ast.Node.Index) ?[]const Node.Index {
+    const node_data = tree.nodes.items(.data);
+    return switch (tree.nodes.items(.tag)[node]) {
+        .block_two, .block_two_semicolon => {
+            buf[0] = node_data[node].lhs;
+            buf[1] = node_data[node].rhs;
+            if (node_data[node].lhs == 0) {
+                return buf[0..0];
+            } else if (node_data[node].rhs == 0) {
+                return buf[0..1];
+            } else {
+                return buf[0..2];
+            }
+        },
+        .block,
+        .block_semicolon,
+        => tree.extra_data[node_data[node].lhs..node_data[node].rhs],
+        else => return null,
+    };
+}

diff --git a/src/inlay_hints.zig b/src/inlay_hints.zig
index 9ba7965..8215e26 100644
--- a/src/inlay_hints.zig
+++ b/src/inlay_hints.zig
@@ -133,7 +133,7 @@ fn writeCallHint(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *Doc
 }
 
 /// takes parameter nodes from the ast and function parameter names from `Builtin.arguments` and writes parameter hints into `builder.hints`
-fn writeBuiltinHint(builder: *Builder, parameters: []Ast.Node.Index, arguments: []const []const u8) !void {
+fn writeBuiltinHint(builder: *Builder, parameters: []const Ast.Node.Index, arguments: []const []const u8) !void {
     if (parameters.len == 0) return;
 
     const handle = builder.handle;
@@ -290,25 +290,9 @@ fn writeNodeInlayHint(builder: *Builder, arena: *std.heap.ArenaAllocator, store:
         .builtin_call_comma,
         => {
             var buffer: [2]Ast.Node.Index = undefined;
-            const parameters: []Ast.Node.Index = switch (tag) {
-                .builtin_call_two, .builtin_call_two_comma => blk: {
-                    buffer[0] = node_data[node].lhs;
-                    buffer[1] = node_data[node].rhs;
-
-                    var size: usize = 0;
-
-                    if (node_data[node].rhs != 0) {
-                        size = 2;
-                    } else if (node_data[node].lhs != 0) {
-                        size = 1;
-                    }
-                    break :blk buffer[0..size];
-                },
-                .builtin_call, .builtin_call_comma => tree.extra_data[node_data[node].lhs..node_data[node].rhs],
-                else => unreachable,
-            };
+            const params = ast.builtinCallParams(tree, node, &buffer).?;
 
-            if (builder.config.inlay_hints_show_builtin and parameters.len > 1) {
+            if (builder.config.inlay_hints_show_builtin and params.len > 1) {
                 const name = tree.tokenSlice(main_tokens[node]);
 
                 outer: for (data.builtins) |builtin| {
@@ -318,12 +302,12 @@ fn writeNodeInlayHint(builder: *Builder, arena: *std.heap.ArenaAllocator, store:
                         if (std.mem.eql(u8, builtin_name, name)) break :outer;
                     }
 
-                    try writeBuiltinHint(builder, parameters, builtin.arguments);
+                    try writeBuiltinHint(builder, params, builtin.arguments);
                 }
             }
 
-            for (parameters) |param| {
-                if (parameters.len > inlay_hints_max_inline_children) {
+            for (params) |param| {
+                if (params.len > inlay_hints_max_inline_children) {
                     if (!isNodeInRange(tree, param, range)) continue;
                 }
 
@@ -623,15 +607,7 @@ fn writeNodeInlayHint(builder: *Builder, arena: *std.heap.ArenaAllocator, store:
         .tagged_union_enum_tag_trailing,
         => {
             var buffer: [2]Ast.Node.Index = undefined;
-            const decl: Ast.full.ContainerDecl = switch (tag) {
-                .container_decl, .container_decl_trailing => tree.containerDecl(node),
-                .container_decl_two, .container_decl_two_trailing => tree.containerDeclTwo(&buffer, node),
-                .container_decl_arg, .container_decl_arg_trailing => tree.containerDeclArg(node),
-                .tagged_union, .tagged_union_trailing => tree.taggedUnion(node),
-                .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => tree.taggedUnionEnumTag(node),
-                .tagged_union_two, .tagged_union_two_trailing => tree.taggedUnionTwo(&buffer, node),
-                else => unreachable,
-            };
+            const decl: Ast.full.ContainerDecl = ast.containerDecl(tree, node, &buffer).?;
 
             try await @asyncCall(child_frame, {}, writeNodeInlayHint, .{ builder, arena, store, decl.ast.arg, range });

diff --git a/src/references.zig b/src/references.zig
index fbd290f..52507c2 100644
--- a/src/references.zig
+++ b/src/references.zig
@@ -67,20 +67,9 @@ fn symbolReferencesInternal(arena: *std.heap.ArenaAllocator, store: *DocumentSto
 
     switch (node_tags[node]) {
         .block, .block_semicolon, .block_two, .block_two_semicolon => {
-            const statements: []const Ast.Node.Index = switch (node_tags[node]) {
-                .block, .block_semicolon => tree.extra_data[datas[node].lhs..datas[node].rhs],
-                .block_two, .block_two_semicolon => blk: {
-                    const statements = &[_]Ast.Node.Index{ datas[node].lhs, datas[node].rhs };
-                    const len: usize = if (datas[node].lhs == 0)
-                        @as(usize, 0)
-                    else if (datas[node].rhs == 0)
-                        @as(usize, 1)
-                    else
-                        @as(usize, 2);
-                    break :blk statements[0..len];
-                },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const statements = ast.blockStatements(tree, node, &buffer).?;
+
             for (statements) |stmt|
                 try symbolReferencesInternal(arena, store, .{ .node = stmt, .handle = handle }, decl, encoding, context, handler);
         },
@@ -319,13 +308,10 @@ fn symbolReferencesInternal(arena: *std.heap.ArenaAllocator, store: *DocumentSto
         .async_call_comma,
         .async_call_one,
         .async_call_one_comma,
-        => |c| {
+        => {
             var buf: [1]Ast.Node.Index = undefined;
-            const call: Ast.full.Call = switch (c) {
-                .call, .call_comma, .async_call, .async_call_comma => tree.callFull(node),
-                .call_one, .call_one_comma, .async_call_one, .async_call_one_comma => tree.callOne(&buf, node),
-                else => unreachable,
-            };
+            const call = ast.callFull(tree, node, &buf).?;
+
             if (call.ast.fn_expr != 0)
                 try symbolReferencesInternal(arena, store, .{ .node = call.ast.fn_expr, .handle = handle }, decl, encoding, context, handler);
@@ -380,18 +366,9 @@ fn symbolReferencesInternal(arena: *std.heap.ArenaAllocator, store: *DocumentSto
         .builtin_call_comma,
         .builtin_call_two,
         .builtin_call_two_comma,
-        => |builtin_tag| {
-            const data = datas[node];
-            const params = switch (builtin_tag) {
-                .builtin_call, .builtin_call_comma => tree.extra_data[data.lhs..data.rhs],
-                .builtin_call_two, .builtin_call_two_comma => if (data.lhs == 0)
-                    &[_]Ast.Node.Index{}
-                else if (data.rhs == 0)
-                    &[_]Ast.Node.Index{data.lhs}
-                else
-                    &[_]Ast.Node.Index{ data.lhs, data.rhs },
-                else => unreachable,
-            };
+        => {
+            var buffer: [2]Ast.Node.Index = undefined;
+            const params = ast.builtinCallParams(tree, node, &buffer).?;
 
             for (params) |param|
                 try symbolReferencesInternal(arena, store, .{ .node = param, .handle = handle }, decl, encoding, context, handler);

diff --git a/src/semantic_tokens.zig b/src/semantic_tokens.zig
index 301458e..384beaa 100644
--- a/src/semantic_tokens.zig
+++ b/src/semantic_tokens.zig
@@ -317,20 +317,8 @@ fn writeNodeTokens(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *D
                 try writeToken(builder, main_token - 2, .label);
             }
 
-            const statements: []const Ast.Node.Index = switch (tag) {
-                .block, .block_semicolon => tree.extra_data[node_data[node].lhs..node_data[node].rhs],
-                .block_two, .block_two_semicolon => blk: {
-                    const statements = &[_]Ast.Node.Index{ node_data[node].lhs, node_data[node].rhs };
-                    const len: usize = if (node_data[node].lhs == 0)
-                        @as(usize, 0)
-                    else if (node_data[node].rhs == 0)
-                        @as(usize, 1)
-                    else
-                        @as(usize, 2);
-                    break :blk statements[0..len];
-                },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const statements = ast.blockStatements(tree, node, &buffer).?;
 
             for (statements) |child| {
                 if (node_tags[child].isContainerField()) {
@@ -390,15 +378,7 @@ fn writeNodeTokens(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *D
         .tagged_union_two_trailing,
         => {
             var buf: [2]Ast.Node.Index = undefined;
-            const decl: Ast.full.ContainerDecl = switch (tag) {
-                .container_decl, .container_decl_trailing => tree.containerDecl(node),
-                .container_decl_two, .container_decl_two_trailing => tree.containerDeclTwo(&buf, node),
-                .container_decl_arg, .container_decl_arg_trailing => tree.containerDeclArg(node),
-                .tagged_union, .tagged_union_trailing => tree.taggedUnion(node),
-                .tagged_union_enum_tag, .tagged_union_enum_tag_trailing => tree.taggedUnionEnumTag(node),
-                .tagged_union_two, .tagged_union_two_trailing => tree.taggedUnionTwo(&buf, node),
-                else => unreachable,
-            };
+            const decl: Ast.full.ContainerDecl = ast.containerDecl(tree, node, &buf).?;
 
             try writeToken(builder, decl.layout_token, .keyword);
             try writeToken(builder, decl.ast.main_token, .keyword);
@@ -427,7 +407,7 @@ fn writeNodeTokens(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *D
         .identifier => {
             const name = tree.getNodeSource(node);
 
-            if(std.mem.eql(u8,name, "undefined")) {
+            if (std.mem.eql(u8, name, "undefined")) {
                 return try writeToken(builder, main_token, .keywordLiteral);
             } else if (analysis.isTypeIdent(name)) {
                 return try writeToken(builder, main_token, .type);
@@ -682,11 +662,7 @@ fn writeNodeTokens(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *D
         .async_call_one_comma,
         => {
             var params: [1]Ast.Node.Index = undefined;
-            const call: Ast.full.Call = switch (tag) {
-                .call, .call_comma, .async_call, .async_call_comma => tree.callFull(node),
-                .call_one, .call_one_comma, .async_call_one, .async_call_one_comma => tree.callOne(&params, node),
-                else => unreachable,
-            };
+            const call = ast.callFull(tree, node, &params).?;
 
             try writeToken(builder, call.async_token, .keyword);
             try await @asyncCall(child_frame, {}, writeNodeTokens, .{ builder, arena, store, call.ast.fn_expr });
@@ -757,17 +733,8 @@ fn writeNodeTokens(builder: *Builder, arena: *std.heap.ArenaAllocator, store: *D
         .builtin_call_two,
         .builtin_call_two_comma,
         => {
-            const data = node_data[node];
-            const params = switch (tag) {
-                .builtin_call, .builtin_call_comma => tree.extra_data[data.lhs..data.rhs],
-                .builtin_call_two, .builtin_call_two_comma => if (data.lhs == 0)
-                    &[_]Ast.Node.Index{}
-                else if (data.rhs == 0)
-                    &[_]Ast.Node.Index{data.lhs}
-                else
-                    &[_]Ast.Node.Index{ data.lhs, data.rhs },
-                else => unreachable,
-            };
+            var buffer: [2]Ast.Node.Index = undefined;
+            const params = ast.builtinCallParams(tree, node, &buffer).?;
 
             try writeToken(builder, main_token, .builtin);
             for (params) |param|

From cb5eeb0b45702b5bf4b3376e2d1ef992b205ba80 Mon Sep 17 00:00:00 2001
From: InKryption
Date: Thu, 18 Aug 2022 19:28:56 +0200
Subject: [PATCH 04/18] Improve CLI, fix a leak, and other cleanups
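The reworked parser recognizes --help, --version, --config,
--enable-debug-log and --config-path. Example invocations (paths
hypothetical):

    zls --enable-debug-log --config-path /home/user/zls.json
    zls --config
    zls --version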
---
 src/main.zig | 163 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 133 insertions(+), 30 deletions(-)

diff --git a/src/main.zig b/src/main.zig
index 25c989e..881f1a1 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -43,8 +43,15 @@ const ConfigWithPath = struct {
     config_path: ?[]const u8,
 };
 
-fn getConfig(allocator: std.mem.Allocator, config: ConfigWithPath) !ConfigWithPath {
-    if (config.config_path) |path| {
+fn getConfig(
+    allocator: std.mem.Allocator,
+    config_path: ?[]const u8,
+    /// If true and the provided `config_path` is non-null, frees the
+    /// path in the case that it is not returned.
+    free_old_config_path: bool,
+) !ConfigWithPath {
+    if (config_path) |path| {
         if (Config.loadFromFile(allocator, path)) |conf| {
             return ConfigWithPath{
                 .config = conf,
@@ -56,6 +63,9 @@ fn getConfig(allocator: std.mem.Allocator, config: ConfigWithPa
             \\Falling back to a lookup in the local and global configuration folders
             \\
         , .{path});
+        if (free_old_config_path) {
+            allocator.free(path);
+        }
     }
 
     if (try known_folders.getPath(allocator, .local_configuration)) |path| {
@@ -65,6 +75,7 @@ fn getConfig(allocator: std.mem.Allocator, config: ConfigWithPa
                 .config_path = path,
             };
         }
+        allocator.free(path);
     }
 
     if (try known_folders.getPath(allocator, .global_configuration)) |path| {
@@ -74,6 +85,7 @@ fn getConfig(allocator: std.mem.Allocator, config: ConfigWithPa
                 .config_path = path,
            };
        }
+        allocator.free(path);
    }
 
     return ConfigWithPath{
@@ -82,6 +94,119 @@ fn getConfig(allocator: std.mem.Allocator, config: ConfigWithPa
     };
 }
 
+const ParseArgsResult = enum { proceed, exit };
+fn parseArgs(
+    allocator: std.mem.Allocator,
+    config: *ConfigWithPath,
+) !ParseArgsResult {
+    const ArgId = enum {
+        help,
+        version,
+        config,
+        @"enable-debug-log",
+        @"config-path",
+    };
+    const arg_id_map = std.ComptimeStringMap(ArgId, comptime blk: {
+        const fields = @typeInfo(ArgId).Enum.fields;
+        const KV = std.meta.Tuple(&.{ []const u8, ArgId });
+        var pairs: [fields.len]KV = undefined;
+        for (pairs) |*pair, i| pair.* = .{ fields[i].name, @intToEnum(ArgId, fields[i].value) };
+        break :blk pairs[0..];
+    });
+    const help_message: []const u8 = comptime help_message: {
+        var help_message: []const u8 =
+            \\Usage: zls [command]
+            \\
+            \\Commands:
+            \\
+            \\
+        ;
+        const InfoMap = std.enums.EnumArray(ArgId, []const u8);
+        var cmd_infos: InfoMap = InfoMap.init(.{
+            .help = "Prints this message.",
+            .version = "Prints the compiler version with which the server was compiled.",
+            .@"enable-debug-log" = "Enables debug logs.",
+            .@"config-path" = "Specify the path to a configuration file specifying LSP behaviour.",
+            .config = "Run the ZLS configuration wizard.",
+        });
+        var info_it = cmd_infos.iterator();
+        while (info_it.next()) |entry| {
+            help_message = help_message ++ std.fmt.comptimePrint(" --{s}: {s}\n", .{ @tagName(entry.key), entry.value.* });
+        }
+        help_message = help_message ++ "\n";
+        break :help_message help_message;
+    };
+
+    var args_it = try std.process.ArgIterator.initWithAllocator(allocator);
+    defer args_it.deinit();
+    if (!args_it.skip()) @panic("Could not find self argument");
+
+    // Makes the behavior of enabling debug logging consistent regardless of argument order.
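+    // Flags are recorded first and (apart from --config-path, which consumes
+    // its value immediately) acted on after the loop; `specified` also powers
+    // the duplicate-argument check below.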
+    var specified = std.enums.EnumArray(ArgId, bool).initFill(false);
+    var config_path: ?[]const u8 = null;
+    errdefer if (config_path) |path| allocator.free(path);
+
+    const stderr = std.io.getStdErr().writer();
+
+    while (args_it.next()) |tok| {
+        if (!std.mem.startsWith(u8, tok, "--") or tok.len == 2) {
+            try stderr.print("{s}\n", .{help_message});
+            try stderr.print("Unexpected positional argument '{s}'.\n", .{tok});
+            return .exit;
+        }
+
+        const argname = tok["--".len..];
+        const id = arg_id_map.get(argname) orelse {
+            try stderr.print("{s}\n", .{help_message});
+            try stderr.print("Unrecognized argument '{s}'.\n", .{argname});
+            return .exit;
+        };
+
+        if (specified.get(id)) {
+            try stderr.print("{s}\n", .{help_message});
+            try stderr.print("Duplicate argument '{s}'.\n", .{argname});
+            return .exit;
+        }
+        specified.set(id, true);
+
+        switch (id) {
+            .help => {},
+            .version => {},
+            .@"enable-debug-log" => {},
+            .config => {},
+            .@"config-path" => {
+                const path = args_it.next() orelse {
+                    try stderr.print("Expected configuration file path after --config-path argument.\n", .{});
+                    return .exit;
+                };
+                config.config_path = try allocator.dupe(u8, path);
+            },
+        }
+    }
+
+    if (specified.get(.help)) {
+        try stderr.print("{s}\n", .{help_message});
+        return .exit;
+    }
+    if (specified.get(.version)) {
+        try std.io.getStdOut().writer().print("Data Version: {s}\n", .{@tagName(build_options.data_version)});
+        return .exit;
+    }
+    if (specified.get(.config)) {
+        try setup.wizard(allocator);
+        return .exit;
+    }
+    if (specified.get(.@"enable-debug-log")) {
+        actual_log_level = .debug;
+        logger.info("Enabled debug logging.\n", .{});
+    }
+    if (specified.get(.@"config-path")) {
+        std.debug.assert(config.config_path != null);
+    }
+
+    return .proceed;
+}
+
 const stack_frames = switch (zig_builtin.mode) {
     .Debug => 10,
     else => 0,
 };
 
 pub fn main() anyerror!void {
     var gpa_state = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = stack_frames }){};
     defer _ = gpa_state.deinit();
+    var tracy_state = if (tracy.enable_allocation) tracy.tracyAllocator(gpa_state.allocator()) else void{};
 
-    var allocator = gpa_state.allocator();
-    if (tracy.enable_allocation) {
-        allocator = tracy.tracyAllocator(allocator).allocator();
-    }
+    const allocator: std.mem.Allocator = if (tracy.enable_allocation) tracy_state.allocator() else gpa_state.allocator();
 
     var config = ConfigWithPath{
         .config = undefined,
         .config_path = null,
     };
     defer if (config.config_path) |path| allocator.free(path);
 
-    // Check arguments.
-    var args_it = try std.process.ArgIterator.initWithAllocator(allocator);
-    defer args_it.deinit();
-    if (!args_it.skip()) @panic("Could not find self argument");
-
-    while (args_it.next()) |arg| {
-        // TODO add --help --version
-        if (std.mem.eql(u8, arg, "--debug-log")) {
-            actual_log_level = .debug;
-            std.debug.print("Enabled debug logging\n", .{});
-        } else if (std.mem.eql(u8, arg, "--config-path")) {
-            var path = args_it.next() orelse {
-                std.debug.print("Expected configuration file path after --config-path argument\n", .{});
-                std.os.exit(1);
-            };
-            config.config_path = try allocator.dupe(u8, path);
-        } else if (std.mem.eql(u8, arg, "config") or std.mem.eql(u8, arg, "configure")) {
-            try setup.wizard(allocator);
-            return;
-        } else {
-            std.debug.print("Unrecognized argument {s}\n", .{arg});
-            std.os.exit(1);
-        }
+    switch (try parseArgs(allocator, &config)) {
+        .proceed => {},
+        .exit => return,
     }
 
-    config = try getConfig(allocator, config);
+    config = try getConfig(allocator, config.config_path, true);
     if (config.config_path == null) {
         logger.info("No config file zls.json found.", .{});
     }

From 1859afd4c5d441cae8fcb2de40784d127a21f054 Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Thu, 18 Aug 2022 22:56:03 +0200
Subject: [PATCH 05/18] emit JSON formatted packages & include dirs in
 build_runner.zig
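Instead of NUL-delimited name/path lines, the runner now prints a single
BuildConfig JSON object on stdout. Roughly this shape, with hypothetical
values; the exact include_dirs encoding is whatever std.json produces for
the IncludeDir tagged union:

    {
        "packages": [
            { "name": "known-folders", "uri": "/home/user/deps/known-folders.zig" }
        ],
        "include_dirs": [
            { "raw_path": "/usr/local/include" }
        ]
    }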
---
 src/DocumentStore.zig | 80 ++++++++++--------------------
 src/Server.zig | 4 +-
 src/special/build_runner.zig | 96 +++++++++++++++++++++++++++++-----
 3 files changed, 110 insertions(+), 70 deletions(-)

diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index b200091..438af2b 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -6,20 +6,16 @@ const offsets = @import("offsets.zig");
 const log = std.log.scoped(.doc_store);
 const Ast = std.zig.Ast;
 const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig");
+const BuildConfig = @import("special/build_runner.zig").BuildConfig;
 const tracy = @import("tracy.zig");
 const Config = @import("Config.zig");
 const DocumentStore = @This();
 
 const BuildFile = struct {
-    const Pkg = struct {
-        name: []const u8,
-        uri: []const u8,
-    };
-
     refs: usize,
     uri: []const u8,
-    packages: std.ArrayListUnmanaged(Pkg),
+    config: BuildConfig,
 
     builtin_uri: ?[]const u8 = null,
 
@@ -110,7 +106,7 @@ fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: *B
     }
 }
 
-const LoadPackagesContext = struct {
+const LoadBuildConfigContext = struct {
     build_file: *BuildFile,
     allocator: std.mem.Allocator,
     build_runner_path: []const u8,
@@ -121,7 +117,7 @@ const LoadPackagesContext = struct {
     global_cache_root: []const u8,
 };
 
-fn loadPackages(context: LoadPackagesContext) !void {
+fn loadBuildConfiguration(context: LoadBuildConfigContext) !void {
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
@@ -167,46 +163,24 @@ fn loadPackages(context: LoadPackagesContext) !void {
         defer allocator.free(joined);
 
         log.err(
-            "Failed to execute build runner to collect packages, command:\n{s}\nError: {s}",
+            "Failed to execute build runner to collect build configuration, command:\n{s}\nError: {s}",
             .{ joined, zig_run_result.stderr },
         );
     }
 
     switch (zig_run_result.term) {
         .Exited => |exit_code| {
-            if (exit_code == 0) {
-                log.debug("Finished zig run for build file {s}", .{build_file.uri});
+            if (exit_code != 0) return error.RunFailed;
 
-                for (build_file.packages.items) |old_pkg| {
-                    allocator.free(old_pkg.name);
-                    allocator.free(old_pkg.uri);
-                }
+            const parse_options = std.json.ParseOptions{ .allocator = allocator };
 
-                build_file.packages.shrinkAndFree(allocator, 0);
-                var line_it = std.mem.split(u8, zig_run_result.stdout, "\n");
-                while (line_it.next()) |line| {
-                    if (std.mem.indexOfScalar(u8, line, '\x00')) |zero_byte_idx| {
-                        const name = line[0..zero_byte_idx];
-                        const rel_path = line[zero_byte_idx + 1 ..];
+            std.json.parseFree(BuildConfig, build_file.config, parse_options);
 
-                        const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ directory_path, rel_path });
-                        defer allocator.free(pkg_abs_path);
-
-                        const pkg_uri = try URI.fromPath(allocator, pkg_abs_path);
-                        errdefer allocator.free(pkg_uri);
-
-                        const duped_name = try allocator.dupe(u8, name);
-                        errdefer allocator.free(duped_name);
-
-                        (try build_file.packages.addOne(allocator)).* = .{
-                            .name = duped_name,
-                            .uri = pkg_uri,
-                        };
-                    }
-                }
-            } else {
-                return error.RunFailed;
-            }
+            build_file.config = std.json.parse(
+                BuildConfig,
+                &std.json.TokenStream.init(zig_run_result.stdout),
+                parse_options,
+            ) catch return error.RunFailed;
         },
         else => return error.RunFailed,
     }
@@ -256,7 +230,10 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
     build_file.* = .{
         .refs = 1,
         .uri = try self.allocator.dupe(u8, uri),
-        .packages = .{},
+        .config = .{
+            .packages = &.{},
+            .include_dirs = &.{},
+        },
     };
 
     const build_file_path = try URI.parse(self.allocator, build_file.uri);
@@ -274,7 +251,7 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
 
     // TODO: Do this in a separate thread?
     // It can take quite long.
-    loadPackages(.{
+    loadBuildConfiguration(.{
         .build_file = build_file,
         .allocator = self.allocator,
         .build_runner_path = self.config.build_runner_path.?,
@@ -409,11 +386,8 @@ fn decrementBuildFileRefs(self: *DocumentStore, build_file: *BuildFile) void {
     build_file.refs -= 1;
     if (build_file.refs == 0) {
         log.debug("Freeing build file {s}", .{build_file.uri});
-        for (build_file.packages.items) |pkg| {
-            self.allocator.free(pkg.name);
-            self.allocator.free(pkg.uri);
-        }
-        build_file.packages.deinit(self.allocator);
+
+        std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });
 
         // Decrement count of the document since one count comes
         // from the build file existing.
@@ -544,7 +518,7 @@ fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
 
 pub fn applySave(self: *DocumentStore, handle: *Handle) !void {
     if (handle.is_build_file) |build_file| {
-        loadPackages(.{
+        loadBuildConfiguration(.{
            .build_file = build_file,
            .allocator = self.allocator,
            .build_runner_path = self.config.build_runner_path.?,
@@ -553,7 +527,7 @@ pub fn applySave(self: *DocumentStore, handle: *Handle) !void {
             .cache_root = self.zig_cache_root,
             .global_cache_root = self.zig_global_cache_root,
         }) catch |err| {
-            log.err("Failed to load packages of build file {s} (error: {})", .{ build_file.uri, err });
+            log.err("Failed to load build configuration for {s} (error: {})", .{ build_file.uri, err });
         };
     }
 }
@@ -643,7 +617,7 @@ pub fn uriFromImportStr(self: *DocumentStore, allocator: std.mem.Allocator, hand
         return null;
     } else if (!std.mem.endsWith(u8, import_str, ".zig")) {
         if (handle.associated_build_file) |build_file| {
-            for (build_file.packages.items) |pkg| {
+            for (build_file.config.packages) |pkg| {
                 if (std.mem.eql(u8, import_str, pkg.name)) {
                     return try allocator.dupe(u8, pkg.uri);
                 }
@@ -686,7 +660,7 @@ pub fn resolveImport(self: *DocumentStore, handle: *Handle, import_str: []const
             }
         }
         if (handle.associated_build_file) |bf| {
-            for (bf.packages.items) |pkg| {
+            for (bf.config.packages) |pkg| {
                 if (std.mem.eql(u8, pkg.uri, final_uri)) {
                     break :find_uri pkg.uri;
                 }
@@ -775,11 +749,7 @@ pub fn deinit(self: *DocumentStore) void {
     self.handles.deinit(self.allocator);
 
     for (self.build_files.items) |build_file| {
-        for (build_file.packages.items) |pkg| {
-            self.allocator.free(pkg.name);
-            self.allocator.free(pkg.uri);
-        }
-        build_file.packages.deinit(self.allocator);
+        std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });
         self.allocator.free(build_file.uri);
         build_file.destroy(self.allocator);
     }

diff --git a/src/Server.zig b/src/Server.zig
index cb7df43..eaf958f 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -2048,9 +2048,9 @@ fn completionHandler(server: *Server, writer: anytype, id: types.RequestId, req:
 
     if (!subpath_present and pos_context == .import_string_literal) {
         if (handle.associated_build_file) |bf| {
-            try fsl_completions.ensureUnusedCapacity(server.arena.allocator(), bf.packages.items.len);
+            try fsl_completions.ensureUnusedCapacity(server.arena.allocator(), bf.config.packages.len);
 
-            for (bf.packages.items) |pkg| {
+            for (bf.config.packages) |pkg| {
                 try fsl_completions.append(server.arena.allocator(), .{
                     .label = pkg.name,
                     .kind = .Module,

diff --git a/src/special/build_runner.zig b/src/special/build_runner.zig
index e5f11ea..c994982 100644
--- a/src/special/build_runner.zig
+++ b/src/special/build_runner.zig
@@ -5,14 +5,39 @@ const io = std.io;
 const log = std.log;
 const process = std.process;
 const Builder = std.build.Builder;
-const Pkg = std.build.Pkg;
 const InstallArtifactStep = std.build.InstallArtifactStep;
 const LibExeObjStep = std.build.LibExeObjStep;
 const ArrayList = std.ArrayList;
 
+pub const BuildConfig = struct {
+    packages: []Pkg,
+    include_dirs: []IncludeDir,
+
+    pub const Pkg = struct {
+        name: []const u8,
+        uri: []const u8,
+    };
+
+    pub const IncludeDir = union(enum) {
+        raw_path: []const u8,
+        raw_path_system: []const u8,
+
+        pub fn getPath(self: IncludeDir) []const u8 {
+            return switch (self) {
+                .raw_path => |path| return path,
+                .raw_path_system => |path| return path,
+            };
+        }
+
+        pub fn eql(a: IncludeDir, b: IncludeDir) bool {
+            return @enumToInt(a) == @enumToInt(b) and
+                std.mem.eql(u8, a.getPath(), b.getPath());
+        }
+    };
+};
+
 ///! This is a modified build runner to extract information out of build.zig
-///! Modified from the std.special.build_runner
+///! Modified version of lib/build_runner.zig
 pub fn main() !void {
     var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
     defer arena.deinit();
@@ -55,48 +80,93 @@ pub fn main() !void {
 
     builder.resolveInstallPrefix(null, Builder.DirList{});
     try runBuild(builder);
 
-    const stdout_stream = io.getStdOut().writer();
+    var packages = ArrayList(BuildConfig.Pkg).init(allocator);
+    defer packages.deinit();
+
+    var include_dirs = ArrayList(BuildConfig.IncludeDir).init(allocator);
+    defer include_dirs.deinit();
 
     // TODO: We currently add packages from every LibExeObj step that the install step depends on.
     // Should we error out or keep one step or something similar?
     // We also flatten them, we should probably keep the nested structure.
     for (builder.top_level_steps.items) |tls| {
         for (tls.step.dependencies.items) |step| {
-            try processStep(stdout_stream, step);
+            try processStep(&packages, &include_dirs, step);
         }
     }
+
+    try std.json.stringify(
+        BuildConfig{
+            .packages = packages.items,
+            .include_dirs = include_dirs.items,
+        },
+        .{ .whitespace = .{} },
+        io.getStdOut().writer(),
+    );
 }
 
-fn processStep(stdout_stream: anytype, step: *std.build.Step) anyerror!void {
+fn processStep(
+    packages: *ArrayList(BuildConfig.Pkg),
+    include_dirs: *ArrayList(BuildConfig.IncludeDir),
+    step: *std.build.Step,
+) anyerror!void {
     if (step.cast(InstallArtifactStep)) |install_exe| {
+        try processIncludeDirs(include_dirs, install_exe.artifact.include_dirs.items);
         for (install_exe.artifact.packages.items) |pkg| {
-            try processPackage(stdout_stream, pkg);
+            try processPackage(packages, pkg);
         }
     } else if (step.cast(LibExeObjStep)) |exe| {
+        try processIncludeDirs(include_dirs, exe.include_dirs.items);
        for (exe.packages.items) |pkg| {
-            try processPackage(stdout_stream, pkg);
+            try processPackage(packages, pkg);
        }
    } else {
        for (step.dependencies.items) |unknown_step| {
-            try processStep(stdout_stream, unknown_step);
+            try processStep(packages, include_dirs, unknown_step);
        }
    }
 }
 
-fn processPackage(out_stream: anytype, pkg: Pkg) anyerror!void {
-    const source = if (@hasField(Pkg, "source")) pkg.source else pkg.path;
+fn processPackage(
+    packages: *ArrayList(BuildConfig.Pkg),
+    pkg: std.build.Pkg,
+) anyerror!void {
+    for (packages.items) |package| {
+        if (std.mem.eql(u8, package.name, pkg.name)) return;
+    }
+
+    const source = if (@hasField(std.build.Pkg, "source")) pkg.source else pkg.path;
+
     switch (source) {
-        .path => |path| try out_stream.print("{s}\x00{s}\n", .{ pkg.name, path }),
-        .generated => |generated| if (generated.path != null) try out_stream.print("{s}\x00{s}\n", .{ pkg.name, generated.path.? }),
+        .path => |path| try packages.append(.{ .name = pkg.name, .uri = path }),
+        .generated => |generated| if (generated.path != null) try packages.append(.{ .name = pkg.name, .uri = generated.path.? }),
     }
 
     if (pkg.dependencies) |dependencies| {
         for (dependencies) |dep| {
-            try processPackage(out_stream, dep);
+            try processPackage(packages, dep);
         }
     }
 }
 
+fn processIncludeDirs(
+    include_dirs: *ArrayList(BuildConfig.IncludeDir),
+    dirs: []std.build.LibExeObjStep.IncludeDir,
+) !void {
+    outer: for (dirs) |dir| {
+        const candidate: BuildConfig.IncludeDir = switch (dir) {
+            .raw_path => |path| .{ .raw_path = path },
+            .raw_path_system => |path| .{ .raw_path_system = path },
+            else => continue,
+        };
+
+        for (include_dirs.items) |include_dir| {
+            if (candidate.eql(include_dir)) continue :outer;
+        }
+
+        try include_dirs.append(candidate);
+    }
+}
+
 fn runBuild(builder: *Builder) anyerror!void {
     switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
         .Void => root.build(builder),

From 0436b05b801b6a4603c0233d3a4c2cdb8314d5ae Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Thu, 18 Aug 2022 23:14:32 +0200
Subject: [PATCH 06/18] rename `build_runner_cache_path` to `global_cache_path`
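For users migrating an existing zls.json, an entry such as (path
hypothetical):

    { "build_runner_cache_path": "/home/user/.cache/zls" }

is now written as:

    { "global_cache_path": "/home/user/.cache/zls" }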
---
 README.md | 2 +-
 src/Config.zig | 6 +++---
 src/DocumentStore.zig | 10 +++++-----
 src/requests.zig | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/README.md b/README.md
index 740c6ee..68f8fc9 100644
--- a/README.md
+++ b/README.md
@@ -108,7 +108,7 @@ The following options are currently available.
 | `zig_exe_path` | `?[]const u8` | `null` | zig executable path, e.g. `/path/to/zig/zig`, used to run the custom build runner. If `null`, zig is looked up in `PATH`. Will be used to infer the zig standard library path if none is provided. |
 | `warn_style` | `bool` | `false` | Enables warnings for style *guideline* mismatches |
 | `build_runner_path` | `?[]const u8` | `null` | Path to the build_runner.zig file provided by zls. `null` is equivalent to `${executable_directory}/build_runner.zig` |
-| `build_runner_cache_path` | `?[]const u8` | `null` | Path to a directroy that will be used as zig's cache when running `zig run build_runner.zig ...`. `null` is equivalent to `${KnownFloders.Cache}/zls` |
+| `global_cache_path` | `?[]const u8` | `null` | Path to a directory that will be used as zig's cache. `null` is equivalent to `${KnownFolders.Cache}/zls` |
 | `enable_semantic_tokens` | `bool` | `true` | Enables semantic token support when the client also supports it. |
 | `enable_inlay_hints` | `bool` | `false` | Enables inlay hint support when the client also supports it. |
 | `operator_completions` | `bool` | `true` | Enables `*` and `?` operators in completion lists. |

diff --git a/src/Config.zig b/src/Config.zig
index 55eec53..440a4b3 100644
--- a/src/Config.zig
+++ b/src/Config.zig
@@ -32,8 +32,8 @@ warn_style: bool = false,
 /// Path to the build_runner.zig file.
 build_runner_path: ?[]const u8 = null,
 
-/// Path to a directory that will be used as cache when `zig run`ning the build runner
-build_runner_cache_path: ?[]const u8 = null,
+/// Path to the global cache directory
+global_cache_path: ?[]const u8 = null,
 
 /// Semantic token support
 enable_semantic_tokens: bool = true,
@@ -208,7 +208,7 @@ pub fn configChanged(config: *Config, allocator: std.mem.Allocator, builtin_crea
         break :blk try std.fs.path.resolve(allocator, &[_][]const u8{ exe_dir_path, "build_runner.zig" });
     };
 
-    config.build_runner_cache_path = if (config.build_runner_cache_path) |p|
+    config.global_cache_path = if (config.global_cache_path) |p|
         try allocator.dupe(u8, p)
     else blk: {
         const cache_dir_path = (try known_folders.getPath(allocator, .cache)) orelse {

diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index 438af2b..9f4b3e3 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -110,7 +110,7 @@ const LoadBuildConfigContext = struct {
     build_file: *BuildFile,
     allocator: std.mem.Allocator,
     build_runner_path: []const u8,
-    build_runner_cache_path: []const u8,
+    global_cache_path: []const u8,
     zig_exe_path: []const u8,
     build_file_path: ?[]const u8 = null,
     cache_root: []const u8,
@@ -124,7 +124,7 @@ fn loadBuildConfiguration(context: LoadBuildConfigContext) !void {
     const allocator = context.allocator;
     const build_file = context.build_file;
     const build_runner_path = context.build_runner_path;
-    const build_runner_cache_path = context.build_runner_cache_path;
+    const global_cache_path = context.global_cache_path;
     const zig_exe_path = context.zig_exe_path;
 
     const build_file_path = context.build_file_path orelse try URI.parse(allocator, build_file.uri);
@@ -136,7 +136,7 @@ fn loadBuildConfiguration(context: LoadBuildConfigContext) !void {
         "run",
         build_runner_path,
         "--cache-dir",
-        build_runner_cache_path,
+        global_cache_path,
         "--pkg-begin",
         "@build@",
         build_file_path,
@@ -255,7 +255,7 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
         .build_file = build_file,
         .allocator = self.allocator,
         .build_runner_path = self.config.build_runner_path.?,
-        .build_runner_cache_path = self.config.build_runner_cache_path.?,
+        .global_cache_path = self.config.global_cache_path.?,
         .zig_exe_path = self.config.zig_exe_path.?,
         .build_file_path = build_file_path,
         .cache_root = self.zig_cache_root,
@@ -522,7 +522,7 @@ pub fn applySave(self: *DocumentStore, handle: *Handle) !void {
         .build_file = build_file,
         .allocator = self.allocator,
         .build_runner_path = self.config.build_runner_path.?,
-        .build_runner_cache_path = self.config.build_runner_cache_path.?,
+        .global_cache_path = self.config.global_cache_path.?,
         .zig_exe_path = self.config.zig_exe_path.?,
         .cache_root = self.zig_cache_root,
         .global_cache_root = self.zig_global_cache_root,

diff --git a/src/requests.zig b/src/requests.zig
index 621ed94..d96496e 100644
--- a/src/requests.zig
+++ b/src/requests.zig
@@ -282,7 +282,7 @@ pub const Configuration = struct {
     zig_exe_path: ?[]const u8,
     warn_style: ?bool,
     build_runner_path: ?[]const u8,
-    build_runner_cache_path: ?[]const u8,
+    global_cache_path: ?[]const u8,
     enable_semantic_tokens: ?bool,
     enable_inlay_hints: ?bool,
     inlay_hints_show_builtin: ?bool,

From d57f8e3a647c8b2a2f559e67831950dc4544a6bf Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Fri, 19 Aug 2022 00:00:46 +0200
Subject: [PATCH 07/18] add basic cImport support
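A minimal construct that this enables resolving (standard C header shown):

    const c = @cImport({
        @cInclude("stdio.h");
    });

The collected `@cInclude` calls are converted to a C source snippet (see
`translate_c.convertCInclude`) and run through translate-c, so member
access on `c` can resolve into the translated file.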
---
 src/DocumentStore.zig | 364 ++++++++++++++++++++++++++++++------
 src/analysis.zig | 96 +++++++++----
 src/translate_c.zig | 186 +++++++++++++++++++++
 3 files changed, 569 insertions(+), 77 deletions(-)
 create mode 100644 src/translate_c.zig

diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index 9f4b3e3..8038d24 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -9,9 +9,15 @@ const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig");
 const BuildConfig = @import("special/build_runner.zig").BuildConfig;
 const tracy = @import("tracy.zig");
 const Config = @import("Config.zig");
+const translate_c = @import("translate_c.zig");
 const DocumentStore = @This();
 
+pub const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
+
+/// Initial state that can be copied.
+pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);
+
 const BuildFile = struct {
     refs: usize,
     uri: []const u8,
@@ -30,7 +36,9 @@ pub const Handle = struct {
     count: usize,
     /// Contains one entry for every import in the document
     import_uris: []const []const u8,
+    /// Contains one entry for every cimport in the document
+    cimports: []CImportHandle,
-    /// Items in this array list come from `import_uris`
+    /// Items in this array list come from `import_uris` and `cimports`
     imports_used: std.ArrayListUnmanaged([]const u8),
     tree: Ast,
     document_scope: analysis.DocumentScope,
@@ -206,6 +214,7 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
     handle.* = Handle{
         .count = 1,
         .import_uris = &.{},
+        .cimports = &.{},
         .imports_used = .{},
         .document = .{
             .uri = uri,
@@ -359,6 +368,14 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
         self.allocator.free(handle.import_uris);
     }
 
+    handle.cimports = try self.collectCIncludes(handle);
+    errdefer {
+        for (handle.cimports) |item| {
+            self.allocator.free(item.uri);
+        }
+        self.allocator.free(handle.cimports);
+    }
+
     try self.handles.putNoClobber(self.allocator, uri, handle);
     return handle;
 }
@@ -430,9 +447,14 @@ fn decrementCount(self: *DocumentStore, uri: []const u8) void {
             self.allocator.free(import_uri);
         }
 
+        for (handle.cimports) |item| {
+            self.allocator.free(item.uri);
+        }
+
         handle.document_scope.deinit(self.allocator);
         handle.imports_used.deinit(self.allocator);
         self.allocator.free(handle.import_uris);
+        self.allocator.free(handle.cimports);
         self.allocator.destroy(handle);
         const uri_key = entry.key_ptr.*;
         std.debug.assert(self.handles.remove(uri));
@@ -449,27 +471,146 @@ pub fn getHandle(self: *DocumentStore, uri: []const u8) ?*Handle {
 }
 
 fn collectImportUris(self: *DocumentStore, handle: *Handle) ![]const []const u8 {
-    var new_imports = std.ArrayList([]const u8).init(self.allocator);
+    const collected_imports = try analysis.collectImports(self.allocator, handle.tree);
+
+    var imports = std.ArrayList([]const u8).fromOwnedSlice(self.allocator, collected_imports);
     errdefer {
-        for (new_imports.items) |imp| {
+        for (imports.items) |imp| {
             self.allocator.free(imp);
         }
-        new_imports.deinit();
+        imports.deinit();
     }
-    try analysis.collectImports(&new_imports, handle.tree);
 
     // Convert to URIs
     var i: usize = 0;
-    while (i < new_imports.items.len) {
-        if (try self.uriFromImportStr(self.allocator, handle.*, new_imports.items[i])) |uri| {
+    while (i < imports.items.len) {
+        if (try self.uriFromImportStr(self.allocator, handle.*, imports.items[i])) |uri| {
             // The raw import strings are owned by the document and do not need to be freed here.
-            new_imports.items[i] = uri;
+            imports.items[i] = uri;
             i += 1;
         } else {
-            _ = new_imports.swapRemove(i);
+            _ = imports.swapRemove(i);
         }
     }
-    return new_imports.toOwnedSlice();
+    return imports.toOwnedSlice();
+}
+
+pub const CImportSource = struct {
+    /// the `@cInclude` node
+    node: Ast.Node.Index,
+    /// hash of c source file
+    hash: [Hasher.mac_length]u8,
+    /// c source file
+    source: []const u8,
+};
+
+/// Collects all `@cImport` nodes and converts them into c source code
+/// the translation process is defined in `translate_c.convertCInclude`
+/// Caller owns returned memory.
+fn collectCIncludeSources(self: *DocumentStore, handle: *Handle) ![]CImportSource {
+    var cimport_nodes = try analysis.collectCImportNodes(self.allocator, handle.tree);
+    defer self.allocator.free(cimport_nodes);
+
+    var sources = try std.ArrayListUnmanaged(CImportSource).initCapacity(self.allocator, cimport_nodes.len);
+    errdefer {
+        for (sources.items) |item| {
+            self.allocator.free(item.source);
+        }
+        sources.deinit(self.allocator);
+    }
+
+    for (cimport_nodes) |node| {
+        const c_source = translate_c.convertCInclude(self.allocator, handle.tree, node) catch |err| switch (err) {
+            error.Unsupported => continue,
+            error.OutOfMemory => return error.OutOfMemory,
+        };
+
+        var hasher = hasher_init;
+        hasher.update(c_source);
+        var hash: [Hasher.mac_length]u8 = undefined;
+        hasher.final(&hash);
+
+        sources.appendAssumeCapacity(.{
+            .node = node,
+            .hash = hash,
+            .source = c_source,
+        });
+    }
+
+    return sources.toOwnedSlice(self.allocator);
+}
+
+pub const CImportHandle = struct {
+    /// the `@cInclude` node
+    node: Ast.Node.Index,
+    /// hash of the c source file
+    hash: [Hasher.mac_length]u8,
+    /// uri to a zig source file generated with translate-c
+    uri: []const u8,
+};
+
+/// Collects all `@cImport` nodes and converts them into zig files using translate-c
+/// Caller owns returned memory.
+fn collectCIncludes(self: *DocumentStore, handle: *Handle) ![]CImportHandle {
+    var cimport_nodes = try analysis.collectCImportNodes(self.allocator, handle.tree);
+    defer self.allocator.free(cimport_nodes);
+
+    var uris = try std.ArrayListUnmanaged(CImportHandle).initCapacity(self.allocator, cimport_nodes.len);
+    errdefer {
+        for (uris.items) |item| {
+            self.allocator.free(item.uri);
+        }
+        uris.deinit(self.allocator);
+    }
+
+    for (cimport_nodes) |node| {
+        const c_source = translate_c.convertCInclude(self.allocator, handle.tree, node) catch |err| switch (err) {
+            error.Unsupported => continue,
+            error.OutOfMemory => return error.OutOfMemory,
+        };
+        defer self.allocator.free(c_source);
+
+        const uri = self.translate(handle, c_source) catch |err| {
+            std.log.warn("failed to translate cInclude: {}", .{err});
+            continue;
+        } orelse continue;
+        errdefer self.allocator.free(uri);
+
+        var hasher = hasher_init;
+        hasher.update(c_source);
+        var hash: [Hasher.mac_length]u8 = undefined;
+        hasher.final(&hash);
+
+        uris.appendAssumeCapacity(.{
+            .node = node,
+            .hash = hash,
+            .uri = uri,
+        });
+    }
+
+    return uris.toOwnedSlice(self.allocator);
+}
+
+fn translate(self: *DocumentStore, handle: *Handle, source: []const u8) !?[]const u8 {
+    const dirs: []BuildConfig.IncludeDir = if (handle.associated_build_file) |build_file| build_file.config.include_dirs else &.{};
+    const include_dirs = blk: {
+        var result = try self.allocator.alloc([]const u8, dirs.len);
+        errdefer self.allocator.free(result);
+
+        for (dirs) |dir, i| {
+            result[i] = dir.getPath();
+        }
+
+        break :blk result;
+    };
+    defer self.allocator.free(include_dirs);
+
+    return translate_c.translate(
+        self.allocator,
+        self.config,
+        include_dirs,
+        source,
+    );
 }
 
 fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
@@ -480,28 +621,37 @@ fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
     handle.document_scope.deinit(self.allocator);
     handle.document_scope = try analysis.makeDocumentScope(self.allocator, handle.tree);
 
-    const new_imports = try self.collectImportUris(handle);
-    errdefer {
-        for (new_imports) |imp| {
-            self.allocator.free(imp);
-        }
-        self.allocator.free(new_imports);
-    }
+    var old_imports = handle.import_uris;
+    var old_cimports = handle.cimports;
+
+    handle.import_uris = try self.collectImportUris(handle);
+
+    handle.cimports = try self.refreshDocumentCIncludes(handle);
 
-    const old_imports = handle.import_uris;
-    handle.import_uris = new_imports;
     defer {
         for (old_imports) |uri| {
             self.allocator.free(uri);
         }
         self.allocator.free(old_imports);
+
+        for (old_cimports) |old_cimport| {
+            self.allocator.free(old_cimport.uri);
+        }
+        self.allocator.free(old_cimports);
     }
 
     var i: usize = 0;
     while (i < handle.imports_used.items.len) {
         const old = handle.imports_used.items[i];
         still_exists: {
-            for (new_imports) |new| {
+            for (handle.import_uris) |new| {
                 if (std.mem.eql(u8, new, old)) {
                     handle.imports_used.items[i] = new;
                     break :still_exists;
                 }
             }
+            for (handle.cimports) |cimport| {
+                const new = cimport.uri;
+                if (std.mem.eql(u8, new, old)) {
+                    handle.imports_used.items[i] = new;
+                    break :still_exists;
+                }
+            }
     }
 }
 
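+/// Re-translation is skipped when a cimport's C source hash is unchanged;
+/// in that case the previously translated URI is reused.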
handle.cimports;
+    var new_cimports = try std.ArrayListUnmanaged(CImportHandle).initCapacity(self.allocator, new_sources.len);
+    errdefer {
+        for (new_cimports.items) |new_cimport| {
+            self.allocator.free(new_cimport.uri);
+        }
+        new_cimports.deinit(self.allocator);
+    }
+
+    for (new_sources) |new_source| {
+        const maybe_old_cimport: ?CImportHandle = blk: {
+            const old_cimport: CImportHandle = found: {
+                for (old_cimports) |old_cimport| {
+                    if (new_source.node == old_cimport.node) {
+                        break :found old_cimport;
+                    }
+                }
+                break :blk null;
+            };
+
+            // avoid re-translating if the source didn't change
+            if (std.mem.eql(u8, &new_source.hash, &old_cimport.hash)) {
+                break :blk CImportHandle{
+                    .node = old_cimport.node,
+                    .hash = old_cimport.hash,
+                    .uri = try self.allocator.dupe(u8, old_cimport.uri),
+                };
+            }
+
+            const new_uri = self.translate(handle, new_source.source) catch |err| {
+                std.log.warn("failed to translate cInclude: {}", .{err});
+                continue;
+            } orelse continue;
+            errdefer self.allocator.free(new_uri);
+
+            // store the hash of the *new* source so the next refresh sees a match
+            break :blk CImportHandle{
+                .node = old_cimport.node,
+                .hash = new_source.hash,
+                .uri = new_uri,
+            };
+        };
+
+        if (maybe_old_cimport) |cimport| {
+            new_cimports.appendAssumeCapacity(cimport);
+            continue;
+        }
+
+        const c_source = translate_c.convertCInclude(self.allocator, handle.tree, new_source.node) catch |err| switch (err) {
+            error.Unsupported => continue,
+            error.OutOfMemory => return error.OutOfMemory,
+        };
+        defer self.allocator.free(c_source);
+
+        var hasher = hasher_init;
+        var hash: [Hasher.mac_length]u8 = undefined;
+        hasher.update(c_source);
+        hasher.final(&hash);
+
+        const new_uri = self.translate(
+            handle,
+            c_source,
+        ) catch |err| {
+            std.log.warn("failed to translate cInclude: {}", .{err});
+            continue;
+        } orelse continue;
+        errdefer self.allocator.free(new_uri);
+
+        new_cimports.appendAssumeCapacity(.{
+            .node = new_source.node,
+            .hash = hash,
+            .uri = new_uri,
+        });
+    }
+
+    return new_cimports.toOwnedSlice(self.allocator);
+}
+
 pub fn applySave(self: *DocumentStore, handle: *Handle) !void {
     if (handle.is_build_file) |build_file| {
         loadBuildConfiguration(.{
@@ -680,39 +916,66 @@ pub fn resolveImport(self: *DocumentStore, handle: *Handle, import_str: []const
     }
 
     // New document, read the file then call into openDocument.
-    const file_path = try URI.parse(allocator, final_uri);
-    defer allocator.free(file_path);
+    var document_handle = try self.newDocumentFromUri(final_uri);
 
-    var file = std.fs.cwd().openFile(file_path, .{}) catch {
-        log.debug("Cannot open import file {s}", .{file_path});
+    // Add to import table of current handle.
+    try handle.imports_used.append(allocator, handle_uri);
+
+    return document_handle;
+}
+
+pub fn resolveCImport(self: *DocumentStore, handle: *Handle, node: Ast.Node.Index) !?*Handle {
+    const uri = blk: {
+        for (handle.cimports) |item| {
+            if (item.node == node) break :blk item.uri;
+        }
         return null;
     };
-    defer file.close();
-
-    {
-        const file_contents = file.readToEndAllocOptions(
-            allocator,
-            std.math.maxInt(usize),
-            null,
-            @alignOf(u8),
-            0,
-        ) catch |err| switch (err) {
-            error.OutOfMemory => return error.OutOfMemory,
-            else => {
-                log.debug("Could not read from file {s}", .{file_path});
-                return null;
-            },
-        };
-        errdefer allocator.free(file_contents);
-
-        // Add to import table of current handle.
-        try handle.imports_used.append(self.allocator, handle_uri);
-
-        // Swap handles.
-        // This takes ownership of the passed uri and text.
- const duped_final_uri = try allocator.dupe(u8, final_uri); - errdefer allocator.free(duped_final_uri); - return try self.newDocument(duped_final_uri, file_contents); + // Check if the import is already opened by others. + if (self.getHandle(uri)) |new_handle| { + // If it is, append it to our imports, increment the count, set our new handle + // and return the parsed tree root node. + try handle.imports_used.append(self.allocator, uri); + new_handle.count += 1; + return new_handle; } + + // New document, read the file then call into openDocument. + var document_handle = try self.newDocumentFromUri(uri); + + // Add to cimport table of current handle. + try handle.imports_used.append(self.allocator, uri); + + return document_handle; +} + +fn newDocumentFromUri(self: *DocumentStore, uri: []const u8) !?*Handle { + const file_path = try URI.parse(self.allocator, uri); + defer self.allocator.free(file_path); + + var file = std.fs.openFileAbsolute(file_path, .{}) catch |err| { + log.debug("Cannot open file '{s}': {}", .{ file_path, err }); + return null; + }; + defer file.close(); + + const file_contents = file.readToEndAllocOptions( + self.allocator, + std.math.maxInt(usize), + null, + @alignOf(u8), + 0, + ) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + else => { + log.debug("Could not read from file {s}", .{file_path}); + return null; + }, + }; + errdefer self.allocator.free(file_contents); + + return try self.newDocument(try self.allocator.dupe(u8, uri), file_contents); } fn stdUriFromLibPath(allocator: std.mem.Allocator, zig_lib_path: ?[]const u8) !?[]const u8 { @@ -742,6 +1005,10 @@ pub fn deinit(self: *DocumentStore) void { self.allocator.free(uri); } self.allocator.free(entry.value_ptr.*.import_uris); + for (entry.value_ptr.*.cimports) |cimport| { + self.allocator.free(cimport.uri); + } + self.allocator.free(entry.value_ptr.*.cimports); entry.value_ptr.*.imports_used.deinit(self.allocator); self.allocator.free(entry.key_ptr.*); self.allocator.destroy(entry.value_ptr.*); @@ -778,6 +1045,7 @@ fn tagStoreCompletionItems(self: DocumentStore, arena: *std.heap.ArenaAllocator, result_set.putAssumeCapacity(completion, {}); } } + return result_set.entries.items(.key); } diff --git a/src/analysis.zig b/src/analysis.zig index c9c99f6..8e4197e 100644 --- a/src/analysis.zig +++ b/src/analysis.zig @@ -334,7 +334,8 @@ fn resolveVarDeclAliasInternal(store: *DocumentStore, arena: *std.heap.ArenaAllo const lhs = datas[node_handle.node].lhs; const container_node = if (ast.isBuiltinCall(tree, lhs)) block: { - if (!std.mem.eql(u8, tree.tokenSlice(main_tokens[lhs]), "@import")) + const name = tree.tokenSlice(main_tokens[lhs]); + if (!std.mem.eql(u8, name, "@import") and !std.mem.eql(u8, name, "@cImport")) return null; const inner_node = (try resolveTypeOfNode(store, arena, .{ .node = lhs, .handle = handle })) orelse return null; @@ -901,20 +902,28 @@ pub fn resolveTypeOfNodeInternal(store: *DocumentStore, arena: *std.heap.ArenaAl return resolved_type; } - if (!std.mem.eql(u8, call_name, "@import")) return null; - if (params.len == 0) return null; + if (std.mem.eql(u8, call_name, "@import")) { + if (params.len == 0) return null; + const import_param = params[0]; + if (node_tags[import_param] != .string_literal) return null; - const import_param = params[0]; - if (node_tags[import_param] != .string_literal) return null; + const import_str = tree.tokenSlice(main_tokens[import_param]); + const new_handle = (store.resolveImport(handle, import_str[1 .. 
import_str.len - 1]) catch |err| { + log.debug("Error {} while processing import {s}", .{ err, import_str }); + return null; + }) orelse return null; - const import_str = tree.tokenSlice(main_tokens[import_param]); - const new_handle = (store.resolveImport(handle, import_str[1 .. import_str.len - 1]) catch |err| { - log.debug("Error {} while processing import {s}", .{ err, import_str }); - return null; - }) orelse return null; + // reference to node '0' which is root + return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle }); + } else if (std.mem.eql(u8, call_name, "@cImport")) { + const new_handle = (store.resolveCImport(handle, node) catch |err| { + log.debug("Error {} while processing cImport", .{err}); // TODO improve + return null; + }) orelse return null; - // reference to node '0' which is root - return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle }); + // reference to node '0' which is root + return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle }); + } }, .fn_proto, .fn_proto_multi, @@ -1074,8 +1083,17 @@ pub fn resolveTypeOfNode(store: *DocumentStore, arena: *std.heap.ArenaAllocator, return resolveTypeOfNodeInternal(store, arena, node_handle, &bound_type_params); } -/// Collects all imports we can find into a slice of import paths (without quotes). -pub fn collectImports(import_arr: *std.ArrayList([]const u8), tree: Ast) !void { +/// Collects all `@import`'s we can find into a slice of import paths (without quotes). +/// Caller owns returned memory. +pub fn collectImports(allocator: std.mem.Allocator, tree: Ast) error{OutOfMemory}![][]const u8 { + var imports = std.ArrayListUnmanaged([]const u8){}; + errdefer { + for (imports.items) |imp| { + allocator.free(imp); + } + imports.deinit(allocator); + } + const tags = tree.tokens.items(.tag); var i: usize = 0; @@ -1095,9 +1113,33 @@ pub fn collectImports(import_arr: *std.ArrayList([]const u8), tree: Ast) !void { continue; const str = tree.tokenSlice(@intCast(u32, i + 2)); - try import_arr.append(str[1 .. str.len - 1]); + try imports.append(allocator, str[1 .. str.len - 1]); } } + + return imports.toOwnedSlice(allocator); +} + +/// Collects all `@cImport` nodes +/// Caller owns returned memory. 
+pub fn collectCImportNodes(allocator: std.mem.Allocator, tree: Ast) error{OutOfMemory}![]Ast.Node.Index {
+    var import_nodes = std.ArrayListUnmanaged(Ast.Node.Index){};
+    errdefer import_nodes.deinit(allocator);
+
+    const node_tags = tree.nodes.items(.tag);
+    const main_tokens = tree.nodes.items(.main_token);
+
+    var i: usize = 0;
+    while (i < node_tags.len) : (i += 1) {
+        const node = @intCast(Ast.Node.Index, i);
+        if (!ast.isBuiltinCall(tree, node)) continue;
+
+        if (!std.mem.eql(u8, Ast.tokenSlice(tree, main_tokens[node]), "@cImport")) continue;
+
+        try import_nodes.append(allocator, node);
+    }
+
+    return import_nodes.toOwnedSlice(allocator);
 }
 
 pub const NodeWithHandle = struct {
@@ -1338,26 +1380,22 @@ pub fn getImportStr(tree: Ast, node: Ast.Node.Index, source_index: usize) ?[]con
         return getImportStr(tree, tree.nodes.items(.data)[node].lhs, source_index);
     }
 
-    if (!nodeContainsSourceIndex(tree, node, source_index)) {
-        return null;
-    }
+    if (!nodeContainsSourceIndex(tree, node, source_index)) return null;
 
-    if (ast.isBuiltinCall(tree, node)) {
-        const builtin_token = tree.nodes.items(.main_token)[node];
-        const call_name = tree.tokenSlice(builtin_token);
+    if (!ast.isBuiltinCall(tree, node)) return null;
 
-        if (!std.mem.eql(u8, call_name, "@import")) return null;
+    const builtin_token = tree.nodes.items(.main_token)[node];
+    const call_name = tree.tokenSlice(builtin_token);
 
-        var buffer: [2]Ast.Node.Index = undefined;
-        const params = ast.builtinCallParams(tree, node, &buffer).?;
+    if (!std.mem.eql(u8, call_name, "@import")) return null;
 
-        if (params.len != 1) return null;
+    var buffer: [2]Ast.Node.Index = undefined;
+    const params = ast.builtinCallParams(tree, node, &buffer).?;
 
-        const import_str = tree.tokenSlice(tree.nodes.items(.main_token)[params[0]]);
-        return import_str[1 .. import_str.len - 1];
-    }
+    if (params.len != 1) return null;
 
-    return null;
+    const import_str = tree.tokenSlice(tree.nodes.items(.main_token)[params[0]]);
+    return import_str[1 .. import_str.len - 1];
 }
 
 pub const SourceRange = std.zig.Token.Loc;
diff --git a/src/translate_c.zig b/src/translate_c.zig
new file mode 100644
index 0000000..7d7d43e
--- /dev/null
+++ b/src/translate_c.zig
@@ -0,0 +1,186 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const Config = @import("Config.zig");
+const ast = @import("ast.zig");
+const Ast = std.zig.Ast;
+const URI = @import("uri.zig");
+
+/// converts a `@cInclude` node into an equivalent c header file
+/// which can then be handed over to `zig translate-c`
+/// Caller owns returned memory.
+///
+/// **Example**
+/// ```zig
+/// const glfw = @cImport(
+///     @cDefine("GLFW_INCLUDE_VULKAN", {})
+///     @cInclude("GLFW/glfw3.h")
+/// );
+/// ```
+/// gets converted into:
+/// ```c
+/// #define GLFW_INCLUDE_VULKAN
+/// #include <GLFW/glfw3.h>
+/// ```
+pub fn convertCInclude(allocator: std.mem.Allocator, tree: Ast, node: Ast.Node.Index) error{ OutOfMemory, Unsupported }![]const u8 {
+    const main_tokens = tree.nodes.items(.main_token);
+
+    std.debug.assert(ast.isBuiltinCall(tree, node));
+    std.debug.assert(std.mem.eql(u8, Ast.tokenSlice(tree, main_tokens[node]), "@cImport"));
+
+    var output = std.ArrayList(u8).init(allocator);
+    errdefer output.deinit();
+
+    var stack_allocator = std.heap.stackFallback(512, allocator);
+
+    var buffer: [2]Ast.Node.Index = undefined;
+    for (ast.builtinCallParams(tree, node, &buffer).?) 
|child| {
+        try convertCIncludeInternal(stack_allocator.get(), tree, child, &output);
+    }
+
+    return output.toOwnedSlice();
+}
+
+fn convertCIncludeInternal(allocator: std.mem.Allocator, tree: Ast, node: Ast.Node.Index, output: *std.ArrayList(u8)) error{ OutOfMemory, Unsupported }!void {
+    const node_tags = tree.nodes.items(.tag);
+    const main_tokens = tree.nodes.items(.main_token);
+
+    var buffer: [2]Ast.Node.Index = undefined;
+    if (ast.isBlock(tree, node)) {
+        const FrameSize = @sizeOf(@Frame(convertCIncludeInternal));
+        var child_frame = try allocator.alignedAlloc(u8, std.Target.stack_align, FrameSize);
+        defer allocator.free(child_frame);
+
+        for (ast.blockStatements(tree, node, &buffer).?) |statement| {
+            try await @asyncCall(child_frame, {}, convertCIncludeInternal, .{ allocator, tree, statement, output });
+        }
+    } else if (ast.builtinCallParams(tree, node, &buffer)) |params| {
+        if (params.len < 1) return;
+
+        const call_name = Ast.tokenSlice(tree, main_tokens[node]);
+
+        if (node_tags[params[0]] != .string_literal) return error.Unsupported;
+        const first = extractString(Ast.tokenSlice(tree, main_tokens[params[0]]));
+
+        if (std.mem.eql(u8, call_name, "@cInclude")) {
+            try output.writer().print("#include <{s}>\n", .{first});
+        } else if (std.mem.eql(u8, call_name, "@cDefine")) {
+            if (params.len < 2) return;
+
+            var buffer2: [2]Ast.Node.Index = undefined;
+            const is_void = if (ast.blockStatements(tree, params[1], &buffer2)) |block| block.len == 0 else false;
+
+            if (is_void) {
+                try output.writer().print("#define {s}\n", .{first});
+            } else {
+                if (node_tags[params[1]] != .string_literal) return error.Unsupported;
+                const second = extractString(Ast.tokenSlice(tree, main_tokens[params[1]]));
+                try output.writer().print("#define {s} {s}\n", .{ first, second });
+            }
+        } else if (std.mem.eql(u8, call_name, "@cUndef")) {
+            try output.writer().print("#undef {s}\n", .{first});
+        } else {
+            return error.Unsupported;
+        }
+    }
+}
+
+/// takes a c header file and returns the file path to the zig file
+/// generated by calling `zig translate-c`
+/// Caller owns returned memory.
+pub fn translate(allocator: std.mem.Allocator, config: Config, include_dirs: []const []const u8, source: []const u8) error{OutOfMemory}!?[]const u8 {
+    const file_path = try std.fs.path.join(allocator, &[_][]const u8{ config.global_cache_path.?, "cimport.h" });
+    defer allocator.free(file_path);
+
+    var file = std.fs.createFileAbsolute(file_path, .{}) catch |err| {
+        std.log.warn("failed to create file '{s}': {}", .{ file_path, err });
+        return null;
+    };
+    defer file.close();
+    defer std.fs.deleteFileAbsolute(file_path) catch |err| {
+        std.log.warn("failed to delete file '{s}': {}", .{ file_path, err });
+    };
+
+    file.writeAll(source) catch |err| {
+        std.log.warn("failed to write to '{s}': {}", .{ file_path, err });
+    };
+
+    const base_include_dirs = blk: {
+        const target_info = std.zig.system.NativeTargetInfo.detect(allocator, .{}) catch break :blk null;
+        var native_paths = std.zig.system.NativePaths.detect(allocator, target_info) catch break :blk null;
+        defer native_paths.deinit();
+
+        break :blk native_paths.include_dirs.toOwnedSlice();
+    };
+    defer if (base_include_dirs) |dirs| {
+        for (dirs) |path| {
+            allocator.free(path);
+        }
+        allocator.free(dirs);
+    };
+
+    const base_args = &[_][]const u8{
+        config.zig_exe_path.?,
+        "translate-c",
+        "--enable-cache",
+        "--zig-lib-dir",
+        config.zig_lib_path.?,
+        "--cache-dir",
+        config.global_cache_path.?,
+    };
+
+    const argc = base_args.len + 2 * (include_dirs.len + if (base_include_dirs) |dirs| dirs.len else 0) + 1;
+    var argv = try std.ArrayListUnmanaged([]const u8).initCapacity(allocator, argc);
+    defer argv.deinit(allocator);
+
+    argv.appendSliceAssumeCapacity(base_args);
+
+    if (base_include_dirs) |dirs| {
+        for (dirs) |include_dir| {
+            argv.appendAssumeCapacity("-I");
+            argv.appendAssumeCapacity(include_dir);
+        }
+    }
+
+    for (include_dirs) |include_dir| {
+        argv.appendAssumeCapacity("-I");
+        argv.appendAssumeCapacity(include_dir);
+    }
+
+    argv.appendAssumeCapacity(file_path);
+
+    const result = std.ChildProcess.exec(.{
+        .allocator = allocator,
+        .argv = argv.items,
+    }) catch |err| {
+        std.log.err("Failed to execute zig translate-c process, error: {}", .{err});
+        return null;
+    };
+
+    defer allocator.free(result.stdout);
+    defer allocator.free(result.stderr);
+
+    return switch (result.term) {
+        .Exited => |code| if (code == 0) {
+            return try std.mem.join(allocator, "", &.{
+                "file://",
+                std.mem.sliceTo(result.stdout, '\n'),
+            });
+        } else {
+            // TODO convert failure to `textDocument/publishDiagnostics`
+            std.log.err("zig translate-c process failed, code: {}, stderr: '{s}'", .{ code, result.stderr });
+            return null;
+        },
+        else => {
+            std.log.err("zig translate-c process terminated '{}'", .{result.term});
+            return null;
+        },
+    };
+}
+
+fn extractString(str: []const u8) []const u8 {
+    if (std.mem.startsWith(u8, str, "\"") and std.mem.endsWith(u8, str, "\"")) {
+        return str[1 .. 
str.len - 1]; + } else { + return str; + } +} From 249685e4c5e897a4101e323fa0a2be50c4185887 Mon Sep 17 00:00:00 2001 From: InKryption Date: Fri, 19 Aug 2022 00:31:41 +0200 Subject: [PATCH 08/18] 2 minor tidy-ups + fix for over-eager unused parameter error in function types --- src/Server.zig | 8 ++------ src/ast.zig | 12 ++++++++++++ src/main.zig | 5 ++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index cb7df43..c0564ae 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -239,11 +239,7 @@ fn publishDiagnostics(server: *Server, writer: anytype, handle: DocumentStore.Ha scopes: for (handle.document_scope.scopes) |scope| { const scope_data = switch (scope.data) { .function => |f| b: { - var buf: [1]std.zig.Ast.Node.Index = undefined; - var proto = ast.fnProto(tree, f, &buf) orelse break :b f; - if (proto.extern_export_inline_token) |tok| { - if (std.mem.eql(u8, tree.tokenSlice(tok), "extern")) continue :scopes; - } + if (!ast.fnProtoHasBody(tree, f).?) continue :scopes; break :b f; }, .block => |b| b, @@ -2450,7 +2446,6 @@ pub fn processJsonRpc(server: *Server, writer: anytype, json: []const u8) !void return; } - std.debug.assert(tree.root.Object.get("method") != null); const method = tree.root.Object.get("method").?.String; const start_time = std.time.milliTimestamp(); @@ -2490,6 +2485,7 @@ pub fn processJsonRpc(server: *Server, writer: anytype, json: []const u8) !void }; // Hack to avoid `return`ing in the inline for, which causes bugs. + // TODO: Change once stage2 is shipped and more stable? var done: ?anyerror = null; inline for (method_map) |method_info| { if (done == null and std.mem.eql(u8, method, method_info[0])) { diff --git a/src/ast.zig b/src/ast.zig index b5b66ce..c895196 100644 --- a/src/ast.zig +++ b/src/ast.zig @@ -1037,6 +1037,18 @@ pub fn isBlock(tree: Ast, node: Ast.Node.Index) bool { }; } +pub fn fnProtoHasBody(tree: Ast, node: Ast.Node.Index) ?bool { + return switch (tree.nodes.items(.tag)[node]) { + .fn_proto, + .fn_proto_multi, + .fn_proto_one, + .fn_proto_simple, + => false, + .fn_decl => true, + else => null, + }; +} + pub fn fnProto(tree: Ast, node: Ast.Node.Index, buf: *[1]Ast.Node.Index) ?Ast.full.FnProto { return switch (tree.nodes.items(.tag)[node]) { .fn_proto => tree.fnProto(node), diff --git a/src/main.zig b/src/main.zig index 881f1a1..a462a87 100644 --- a/src/main.zig +++ b/src/main.zig @@ -32,8 +32,7 @@ fn loop(server: *Server) !void { try reader.readNoEof(buffer); - var writer = std.io.getStdOut().writer(); - + const writer = std.io.getStdOut().writer(); try server.processJsonRpc(writer, buffer); } } @@ -212,7 +211,7 @@ const stack_frames = switch (zig_builtin.mode) { else => 0, }; -pub fn main() anyerror!void { +pub fn main() !void { var gpa_state = std.heap.GeneralPurposeAllocator(.{ .stack_trace_frames = stack_frames }){}; defer _ = gpa_state.deinit(); var tracy_state = if (tracy.enable_allocation) tracy.tracyAllocator(gpa_state.allocator()) else void{}; From 63d84855a5a3f83b6b22f95e5502b683e4604609 Mon Sep 17 00:00:00 2001 From: InKryption Date: Fri, 19 Aug 2022 13:23:54 +0200 Subject: [PATCH 09/18] Improve unused variable report accuracy --- src/Server.zig | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/src/Server.zig b/src/Server.zig index c0564ae..b9b53dd 100644 --- a/src/Server.zig +++ b/src/Server.zig @@ -250,7 +250,7 @@ fn publishDiagnostics(server: *Server, writer: anytype, handle: DocumentStore.Ha while (decl_iterator.next()) |decl| { var 
identifier_count: usize = 0; - var name_token_index = switch (decl.value_ptr.*) { + const name_token_index = switch (decl.value_ptr.*) { .ast_node => |an| s: { const an_tag = tree.nodes.items(.tag)[an]; switch (an_tag) { @@ -270,8 +270,27 @@ fn publishDiagnostics(server: *Server, writer: anytype, handle: DocumentStore.Ha const pit_start = tree.firstToken(scope_data); const pit_end = ast.lastToken(tree, scope_data); - for (tree.tokens.items(.tag)[pit_start..pit_end]) |tag, index| { - if (tag == .identifier and std.mem.eql(u8, tree.tokenSlice(pit_start + @intCast(u32, index)), tree.tokenSlice(name_token_index))) identifier_count += 1; + const tags = tree.tokens.items(.tag)[pit_start..pit_end]; + for (tags) |tag, index| { + if (tag != .identifier) continue; + if (!std.mem.eql(u8, tree.tokenSlice(pit_start + @intCast(u32, index)), tree.tokenSlice(name_token_index))) continue; + if (index -| 1 > 0 and tags[index - 1] == .period) continue; + if (index +| 2 < tags.len and tags[index + 1] == .colon) switch (tags[index + 2]) { + .l_brace, + .keyword_inline, + .keyword_while, + .keyword_for, + .keyword_switch, + => continue, + else => {}, + }; + if (index -| 2 > 0 and tags[index - 1] == .colon) switch (tags[index - 2]) { + .keyword_break, + .keyword_continue, + => continue, + else => {}, + }; + identifier_count += 1; } if (identifier_count <= 1) From ea138bc231dc0bd248bc7f9cc0b5b00220917c50 Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Sat, 20 Aug 2022 12:15:33 +0100 Subject: [PATCH 10/18] set `use_stage1` in build.zig --- build.zig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build.zig b/build.zig index a08b16a..ad5eecd 100644 --- a/build.zig +++ b/build.zig @@ -7,6 +7,7 @@ pub fn build(b: *std.build.Builder) !void { const mode = b.standardReleaseOptions(); const exe = b.addExecutable("zls", "src/main.zig"); + exe.use_stage1 = true; const exe_options = b.addOptions(); exe.addOptions("build_options", exe_options); @@ -76,11 +77,13 @@ pub fn build(b: *std.build.Builder) !void { test_step.dependOn(b.getInstallStep()); var unit_tests = b.addTest("src/unit_tests.zig"); + unit_tests.use_stage1 = true; unit_tests.setBuildMode(.Debug); unit_tests.setTarget(target); test_step.dependOn(&unit_tests.step); var session_tests = b.addTest("tests/sessions.zig"); + session_tests.use_stage1 = true; session_tests.addPackage(.{ .name = "header", .source = .{ .path = "src/header.zig" } }); session_tests.addPackage(.{ .name = "server", .source = .{ .path = "src/Server.zig" }, .dependencies = exe.packages.items }); session_tests.setBuildMode(.Debug); From f543659e31175476277bdeb9960b5743340a8a1d Mon Sep 17 00:00:00 2001 From: Techatrix <19954306+Techatrix@users.noreply.github.com> Date: Sat, 20 Aug 2022 15:34:36 +0200 Subject: [PATCH 11/18] correctly handle path to uri conversion --- src/DocumentStore.zig | 7 +++++-- src/translate_c.zig | 5 +---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 8038d24..5ac1446 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -605,12 +605,15 @@ fn translate(self: *DocumentStore, handle: *Handle, source: []const u8) !?[]cons }; defer self.allocator.free(include_dirs); - return translate_c.translate( + const file_path = (try translate_c.translate( self.allocator, self.config, include_dirs, source, - ); + )) orelse return null; + defer self.allocator.free(file_path); + + return try URI.fromPath(self.allocator, file_path); } fn refreshDocument(self: *DocumentStore, handle: *Handle) !void { diff --git 
a/src/translate_c.zig b/src/translate_c.zig
index 7d7d43e..49d63b3 100644
--- a/src/translate_c.zig
+++ b/src/translate_c.zig
@@ -161,10 +161,7 @@ pub fn translate(allocator: std.mem.Allocator, config: Config, include_dirs: []c
 
     return switch (result.term) {
         .Exited => |code| if (code == 0) {
-            return try std.mem.join(allocator, "", &.{
-                "file://",
-                std.mem.sliceTo(result.stdout, '\n'),
-            });
+            return try allocator.dupe(u8, std.mem.sliceTo(result.stdout, '\n'));
         } else {
             // TODO convert failure to `textDocument/publishDiagnostics`
             std.log.err("zig translate-c process failed, code: {}, stderr: '{s}'", .{ code, result.stderr });

From 8457487b2e77cd1323cf6ff72e8616d3b255ac54 Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Sat, 20 Aug 2022 18:28:41 +0200
Subject: [PATCH 12/18] better handling for enum field signatures

---
 src/analysis.zig | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/analysis.zig b/src/analysis.zig
index c9c99f6..3c827a2 100644
--- a/src/analysis.zig
+++ b/src/analysis.zig
@@ -222,6 +222,9 @@ pub fn getVariableSignature(tree: Ast, var_decl: Ast.full.VarDecl) []const u8 {
 }
 
 pub fn getContainerFieldSignature(tree: Ast, field: Ast.full.ContainerField) []const u8 {
+    if (field.ast.value_expr == 0 and field.ast.type_expr == 0 and field.ast.align_expr == 0) {
+        return ""; // TODO display the container's type
+    }
     const start = offsets.tokenLocation(tree, field.ast.name_token).start;
     const end_node = if (field.ast.value_expr != 0) field.ast.value_expr else field.ast.type_expr;
     const end = offsets.tokenLocation(tree, ast.lastToken(tree, end_node)).end;

From 2e61178a08f65bb1f3b448308fef37ef6219020b Mon Sep 17 00:00:00 2001
From: Techatrix <19954306+Techatrix@users.noreply.github.com>
Date: Sat, 20 Aug 2022 19:27:31 +0200
Subject: [PATCH 13/18] Update dependencies

---
 src/known-folders | 2 +-
 src/tracy         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/known-folders b/src/known-folders
index 9db1b99..24845b0 160000
--- a/src/known-folders
+++ b/src/known-folders
@@ -1 +1 @@
-Subproject commit 9db1b99219c767d5e24994b1525273fe4031e464
+Subproject commit 24845b0103e611c108d6bc334231c464e699742c
diff --git a/src/tracy b/src/tracy
index 2d8723b..f493d4a 160000
--- a/src/tracy
+++ b/src/tracy
@@ -1 +1 @@
-Subproject commit 2d8723b69b39721eadcc296451012828899c0f17
+Subproject commit f493d4aa8ba8141d9680473fad007d8a6348628e

From 3e20a7a50bef8e3e070964da9f2b6475c829a371 Mon Sep 17 00:00:00 2001
From: Zzzen
Date: Sun, 21 Aug 2022 23:33:28 +0800
Subject: [PATCH 14/18] fix link of VS Code (#593)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 68f8fc9..7856b67 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,7 @@ Zig Language Server, or `zls`, is a language server for Zig. 
The Zig wiki states - [Updating Data Files](#updating-data-files) - [Configuration Options](#configuration-options) - [Features](#features) - - [VS Code](#vscode) + - [VS Code](#vs-code) - [Sublime Text](#sublime-text) - [Sublime Text 3](#sublime-text-3) - [Sublime Text 4](#sublime-text-4) From 333b1bad8cf93d2d2930c31b46ad8ad29d079d17 Mon Sep 17 00:00:00 2001 From: Techatrix <19954306+Techatrix@users.noreply.github.com> Date: Mon, 22 Aug 2022 16:54:56 +0200 Subject: [PATCH 15/18] fix Path <-> Uri mismatch --- src/DocumentStore.zig | 81 ++++++++++++++++++++++++++++++++---- src/special/build_runner.zig | 53 ++++++++++------------- 2 files changed, 95 insertions(+), 39 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index 5ac1446..d973a27 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -21,8 +21,7 @@ pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length); const BuildFile = struct { refs: usize, uri: []const u8, - config: BuildConfig, - + config: BuildFileConfig, builtin_uri: ?[]const u8 = null, pub fn destroy(self: *BuildFile, allocator: std.mem.Allocator) void { @@ -31,6 +30,31 @@ const BuildFile = struct { } }; +pub const BuildFileConfig = struct { + packages: []Pkg, + include_dirs: []IncludeDir, + + pub fn deinit(self: BuildFileConfig, allocator: std.mem.Allocator) void { + for (self.packages) |pkg| { + allocator.free(pkg.name); + allocator.free(pkg.uri); + } + allocator.free(self.packages); + + for (self.include_dirs) |dir| { + allocator.free(dir.path); + } + allocator.free(self.include_dirs); + } + + pub const Pkg = struct { + name: []const u8, + uri: []const u8, + }; + + pub const IncludeDir = BuildConfig.IncludeDir; +}; + pub const Handle = struct { document: types.TextDocument, count: usize, @@ -182,13 +206,56 @@ fn loadBuildConfiguration(context: LoadBuildConfigContext) !void { const parse_options = std.json.ParseOptions{ .allocator = allocator }; - std.json.parseFree(BuildConfig, build_file.config, parse_options); + build_file.config.deinit(allocator); - build_file.config = std.json.parse( + const config: BuildConfig = std.json.parse( BuildConfig, &std.json.TokenStream.init(zig_run_result.stdout), parse_options, ) catch return error.RunFailed; + defer std.json.parseFree(BuildConfig, config, parse_options); + + var packages = try std.ArrayListUnmanaged(BuildFileConfig.Pkg).initCapacity(allocator, config.packages.len); + errdefer { + for (packages.items) |pkg| { + allocator.free(pkg.name); + allocator.free(pkg.uri); + } + packages.deinit(allocator); + } + + var include_dirs = try std.ArrayListUnmanaged(BuildFileConfig.IncludeDir).initCapacity(allocator, config.include_dirs.len); + errdefer { + for (include_dirs.items) |dir| { + allocator.free(dir.path); + } + include_dirs.deinit(allocator); + } + + for (config.packages) |pkg| { + const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ directory_path, pkg.path }); + defer allocator.free(pkg_abs_path); + + const uri = try URI.fromPath(allocator, pkg_abs_path); + errdefer allocator.free(uri); + + const name = try allocator.dupe(u8, pkg.name); + errdefer allocator.free(name); + + packages.appendAssumeCapacity(.{ .name = name, .uri = uri }); + } + + for (config.include_dirs) |dir| { + const path = try allocator.dupe(u8, dir.path); + errdefer allocator.free(path); + + include_dirs.appendAssumeCapacity(.{ .path = path, .system = dir.system }); + } + + build_file.config = .{ + .packages = packages.toOwnedSlice(allocator), + .include_dirs = 
include_dirs.toOwnedSlice(allocator), + }; }, else => return error.RunFailed, } @@ -404,7 +471,7 @@ fn decrementBuildFileRefs(self: *DocumentStore, build_file: *BuildFile) void { if (build_file.refs == 0) { log.debug("Freeing build file {s}", .{build_file.uri}); - std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator }); + build_file.config.deinit(self.allocator); // Decrement count of the document since one count comes // from the build file existing. @@ -598,7 +665,7 @@ fn translate(self: *DocumentStore, handle: *Handle, source: []const u8) !?[]cons errdefer self.allocator.free(result); for (dirs) |dir, i| { - result[i] = dir.getPath(); + result[i] = dir.path; } break :blk result; @@ -1019,7 +1086,7 @@ pub fn deinit(self: *DocumentStore) void { self.handles.deinit(self.allocator); for (self.build_files.items) |build_file| { - std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator }); + build_file.config.deinit(self.allocator); self.allocator.free(build_file.uri); build_file.destroy(self.allocator); } diff --git a/src/special/build_runner.zig b/src/special/build_runner.zig index c994982..0245b15 100644 --- a/src/special/build_runner.zig +++ b/src/special/build_runner.zig @@ -1,13 +1,10 @@ const root = @import("@build@"); const std = @import("std"); -const fmt = std.fmt; -const io = std.io; const log = std.log; const process = std.process; const Builder = std.build.Builder; const InstallArtifactStep = std.build.InstallArtifactStep; const LibExeObjStep = std.build.LibExeObjStep; -const ArrayList = std.ArrayList; pub const BuildConfig = struct { packages: []Pkg, @@ -15,24 +12,12 @@ pub const BuildConfig = struct { pub const Pkg = struct { name: []const u8, - uri: []const u8, + path: []const u8, }; - pub const IncludeDir = union(enum) { - raw_path: []const u8, - raw_path_system: []const u8, - - pub fn getPath(self: IncludeDir) []const u8 { - return switch (self) { - .raw_path => |path| return path, - .raw_path_system => |path| return path, - }; - } - - pub fn eql(a: IncludeDir, b: IncludeDir) bool { - return @enumToInt(a) == @enumToInt(b) and - std.mem.eql(u8, a.getPath(), b.getPath()); - } + pub const IncludeDir = struct { + path: []const u8, + system: bool, }; }; @@ -80,10 +65,10 @@ pub fn main() !void { builder.resolveInstallPrefix(null, Builder.DirList{}); try runBuild(builder); - var packages = ArrayList(BuildConfig.Pkg).init(allocator); + var packages = std.ArrayList(BuildConfig.Pkg).init(allocator); defer packages.deinit(); - var include_dirs = ArrayList(BuildConfig.IncludeDir).init(allocator); + var include_dirs = std.ArrayList(BuildConfig.IncludeDir).init(allocator); defer include_dirs.deinit(); // TODO: We currently add packages from every LibExeObj step that the install step depends on. 
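
The two lists declared above are what ends up on the wire: `main` serializes them with `std.json.stringify` to stdout, and the server parses the result back into `BuildConfig` in `loadBuildConfiguration`. As a rough sketch of the payload shape — the package name and all paths here are invented purely for illustration — the runner's output looks like:

```json
{
    "packages": [
        { "name": "known-folders", "path": "/path/to/libs/known-folders/known-folders.zig" }
    ],
    "include_dirs": [
        { "path": "/usr/local/include", "system": false }
    ]
}
```
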
@@ -101,13 +86,13 @@ pub fn main() !void { .include_dirs = include_dirs.items, }, .{ .whitespace = .{} }, - io.getStdOut().writer(), + std.io.getStdOut().writer(), ); } fn processStep( - packages: *ArrayList(BuildConfig.Pkg), - include_dirs: *ArrayList(BuildConfig.IncludeDir), + packages: *std.ArrayList(BuildConfig.Pkg), + include_dirs: *std.ArrayList(BuildConfig.IncludeDir), step: *std.build.Step, ) anyerror!void { if (step.cast(InstallArtifactStep)) |install_exe| { @@ -128,7 +113,7 @@ fn processStep( } fn processPackage( - packages: *ArrayList(BuildConfig.Pkg), + packages: *std.ArrayList(BuildConfig.Pkg), pkg: std.build.Pkg, ) anyerror!void { for (packages.items) |package| { @@ -136,9 +121,13 @@ fn processPackage( } const source = if (@hasField(std.build.Pkg, "source")) pkg.source else pkg.path; - switch (source) { - .path => |path| try packages.append(.{ .name = pkg.name, .uri = path }), - .generated => |generated| if (generated.path != null) try packages.append(.{ .name = pkg.name, .uri = generated.path.? }), + const maybe_path = switch (source) { + .path => |path| path, + .generated => |generated| generated.path, + }; + + if (maybe_path) |path| { + try packages.append(.{ .name = pkg.name, .path = path }); } if (pkg.dependencies) |dependencies| { @@ -149,18 +138,18 @@ fn processPackage( } fn processIncludeDirs( - include_dirs: *ArrayList(BuildConfig.IncludeDir), + include_dirs: *std.ArrayList(BuildConfig.IncludeDir), dirs: []std.build.LibExeObjStep.IncludeDir, ) !void { outer: for (dirs) |dir| { const candidate: BuildConfig.IncludeDir = switch (dir) { - .raw_path => |path| .{ .raw_path = path }, - .raw_path_system => |path| .{ .raw_path_system = path }, + .raw_path => |path| .{ .path = path, .system = false }, + .raw_path_system => |path| .{ .path = path, .system = true }, else => continue, }; for (include_dirs.items) |include_dir| { - if (candidate.eql(include_dir)) continue :outer; + if (std.mem.eql(u8, candidate.path, include_dir.path)) continue :outer; } try include_dirs.append(candidate); From 72cb6f0e287c69e9614a26e8b7a1905777022f45 Mon Sep 17 00:00:00 2001 From: Techatrix <19954306+Techatrix@users.noreply.github.com> Date: Mon, 22 Aug 2022 17:06:36 +0200 Subject: [PATCH 16/18] update README.md --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7856b67..e276893 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ Building `zls` is very easy. You will need [a build of Zig master](https://zigla git clone --recurse-submodules https://github.com/zigtools/zls cd zls zig build -Drelease-safe -./zig-out/bin/zls config # Configure ZLS +./zig-out/bin/zls --config # Configure ZLS ``` *For detailed building instructions, see the Wiki page about [Cloning With Git](https://github.com/zigtools/zls/wiki/Downloading-and-Building-ZLS#cloning-with-git).* @@ -92,7 +92,7 @@ There is also a `generate-data.js` in the `src/data` folder, you'll need to run ### Configuration Options -You can configure zls by running `zls config` or manually creating your own `zls.json` configuration file. +You can configure zls by running `zls --config` or manually creating your own `zls.json` configuration file. 
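
For illustration, a minimal hand-written `zls.json` might look like the sketch below. The paths are invented, `zig_exe_path` and `zig_lib_path` correspond to the config fields the server reads (see `config.zig_exe_path` and `config.zig_lib_path` in `translate_c.zig` above), `enable_snippets` is an assumed option name, and the authoritative list is the options table referenced below:

```json
{
    "zig_exe_path": "/usr/local/bin/zig",
    "zig_lib_path": "/usr/local/lib/zig",
    "enable_snippets": true
}
```
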
zls will look for a zls.json configuration file in multiple locations with the following priority: - In the local configuration folder of your OS (as provided by [known-folders](https://github.com/ziglibs/known-folders/blob/master/RESOURCES.md#folder-list)) - In the global configuration folder of your OS (as provided by [known-folders](https://github.com/ziglibs/known-folders/blob/master/RESOURCES.md#folder-list)) @@ -118,8 +118,8 @@ The following options are currently available. ## Features -`zls` supports most language features, including simple type function support, using namespace, payload capture type resolution, custom packages and others. -Notable language features that are not currently implemented include `@cImport` as well as most forms of compile time evaluation. +`zls` supports most language features, including simple type function support, using namespace, payload capture type resolution, custom packages, `cImport` and others. +Currently there is no support for compile time evaluation. The following LSP features are supported: - Completions From b4ccfeecefa2a4623547b5ca65e95f23ee8011d0 Mon Sep 17 00:00:00 2001 From: Auguste Rame <19855629+SuperAuguste@users.noreply.github.com> Date: Mon, 22 Aug 2022 19:23:37 +0200 Subject: [PATCH 17/18] Issue templates (#600) --- .github/ISSUE_TEMPLATE/bug.yml | 48 +++++++++++++++++++++++++++ .github/ISSUE_TEMPLATE/enhancement.md | 8 +++++ 2 files changed, 56 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug.yml create mode 100644 .github/ISSUE_TEMPLATE/enhancement.md diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000..b7aa320 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,48 @@ +name: Bug Report +description: File a bug report +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + A bug is when something works differently than it is expected to. + ## Remember to search before filing a new report + Please search for this bug in the issue tracker, and use a bug report title that + would have made your bug report turn up in the search results for your search query. + - type: input + id: zig-version + attributes: + label: Zig Version + description: "The output of `zig version`" + placeholder: "0.9.0-dev.1275+ac52e0056" + validations: + required: true + - type: input + id: zls-version + attributes: + label: Zig Language Server Version + description: "Your zls release or commit" + placeholder: "b21039d51261923c665d3bc58fadc4b4d5e221ea" + validations: + required: true + - type: textarea + id: repro + attributes: + label: Steps to Reproduce + description: What exactly can someone else do, in order to observe the problem that you observed? + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Behavior + description: What did you expect to happen? + validations: + required: true + - type: textarea + id: actual + attributes: + label: Actual Behavior + description: What happened instead? Be sure to include all error messages if any. 
+ validations: + required: true \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md new file mode 100644 index 0000000..44a8a47 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/enhancement.md @@ -0,0 +1,8 @@ +--- +name: Improvement +about: Enhance the zls experience +labels: enhancement + +--- + +## Remember to search before filing a new report From eb60f75413f5ea988c40f91b184943569211a878 Mon Sep 17 00:00:00 2001 From: Lee Cannon Date: Mon, 22 Aug 2022 23:01:44 +0100 Subject: [PATCH 18/18] workaround zig tarball bug (#599) --- src/DocumentStore.zig | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig index d973a27..60530f5 100644 --- a/src/DocumentStore.zig +++ b/src/DocumentStore.zig @@ -1052,9 +1052,14 @@ fn stdUriFromLibPath(allocator: std.mem.Allocator, zig_lib_path: ?[]const u8) !? if (zig_lib_path) |zpath| { const std_path = std.fs.path.resolve(allocator, &[_][]const u8{ zpath, "./std/std.zig", - }) catch |err| { - log.debug("Failed to resolve zig std library path, error: {}", .{err}); - return null; + }) catch |first_std_err| blk: { + // workaround for https://github.com/ziglang/zig/issues/12516 + break :blk std.fs.path.resolve(allocator, &[_][]const u8{ + zpath, "./zig/std/std.zig", + }) catch { + log.debug("Failed to resolve zig std library path, error: {}", .{first_std_err}); + return null; + }; }; defer allocator.free(std_path);