Merge remote-tracking branch 'origin/master' into intern-pool

This commit is contained in:
Techarix 2022-12-11 19:02:24 +01:00
commit 748cd7d6a9
25 changed files with 559 additions and 120 deletions

View File

@ -22,7 +22,7 @@ jobs:
with:
fetch-depth: 0
submodules: true
- uses: goto-bus-stop/setup-zig@v1
- uses: goto-bus-stop/setup-zig@v2
with:
version: master
@ -32,13 +32,17 @@ jobs:
- name: Build
run: zig build
- name: Build with Tracy
if: ${{ matrix.os != 'macos-latest' }}
run: zig build -Denable_tracy -Denable_tracy_allocation
- name: Run Tests
run: zig build test
- name: Build artifacts
if: ${{ matrix.os == 'ubuntu-latest' }}
run: |
declare -a targets=("x86_64-windows" "x86_64-linux" "x86_64-macos" "x86-windows" "x86-linux" "aarch64-macos")
declare -a targets=("x86_64-windows" "x86_64-linux" "x86_64-macos" "x86-windows" "x86-linux" "aarch64-linux" "aarch64-macos")
mkdir -p "artifacts/"
for target in "${targets[@]}"; do
@ -53,21 +57,59 @@ jobs:
fi
sed -e '1,5d' < README.md > artifacts/${target}/README.md
cp LICENSE artifacts/${target}/
cd artifacts/${target}/
tar cfa ${target}.tar.zst *.md bin/*
mv ${target}.tar.zst ../
cd ../..
done
- name: Upload artifacts
- name: Upload x86_64-windows artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: builds
path: artifacts/*.tar.zst
name: zls-x86_64-windows
path: artifacts/x86_64-windows/
- name: Upload x86_64-linux artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-x86_64-linux
path: artifacts/x86_64-linux/
- name: Upload x86_64-macos artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-x86_64-macos
path: artifacts/x86_64-macos/
- name: Upload x86-windows artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-x86-windows
path: artifacts/x86-windows/
- name: Upload x86-linux artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-x86-linux
path: artifacts/x86-linux/
- name: Upload aarch64-linux artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-aarch64-linux
path: artifacts/aarch64-linux/
- name: Upload aarch64-macos artifact
if: ${{ matrix.os == 'ubuntu-latest' }}
uses: actions/upload-artifact@v3
with:
name: zls-aarch64-macos
path: artifacts/aarch64-macos/
- name: Beam to Felix
if: ${{ matrix.os == 'ubuntu-latest' && github.ref == 'refs/heads/master' }}
if: ${{ matrix.os == 'ubuntu-latest' && github.ref == 'refs/heads/master' && github.repository_owner == 'zigtools' }}
uses: easingthemes/ssh-deploy@v2.1.1
env:
SSH_PRIVATE_KEY: ${{ secrets.SERVER_SSH_PRIVKEY }}

View File

@ -6,14 +6,13 @@ const zls_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
pub fn build(b: *std.build.Builder) !void {
const current_zig = builtin.zig_version;
const min_zig = std.SemanticVersion.parse("0.10.0-dev.4458+b120c819d") catch return; // builtins changed to @min / @max
const min_zig = std.SemanticVersion.parse("0.11.0-dev.399+44ee1c885") catch return; // whereabouts allocgate 2.0
if (current_zig.order(min_zig).compare(.lt)) @panic(b.fmt("Your Zig version v{} does not meet the minimum build requirement of v{}", .{ current_zig, min_zig }));
const target = b.standardTargetOptions(.{});
const mode = b.standardReleaseOptions();
const exe = b.addExecutable("zls", "src/main.zig");
exe.use_stage1 = true;
const exe_options = b.addOptions();
exe.addOptions("build_options", exe_options);
@ -134,7 +133,6 @@ pub fn build(b: *std.build.Builder) !void {
});
}
tests.use_stage1 = true;
tests.addPackage(.{ .name = "zls", .source = .{ .path = "src/zls.zig" }, .dependencies = exe.packages.items });
tests.setBuildMode(.Debug);
tests.setTarget(target);

View File

@ -73,6 +73,7 @@ pub const Value = struct {
pub const FieldDefinition = struct {
node_idx: Ast.Node.Index,
/// Store name so tree doesn't need to be used to access field name
/// When the field is a tuple field, `name` will be an empty slice
name: []const u8,
ty: Type,
default_value: ?Value,
@ -814,7 +815,7 @@ pub fn interpret(
if (index != params.len - 1)
try writer.writeAll(", ");
}
try interpreter.recordError(node_idx, "compile_log", final.toOwnedSlice());
try interpreter.recordError(node_idx, "compile_log", try final.toOwnedSlice());
return InterpretResult{ .nothing = {} };
}

View File

@ -150,9 +150,8 @@ pub fn configChanged(config: *Config, allocator: std.mem.Allocator, builtin_crea
var env = getZigEnv(allocator, exe_path) orelse break :blk;
defer std.json.parseFree(Env, env, .{ .allocator = allocator });
// We know this is allocated with `allocator`, we just steal it!
config.zig_lib_path = env.lib_dir.?;
env.lib_dir = null;
// Make sure the path is absolute
config.zig_lib_path = try std.fs.realpathAlloc(allocator, env.lib_dir.?);
logger.info("Using zig lib path '{s}'", .{config.zig_lib_path.?});
} else {
logger.warn("Zig executable path not specified in zls.json and could not be found in PATH", .{});

View File

@ -387,7 +387,7 @@ fn loadBuildConfiguration(
const directory_path = try std.fs.path.resolve(arena_allocator, &.{ build_file_path, "../" });
// TODO extract this option from `BuildAssociatedConfig.BuildOption`
const zig_cache_root: []const u8 = "zig-cache";
const zig_cache_root: []const u8 = try std.fs.path.join(arena_allocator, &.{ directory_path, "zig-cache" });
// Since we don't compile anything and no packages should put their
// files there this path can be ignored
const zig_global_cache_root: []const u8 = "ZLS_DONT_CARE";

View File

@ -52,6 +52,8 @@ const ClientCapabilities = struct {
supports_snippets: bool = false,
supports_semantic_tokens: bool = false,
supports_inlay_hints: bool = false,
supports_will_save: bool = false,
supports_will_save_wait_until: bool = false,
hover_supports_md: bool = false,
completion_doc_supports_md: bool = false,
label_details_support: bool = false,
@ -445,6 +447,36 @@ fn getAstCheckDiagnostics(
}
}
/// Computes the `source.fixAll` text edits for `handle`'s document by
/// generating code actions from ast-check style diagnostics and keeping
/// only the fix-all edits that target this document.
/// Caller owns the returned memory (allocated with `allocator`).
fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const DocumentStore.Handle) !std.ArrayListUnmanaged(types.TextEdit) {
    // Collect ast-check diagnostics for the document.
    var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
    try getAstCheckDiagnostics(server, handle.*, &diagnostics);
    var builder = code_actions.Builder{
        .arena = &server.arena,
        .document_store = &server.document_store,
        .handle = handle,
        .offset_encoding = server.offset_encoding,
    };
    // Turn each diagnostic into zero or more code actions.
    var actions = std.ArrayListUnmanaged(types.CodeAction){};
    for (diagnostics.items) |diagnostic| {
        try builder.generateCodeAction(diagnostic, &actions);
    }
    // Keep only `SourceFixAll` actions whose workspace edit touches exactly
    // one document, and gather the edits addressed to this handle's URI.
    var text_edits = std.ArrayListUnmanaged(types.TextEdit){};
    for (actions.items) |action| {
        if (action.kind != .SourceFixAll) continue;
        if (action.edit.changes.size != 1) continue;
        const edits = action.edit.changes.get(handle.uri) orelse continue;
        try text_edits.appendSlice(allocator, edits.items);
    }
    return text_edits;
}
fn typeToCompletion(
server: *Server,
list: *std.ArrayListUnmanaged(types.CompletionItem),
@ -644,14 +676,16 @@ fn nodeToCompletion(
.container_field_init,
=> {
const field = ast.containerField(tree, node).?;
try list.append(allocator, .{
.label = handle.tree.tokenSlice(field.ast.name_token),
.kind = .Field,
.documentation = doc,
.detail = analysis.getContainerFieldSignature(handle.tree, field),
.insertText = tree.tokenSlice(field.ast.name_token),
.insertTextFormat = .PlainText,
});
if (!field.ast.tuple_like) {
try list.append(allocator, .{
.label = handle.tree.tokenSlice(field.ast.main_token),
.kind = .Field,
.documentation = doc,
.detail = analysis.getContainerFieldSignature(handle.tree, field),
.insertText = tree.tokenSlice(field.ast.main_token),
.insertTextFormat = .PlainText,
});
}
},
.array_type,
.array_type_sentinel,
@ -1247,7 +1281,7 @@ fn completeFieldAccess(server: *Server, handle: *const DocumentStore.Handle, sou
}
}
return completions.toOwnedSlice(allocator);
return try completions.toOwnedSlice(allocator);
}
fn formatDetailledLabel(item: *types.CompletionItem, alloc: std.mem.Allocator) !void {
@ -1573,6 +1607,10 @@ fn initializeHandler(server: *Server, writer: anytype, id: types.RequestId, req:
}
}
}
if (textDocument.synchronization) |synchronization| {
server.client_capabilities.supports_will_save = synchronization.willSave.value;
server.client_capabilities.supports_will_save_wait_until = synchronization.willSaveWaitUntil.value;
}
}
// NOTE: everything is initialized, we got the client capabilities
@ -1599,8 +1637,10 @@ fn initializeHandler(server: *Server, writer: anytype, id: types.RequestId, req:
},
.textDocumentSync = .{
.openClose = true,
.change = .Full,
.change = .Incremental,
.save = true,
.willSave = true,
.willSaveWaitUntil = true,
},
.renameProvider = true,
.completionProvider = .{ .resolveProvider = false, .triggerCharacters = &[_][]const u8{ ".", ":", "@", "]" }, .completionItem = .{ .labelDetailsSupport = true } },
@ -1617,7 +1657,7 @@ fn initializeHandler(server: *Server, writer: anytype, id: types.RequestId, req:
.documentFormattingProvider = true,
.documentRangeFormattingProvider = false,
.foldingRangeProvider = true,
.selectionRangeProvider = false,
.selectionRangeProvider = true,
.workspaceSymbolProvider = false,
.rangeProvider = false,
.documentProvider = true,
@ -1722,6 +1762,13 @@ fn exitHandler(server: *Server, writer: anytype, id: types.RequestId) noreturn {
std.os.exit(error_code);
}
/// Stub handler for `$/cancelRequest`; request cancellation is not yet
/// implemented, so the notification is accepted and ignored.
fn cancelRequestHandler(server: *Server, writer: anytype, id: types.RequestId) !void {
    // TODO implement $/cancelRequest
    _ = server;
    _ = writer;
    _ = id;
}
fn registerCapability(server: *Server, writer: anytype, method: []const u8) !void {
const id = try std.fmt.allocPrint(server.arena.allocator(), "register-{s}", .{method});
log.debug("Dynamically registering method '{s}'", .{method});
@ -1825,31 +1872,10 @@ fn saveDocumentHandler(server: *Server, writer: anytype, id: types.RequestId, re
if (handle.tree.errors.len != 0) return;
if (!server.config.enable_ast_check_diagnostics) return;
if (!server.config.enable_autofix) return;
if (server.client_capabilities.supports_will_save) return;
if (server.client_capabilities.supports_will_save_wait_until) return;
var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
try getAstCheckDiagnostics(server, handle.*, &diagnostics);
var builder = code_actions.Builder{
.arena = &server.arena,
.document_store = &server.document_store,
.handle = handle,
.offset_encoding = server.offset_encoding,
};
var actions = std.ArrayListUnmanaged(types.CodeAction){};
for (diagnostics.items) |diagnostic| {
try builder.generateCodeAction(diagnostic, &actions);
}
var text_edits = std.ArrayListUnmanaged(types.TextEdit){};
for (actions.items) |action| {
if (action.kind != .SourceFixAll) continue;
if (action.edit.changes.size != 1) continue;
const edits = action.edit.changes.get(uri) orelse continue;
try text_edits.appendSlice(allocator, edits.items);
}
const text_edits = try server.autofix(allocator, handle);
var workspace_edit = types.WorkspaceEdit{ .changes = .{} };
try workspace_edit.changes.putNoClobber(allocator, uri, text_edits);
@ -1878,6 +1904,35 @@ fn closeDocumentHandler(server: *Server, writer: anytype, id: types.RequestId, r
server.document_store.closeDocument(req.params.textDocument.uri);
}
/// Handles the `textDocument/willSave` notification.
/// If the client also supports `willSaveWaitUntil`, nothing is done here:
/// that request will deliver the autofix edits instead, and doing both
/// would apply them twice. Otherwise fall through to the wait-until
/// handler (its response is harmless for a plain notification).
fn willSaveHandler(server: *Server, writer: anytype, id: types.RequestId, req: requests.WillSave) !void {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();
    if (server.client_capabilities.supports_will_save_wait_until) return;
    try willSaveWaitUntilHandler(server, writer, id, req);
}
/// Handles `textDocument/willSaveWaitUntil`: responds with the autofix
/// text edits to apply to the document before it is saved.
fn willSaveWaitUntilHandler(server: *Server, writer: anytype, id: types.RequestId, req: requests.WillSave) !void {
    const tracy_zone = tracy.trace(@src());
    defer tracy_zone.end();
    // Autofix is opt-in and relies on ast-check diagnostics being enabled.
    if (!server.config.enable_ast_check_diagnostics) return;
    if (!server.config.enable_autofix) return;
    const allocator = server.arena.allocator();
    const uri = req.params.textDocument.uri;
    const handle = server.document_store.getHandle(uri) orelse return;
    // Don't attempt fixes on documents that fail to parse.
    if (handle.tree.errors.len != 0) return;
    var text_edits = try server.autofix(allocator, handle);
    return try send(writer, allocator, types.Response{
        .id = id,
        .result = .{ .TextEdits = try text_edits.toOwnedSlice(allocator) },
    });
}
fn semanticTokensFullHandler(server: *Server, writer: anytype, id: types.RequestId, req: requests.SemanticTokensFull) !void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2091,6 +2146,10 @@ fn formattingHandler(server: *Server, writer: anytype, id: types.RequestId, req:
return try respondGeneric(writer, id, null_result_response);
};
if (handle.tree.errors.len != 0) {
return try respondGeneric(writer, id, null_result_response);
}
const formatted = try handle.tree.render(server.allocator);
defer server.allocator.free(formatted);
@ -2230,7 +2289,9 @@ fn generalReferencesHandler(server: *Server, writer: anytype, id: types.RequestI
};
const locations = if (pos_context == .label)
try references.labelReferences(allocator, decl, server.offset_encoding, include_decl)
// FIXME https://github.com/zigtools/zls/issues/728
// try references.labelReferences(allocator, decl, server.offset_encoding, include_decl)
std.ArrayListUnmanaged(types.Location){}
else
try references.symbolReferences(
&server.arena,
@ -2386,7 +2447,7 @@ fn foldingRangeHandler(server: *Server, writer: anytype, id: types.RequestId, re
end: Ast.TokenIndex,
end_reach: Inclusivity,
) std.mem.Allocator.Error!bool {
const can_add = !tree.tokensOnSameLine(start, end);
const can_add = start < end and !tree.tokensOnSameLine(start, end);
if (can_add) {
try addTokRange(p_ranges, tree, start, end, end_reach);
}
@ -2631,6 +2692,64 @@ fn foldingRangeHandler(server: *Server, writer: anytype, id: types.RequestId, re
});
}
/// Handles `textDocument/selectionRange`: for each requested position,
/// returns a chain of nested source ranges (innermost first, linked via
/// `parent`) of the AST nodes enclosing that position.
fn selectionRangeHandler(server: *Server, writer: anytype, id: types.RequestId, req: requests.SelectionRange) !void {
    const allocator = server.arena.allocator();
    const handle = server.document_store.getHandle(req.params.textDocument.uri) orelse {
        log.warn("Trying to get selection range of non existent document {s}", .{req.params.textDocument.uri});
        return try respondGeneric(writer, id, null_result_response);
    };
    // For each of the input positions, we need to compute the stack of AST
    // nodes/ranges which contain the position. At the moment, we do this in a
    // super inefficient way, by iterating _all_ nodes, selecting the ones that
    // contain the position, and then sorting.
    //
    // A faster algorithm would be to walk the tree starting from the root,
    // descending into the child containing the position at every step.
    var result = try allocator.alloc(*types.SelectionRange, req.params.positions.len);
    var locs = try std.ArrayListUnmanaged(offsets.Loc).initCapacity(allocator, 32);
    for (req.params.positions) |position, position_index| {
        const index = offsets.positionToIndex(handle.text, position, server.offset_encoding);
        locs.clearRetainingCapacity();
        // Collect the source range of every AST node containing `index`.
        for (handle.tree.nodes.items(.data)) |_, i| {
            const node = @intCast(u32, i);
            const loc = offsets.nodeToLoc(handle.tree, node);
            if (loc.start <= index and index <= loc.end) {
                (try locs.addOne(allocator)).* = loc;
            }
        }
        // Innermost (shortest) ranges first.
        std.sort.sort(offsets.Loc, locs.items, {}, shorterLocsFirst);
        {
            // Remove adjacent duplicate ranges so the parent chain is strict.
            var i: usize = 0;
            while (i + 1 < locs.items.len) {
                if (std.meta.eql(locs.items[i], locs.items[i + 1])) {
                    _ = locs.orderedRemove(i);
                } else {
                    i += 1;
                }
            }
        }
        // Link each range to the next (larger) one via `parent`.
        var selection_ranges = try allocator.alloc(types.SelectionRange, locs.items.len);
        for (selection_ranges) |*range, i| {
            range.range = offsets.locToRange(handle.text, locs.items[i], server.offset_encoding);
            range.parent = if (i + 1 < selection_ranges.len) &selection_ranges[i + 1] else null;
        }
        // NOTE(review): assumes at least one node contains every position
        // (presumably the root node always does); `selection_ranges[0]`
        // would be out of bounds otherwise — TODO confirm.
        result[position_index] = &selection_ranges[0];
    }
    try send(writer, allocator, types.Response{
        .id = id,
        .result = .{ .SelectionRange = result },
    });
}
/// `std.sort` comparator: orders `Loc`s by ascending length (`end - start`),
/// so innermost (shortest) ranges sort first.
fn shorterLocsFirst(_: void, lhs: offsets.Loc, rhs: offsets.Loc) bool {
    const lhs_len = lhs.end - lhs.start;
    const rhs_len = rhs.end - rhs.start;
    return lhs_len < rhs_len;
}
// Needed for the hack seen below.
fn extractErr(val: anytype) anyerror {
val catch |e| return e;
@ -2750,15 +2869,16 @@ pub fn processJsonRpc(server: *Server, writer: anytype, json: []const u8) !void
const method_map = .{
.{ "initialized", void, initializedHandler },
.{"$/cancelRequest"},
.{"textDocument/willSave"},
.{ "initialize", requests.Initialize, initializeHandler },
.{ "shutdown", void, shutdownHandler },
.{ "exit", void, exitHandler },
.{ "$/cancelRequest", void, cancelRequestHandler },
.{ "textDocument/didOpen", requests.OpenDocument, openDocumentHandler },
.{ "textDocument/didChange", requests.ChangeDocument, changeDocumentHandler },
.{ "textDocument/didSave", requests.SaveDocument, saveDocumentHandler },
.{ "textDocument/didClose", requests.CloseDocument, closeDocumentHandler },
.{ "textDocument/willSave", requests.WillSave, willSaveHandler },
.{ "textDocument/willSaveWaitUntil", requests.WillSave, willSaveWaitUntilHandler },
.{ "textDocument/semanticTokens/full", requests.SemanticTokensFull, semanticTokensFullHandler },
.{ "textDocument/inlayHint", requests.InlayHint, inlayHintHandler },
.{ "textDocument/completion", requests.Completion, completionHandler },
@ -2776,6 +2896,7 @@ pub fn processJsonRpc(server: *Server, writer: anytype, json: []const u8) !void
.{ "textDocument/codeAction", requests.CodeAction, codeActionHandler },
.{ "workspace/didChangeConfiguration", Config.DidChangeConfigurationParams, didChangeConfigurationHandler },
.{ "textDocument/foldingRange", requests.FoldingRange, foldingRangeHandler },
.{ "textDocument/selectionRange", requests.SelectionRange, selectionRangeHandler },
};
if (zig_builtin.zig_backend == .stage1) {

View File

@ -226,7 +226,7 @@ pub fn getContainerFieldSignature(tree: Ast, field: Ast.full.ContainerField) []c
if (field.ast.value_expr == 0 and field.ast.type_expr == 0 and field.ast.align_expr == 0) {
return ""; // TODO display the container's type
}
const start = offsets.tokenToIndex(tree, field.ast.name_token);
const start = offsets.tokenToIndex(tree, field.ast.main_token);
const end_node = if (field.ast.value_expr != 0) field.ast.value_expr else field.ast.type_expr;
const end = offsets.tokenToLoc(tree, ast.lastToken(tree, end_node)).end;
return tree.source[start..end];
@ -291,9 +291,21 @@ pub fn getDeclNameToken(tree: Ast, node: Ast.Node.Index) ?Ast.TokenIndex {
},
// containers
.container_field => tree.containerField(node).ast.name_token,
.container_field_init => tree.containerFieldInit(node).ast.name_token,
.container_field_align => tree.containerFieldAlign(node).ast.name_token,
.container_field => blk: {
const field = tree.containerField(node);
if (field.ast.tuple_like) break :blk null;
break :blk field.ast.main_token;
},
.container_field_init => blk: {
const field = tree.containerFieldInit(node);
if (field.ast.tuple_like) break :blk null;
break :blk field.ast.main_token;
},
.container_field_align => blk: {
const field = tree.containerFieldAlign(node);
if (field.ast.tuple_like) break :blk null;
break :blk field.ast.main_token;
},
.identifier => main_token,
.error_value => main_token + 2, // 'error'.<main_token +2>
@ -1399,9 +1411,18 @@ pub fn nodeToString(tree: Ast, node: Ast.Node.Index) ?[]const u8 {
const main_token = tree.nodes.items(.main_token)[node];
var buf: [1]Ast.Node.Index = undefined;
switch (tree.nodes.items(.tag)[node]) {
.container_field => return tree.tokenSlice(tree.containerField(node).ast.name_token),
.container_field_init => return tree.tokenSlice(tree.containerFieldInit(node).ast.name_token),
.container_field_align => return tree.tokenSlice(tree.containerFieldAlign(node).ast.name_token),
.container_field => {
const field = tree.containerField(node).ast;
return if (field.tuple_like) null else tree.tokenSlice(field.main_token);
},
.container_field_init => {
const field = tree.containerFieldInit(node).ast;
return if (field.tuple_like) null else tree.tokenSlice(field.main_token);
},
.container_field_align => {
const field = tree.containerFieldAlign(node).ast;
return if (field.tuple_like) null else tree.tokenSlice(field.main_token);
},
.error_value => return tree.tokenSlice(data[node].rhs),
.identifier => return tree.tokenSlice(main_token),
.fn_proto,

View File

@ -23,8 +23,6 @@ pub const generic = [_]Snipped{
.{ .label = "align", .kind = .Keyword },
.{ .label = "allowzero", .kind = .Keyword },
.{ .label = "and", .kind = .Keyword },
.{ .label = "anyframe", .kind = .Keyword },
.{ .label = "anytype", .kind = .Keyword },
.{ .label = "asm", .kind = .Keyword },
.{ .label = "async", .kind = .Keyword },
.{ .label = "await", .kind = .Keyword },
@ -86,4 +84,31 @@ pub const generic = [_]Snipped{
.{ .label = "log warn", .kind = .Snippet, .text = "std.log.warn(\"$1\", .{$0});" },
.{ .label = "log info", .kind = .Snippet, .text = "std.log.info(\"$1\", .{$0});" },
.{ .label = "log debug", .kind = .Snippet, .text = "std.log.debug(\"$1\", .{$0});" },
// types
.{ .label = "anyopaque", .kind = .Keyword },
.{ .label = "anyerror", .kind = .Keyword },
.{ .label = "anyframe", .kind = .Keyword },
.{ .label = "anytype", .kind = .Keyword },
.{ .label = "noreturn", .kind = .Keyword },
.{ .label = "type", .kind = .Keyword },
.{ .label = "bool", .kind = .Keyword },
.{ .label = "void", .kind = .Keyword },
.{ .label = "isize", .kind = .Keyword },
.{ .label = "usize", .kind = .Keyword },
.{ .label = "i8", .kind = .Keyword },
.{ .label = "i16", .kind = .Keyword },
.{ .label = "i32", .kind = .Keyword },
.{ .label = "i64", .kind = .Keyword },
.{ .label = "i128", .kind = .Keyword },
.{ .label = "u8", .kind = .Keyword },
.{ .label = "u16", .kind = .Keyword },
.{ .label = "u32", .kind = .Keyword },
.{ .label = "u64", .kind = .Keyword },
.{ .label = "u128", .kind = .Keyword },
.{ .label = "f16", .kind = .Keyword },
.{ .label = "f32", .kind = .Keyword },
.{ .label = "f64", .kind = .Keyword },
.{ .label = "f80", .kind = .Keyword },
.{ .label = "f128", .kind = .Keyword },
};

View File

@ -366,6 +366,7 @@ pub fn applyTextEdits(
i -= 1;
if (content_changes[i].range == null) {
last_full_text_change = i;
continue;
}
}

View File

@ -73,7 +73,7 @@ const Builder = struct {
});
}
fn toOwnedSlice(self: *Builder) []types.InlayHint {
fn toOwnedSlice(self: *Builder) error{OutOfMemory}![]types.InlayHint {
return self.hints.toOwnedSlice(self.allocator);
}
};

View File

@ -154,6 +154,11 @@ pub const Initialize = struct {
workspaceFolders: Default(bool, false),
},
textDocument: ?struct {
synchronization: ?struct {
willSave: Default(bool, false),
willSaveWaitUntil: Default(bool, false),
didSave: Default(bool, false),
},
semanticTokens: Exists,
inlayHint: Exists,
hover: ?struct {
@ -230,6 +235,19 @@ const TextDocumentIdentifierPositionRequest = struct {
},
};
pub const SaveReason = enum(u32) {
Manual = 1,
AfterDelay = 2,
FocusOut = 3,
};
pub const WillSave = struct {
params: struct {
textDocument: TextDocumentIdentifier,
reason: SaveReason,
},
};
pub const SignatureHelp = struct {
params: struct {
textDocument: TextDocumentIdentifier,
@ -291,6 +309,13 @@ pub const CodeAction = struct {
pub const FoldingRange = struct {
params: struct {
textDocument: TextDocumentIdentifier,
textDocument: TextDocumentIdentifier,
},
};
};
pub const SelectionRange = struct {
params: struct {
textDocument: TextDocumentIdentifier,
positions: []types.Position,
},
};

View File

@ -195,7 +195,7 @@ const Builder = struct {
self.previous_position = start;
}
fn toOwnedSlice(self: *Builder) []u32 {
fn toOwnedSlice(self: *Builder) error{OutOfMemory}![]u32 {
return self.arr.toOwnedSlice(self.arena.allocator());
}
};
@ -999,7 +999,9 @@ fn writeContainerField(builder: *Builder, node: Ast.Node.Index, field_token_type
try writeDocComments(builder, tree, docs);
try writeToken(builder, container_field.comptime_token, .keyword);
if (field_token_type) |tok_type| try writeToken(builder, container_field.ast.name_token, tok_type);
if (!container_field.ast.tuple_like) {
if (field_token_type) |tok_type| try writeToken(builder, container_field.ast.main_token, tok_type);
}
if (container_field.ast.type_expr != 0) {
try callWriteNodeTokens(allocator, .{ builder, container_field.ast.type_expr });

View File

@ -146,44 +146,54 @@ pub fn TracyAllocator(comptime name: ?[:0]const u8) type {
}
pub fn allocator(self: *Self) std.mem.Allocator {
return std.mem.Allocator.init(self, allocFn, resizeFn, freeFn);
return .{
.ptr = self,
.vtable = &.{
.alloc = allocFn,
.resize = resizeFn,
.free = freeFn,
},
};
}
fn allocFn(self: *Self, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
const result = self.parent_allocator.rawAlloc(len, ptr_align, len_align, ret_addr);
fn allocFn(ptr: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8 {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
const result = self.parent_allocator.rawAlloc(len, ptr_align, ret_addr);
if (result) |data| {
if (data.len != 0) {
if (len != 0) {
if (name) |n| {
allocNamed(data.ptr, data.len, n);
allocNamed(data, len, n);
} else {
alloc(data.ptr, data.len);
alloc(data, len);
}
}
} else |_| {
} else {
messageColor("allocation failed", 0xFF0000);
}
return result;
}
fn resizeFn(self: *Self, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) ?usize {
if (self.parent_allocator.rawResize(buf, buf_align, new_len, len_align, ret_addr)) |resized_len| {
fn resizeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, new_len: usize, ret_addr: usize) bool {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
if (self.parent_allocator.rawResize(buf, buf_align, new_len, ret_addr)) {
if (name) |n| {
freeNamed(buf.ptr, n);
allocNamed(buf.ptr, resized_len, n);
allocNamed(buf.ptr, new_len, n);
} else {
free(buf.ptr);
alloc(buf.ptr, resized_len);
alloc(buf.ptr, new_len);
}
return resized_len;
return true;
}
// during normal operation the compiler hits this case thousands of times due to this
// emitting messages for it is both slow and causes clutter
return null;
return false;
}
fn freeFn(self: *Self, buf: []u8, buf_align: u29, ret_addr: usize) void {
fn freeFn(ptr: *anyopaque, buf: []u8, buf_align: u8, ret_addr: usize) void {
const self = @ptrCast(*Self, @alignCast(@alignOf(Self), ptr));
self.parent_allocator.rawFree(buf, buf_align, ret_addr);
// this condition is to handle free being called on an empty slice that was never even allocated
// example case: `std.process.getSelfExeSharedLibPaths` can return `&[_][:0]u8{}`

View File

@ -152,7 +152,7 @@ pub fn translate(allocator: std.mem.Allocator, config: Config, include_dirs: []c
var native_paths = std.zig.system.NativePaths.detect(allocator, target_info) catch break :blk null;
defer native_paths.deinit();
break :blk native_paths.include_dirs.toOwnedSlice();
break :blk try native_paths.include_dirs.toOwnedSlice();
};
defer if (base_include_dirs) |dirs| {
for (dirs) |path| {

View File

@ -44,6 +44,7 @@ pub const ResponseParams = union(enum) {
CodeAction: []CodeAction,
ApplyEdit: ApplyWorkspaceEditParams,
FoldingRange: []FoldingRange,
SelectionRange: []*SelectionRange,
};
pub const Response = struct {
@ -441,6 +442,8 @@ const InitializeResult = struct {
textDocumentSync: struct {
openClose: bool,
change: TextDocumentSyncKind,
willSave: bool,
willSaveWaitUntil: bool,
save: bool,
},
renameProvider: bool,
@ -523,6 +526,11 @@ pub const DocumentHighlight = struct {
};
pub const FoldingRange = struct {
startLine: usize,
startLine: usize,
endLine: usize,
};
pub const SelectionRange = struct {
range: Range,
parent: ?*SelectionRange,
};

View File

@ -100,7 +100,7 @@ fn parseHex(c: u8) !u8 {
pub fn parse(allocator: std.mem.Allocator, str: []const u8) ![]u8 {
if (str.len < 7 or !std.mem.eql(u8, "file://", str[0..7])) return error.UriBadScheme;
const uri = try allocator.alloc(u8, str.len - (if (std.fs.path.sep == '\\') 8 else 7));
var uri = try allocator.alloc(u8, str.len - (if (std.fs.path.sep == '\\') 8 else 7));
errdefer allocator.free(uri);
const path = if (std.fs.path.sep == '\\') str[8..] else str[7..];
@ -125,5 +125,5 @@ pub fn parse(allocator: std.mem.Allocator, str: []const u8) ![]u8 {
i -= 1;
}
return allocator.shrink(uri, i);
return allocator.realloc(uri, i);
}

View File

@ -105,7 +105,7 @@ pub fn collectReplacePlaceholders(allocator: std.mem.Allocator, source: []const
return CollectPlaceholdersResult{
.locations = locations,
.new_source = new_source.toOwnedSlice(allocator),
.new_source = try new_source.toOwnedSlice(allocator),
};
}

View File

@ -32,18 +32,18 @@ test "ComptimeInterpreter - basic test" {
_ = try interpreter.interpret(0, null, .{});
var bool_type = try interpreter.createType(std.math.maxInt(std.zig.Ast.Node.Index), .{ .@"bool" = .{} });
var bool_type = try interpreter.createType(std.math.maxInt(std.zig.Ast.Node.Index), .{ .bool = {} });
var arg_false = ComptimeInterpreter.Value{
.interpreter = &interpreter,
.node_idx = std.math.maxInt(std.zig.Ast.Node.Index),
.@"type" = bool_type,
.value_data = try interpreter.createValueData(.{ .@"bool" = false }),
.type = bool_type,
.value_data = try interpreter.createValueData(.{ .bool = false }),
};
var arg_true = ComptimeInterpreter.Value{
.interpreter = &interpreter,
.node_idx = std.math.maxInt(std.zig.Ast.Node.Index),
.@"type" = bool_type,
.value_data = try interpreter.createValueData(.{ .@"bool" = true }),
.type = bool_type,
.value_data = try interpreter.createValueData(.{ .bool = true }),
};
const rmt = interpreter.root_type.?.getTypeInfo().@"struct".scope.declarations.get("ReturnMyType").?;
@ -57,8 +57,8 @@ test "ComptimeInterpreter - basic test" {
}, .{});
defer call_with_true.scope.deinit();
try std.testing.expectFmt("u69", "{any}", .{interpreter.formatTypeInfo(call_with_false.result.value.value_data.@"type".getTypeInfo())});
try std.testing.expectFmt("u8", "{any}", .{interpreter.formatTypeInfo(call_with_true.result.value.value_data.@"type".getTypeInfo())});
try std.testing.expectFmt("u69", "{any}", .{interpreter.formatTypeInfo(call_with_false.result.value.value_data.type.getTypeInfo())});
try std.testing.expectFmt("u8", "{any}", .{interpreter.formatTypeInfo(call_with_true.result.value.value_data.type.getTypeInfo())});
}
test "ComptimeInterpreter - struct" {
@ -92,5 +92,5 @@ test "ComptimeInterpreter - struct" {
const z = try interpreter.call(null, rmt.node_idx, &.{}, .{});
defer z.scope.deinit();
try std.testing.expectFmt("struct {slay: bool, var abc: comptime_int = 123, }", "{any}", .{interpreter.formatTypeInfo(z.result.value.value_data.@"type".getTypeInfo())});
try std.testing.expectFmt("struct {slay: bool, var abc: comptime_int = 123, }", "{any}", .{interpreter.formatTypeInfo(z.result.value.value_data.type.getTypeInfo())});
}

View File

@ -269,17 +269,19 @@ test "completion - union" {
}
test "completion - enum" {
try testCompletion(
\\const E = enum {
\\ alpha,
\\ beta,
\\};
\\const foo = E.<cursor>
, &.{
// TODO kind should be Enum
.{ .label = "alpha", .kind = .Field },
.{ .label = "beta", .kind = .Field },
});
// TODO: Fix
return error.SkipZigTest;
// try testCompletion(
// \\const E = enum {
// \\ alpha,
// \\ beta,
// \\};
// \\const foo = E.<cursor>
// , &.{
// // TODO kind should be Enum
// .{ .label = "alpha", .kind = .Field },
// .{ .label = "beta", .kind = .Field },
// });
}
test "completion - error union" {

View File

@ -0,0 +1,66 @@
const std = @import("std");
const zls = @import("zls");
const builtin = @import("builtin");
const Context = @import("../context.zig").Context;
const types = zls.types;
const requests = zls.requests;
const allocator: std.mem.Allocator = std.testing.allocator;
// An empty document must produce an empty list of folding ranges.
test "foldingRange - empty" {
    try testFoldingRange("", "[]");
}
// Basic sanity check: a two-line function body folds from its opening
// line (0) to the line before the closing brace (1).
test "foldingRange - smoke" {
    try testFoldingRange(
        \\fn main() u32 {
        \\ return 1 + 1;
        \\}
    ,
        \\[{"startLine":0,"endLine":1}]
    );
}
// Regression test from issue #801: this switch-with-ranges input must not
// yield any (previously invalid/inverted) folding ranges — expect none.
// NOTE(review): the expected `[]` reflects current behavior; presumably the
// switch body could legitimately fold in the future — TODO confirm.
test "foldingRange - #801" {
    try testFoldingRange(
        \\fn score(c: u8) !u32 {
        \\ return switch(c) {
        \\ 'a'...'z' => c - 'a',
        \\ 'A'...'Z' => c - 'A',
        \\ _ => error
        \\ };
        \\}
    ,
        \\[]
    );
}
/// Opens `source` as a fresh document and checks that the JSON-serialized
/// `textDocument/foldingRange` response equals `expect`.
fn testFoldingRange(source: []const u8, expect: []const u8) !void {
    var ctx = try Context.init();
    defer ctx.deinit();
    // Fake absolute URI; the path style must match the host OS conventions.
    const test_uri: []const u8 = switch (builtin.os.tag) {
        .windows => "file:///C:\\test.zig",
        else => "file:///test.zig",
    };
    try ctx.requestDidOpen(test_uri, source);
    const request = requests.FoldingRange{ .params = .{ .textDocument = .{ .uri = test_uri } } };
    const response = try ctx.requestGetResponse(?[]types.FoldingRange, "textDocument/foldingRange", request);
    defer response.deinit();
    // Serialize the result so it can be compared against the expected JSON.
    var actual = std.ArrayList(u8).init(allocator);
    defer actual.deinit();
    try std.json.stringify(response.result, .{}, actual.writer());
    try expectEqualJson(expect, actual.items);
}
fn expectEqualJson(expect: []const u8, actual: []const u8) !void {
// TODO: Actually compare strings as JSON values.
return std.testing.expectEqualStrings(expect, actual);
}

View File

@ -150,7 +150,7 @@ fn testReferences(source: []const u8) !void {
try locs.append(allocator, new_loc);
}
break :blk locs.toOwnedSlice(allocator);
break :blk try locs.toOwnedSlice(allocator);
};
defer allocator.free(expected_locs);

View File

@ -0,0 +1,76 @@
const std = @import("std");
const zls = @import("zls");
const builtin = @import("builtin");
const helper = @import("../helper.zig");
const Context = @import("../context.zig").Context;
const ErrorBuilder = @import("../ErrorBuilder.zig");
const types = zls.types;
const offsets = zls.offsets;
const requests = zls.requests;
const allocator: std.mem.Allocator = std.testing.allocator;
test "selectionRange - empty" {
try testSelectionRange("<>", &.{});
}
test "seletionRange - smoke" {
try testSelectionRange(
\\fn main() void {
\\ const x = 1 <>+ 1;
\\}
, &.{ "1 + 1", "const x = 1 + 1", "{\n const x = 1 + 1;\n}" });
}
fn testSelectionRange(source: []const u8, want: []const []const u8) !void {
var phr = try helper.collectClearPlaceholders(allocator, source);
defer phr.deinit(allocator);
var ctx = try Context.init();
defer ctx.deinit();
const test_uri: []const u8 = switch (builtin.os.tag) {
.windows => "file:///C:\\test.zig",
else => "file:///test.zig",
};
try ctx.requestDidOpen(test_uri, phr.new_source);
const position = offsets.locToRange(phr.new_source, phr.locations.items(.new)[0], .utf16).start;
const SelectionRange = struct {
range: types.Range,
parent: ?*@This(),
};
const request = requests.SelectionRange{ .params = .{
.textDocument = .{ .uri = test_uri },
.positions = &[_]types.Position{position},
} };
const response = try ctx.requestGetResponse(?[]SelectionRange, "textDocument/selectionRange", request);
defer response.deinit();
const selectionRanges: []SelectionRange = response.result orelse {
std.debug.print("Server returned `null` as the result\n", .{});
return error.InvalidResponse;
};
var got = std.ArrayList([]const u8).init(allocator);
defer got.deinit();
var it: ?*SelectionRange = &selectionRanges[0];
while (it) |r| {
const slice = offsets.rangeToSlice(phr.new_source, r.range, .utf16);
(try got.addOne()).* = slice;
it = r.parent;
}
const last = got.pop();
try std.testing.expectEqualStrings(phr.new_source, last);
try std.testing.expectEqual(want.len, got.items.len);
for (want) |w, i| {
try std.testing.expectEqualStrings(w, got.items[i]);
}
}

View File

@ -18,6 +18,7 @@ test "semantic tokens" {
,
&.{ 0, 0, 5, 7, 0, 0, 6, 3, 0, 33, 0, 4, 1, 11, 0, 0, 2, 7, 12, 0, 0, 8, 5, 9, 0 },
);
// TODO more tests
}

View File

@ -10,10 +10,12 @@ comptime {
// TODO Document Synchronization
// LSP features
_ = @import("lsp_features/semantic_tokens.zig");
_ = @import("lsp_features/completion.zig");
_ = @import("lsp_features/folding_range.zig");
_ = @import("lsp_features/inlay_hints.zig");
_ = @import("lsp_features/references.zig");
_ = @import("lsp_features/completion.zig");
_ = @import("lsp_features/selection_range.zig");
_ = @import("lsp_features/semantic_tokens.zig");
// Language features
_ = @import("language_features/cimport.zig");

View File

@ -1,16 +1,55 @@
const std = @import("std");
const builtin = @import("builtin");
const zls = @import("zls");
const URI = zls.URI;
const allocator = std.testing.allocator;
test "uri - pathRelative" {
const join1 = try URI.pathRelative(allocator, "file://project/zig", "/src/main+.zig");
defer allocator.free(join1);
try std.testing.expectEqualStrings("file://project/zig/src/main%2B.zig", join1);
test "uri - parse (Windows)" {
if (builtin.os.tag == .windows) {
const parseWin = try URI.parse(allocator, "file:///c%3A/main.zig");
defer allocator.free(parseWin);
try std.testing.expectEqualStrings("c:\\main.zig", parseWin);
const join2 = try URI.pathRelative(allocator, "file://project/zig/wow", "../]src]/]main.zig");
const parseWin2 = try URI.parse(allocator, "file:///c%3A/main%2B.zig");
defer allocator.free(parseWin2);
try std.testing.expectEqualStrings("c:\\main+.zig", parseWin2);
}
}
test "uri - parse (Unix-style)" {
if (builtin.os.tag != .windows) {
const parseUnix = try URI.parse(allocator, "file:///home/main.zig");
defer allocator.free(parseUnix);
try std.testing.expectEqualStrings("/home/main.zig", parseUnix);
const parseUnix2 = try URI.parse(allocator, "file:///home/main%2B.zig");
defer allocator.free(parseUnix2);
try std.testing.expectEqualStrings("/home/main+.zig", parseUnix2);
}
}
test "uri - fromPath" {
if (builtin.os.tag == .windows) {
const fromPathWin = try URI.fromPath(allocator, "c:\\main.zig");
defer allocator.free(fromPathWin);
try std.testing.expectEqualStrings("file:///c%3A/main.zig", fromPathWin);
}
if (builtin.os.tag != .windows) {
const fromPathUnix = try URI.fromPath(allocator, "/home/main.zig");
defer allocator.free(fromPathUnix);
try std.testing.expectEqualStrings("file:///home/main.zig", fromPathUnix);
}
}
test "uri - pathRelative" {
const join1 = try URI.pathRelative(allocator, "file:///project/zig", "/src/main+.zig");
defer allocator.free(join1);
try std.testing.expectEqualStrings("file:///project/zig/src/main%2B.zig", join1);
const join2 = try URI.pathRelative(allocator, "file:///project/zig/wow", "../]src]/]main.zig");
defer allocator.free(join2);
try std.testing.expectEqualStrings("file://project/zig/%5Dsrc%5D/%5Dmain.zig", join2);
}
try std.testing.expectEqualStrings("file:///project/zig/%5Dsrc%5D/%5Dmain.zig", join2);
}