add support for running ast-check without zig

Techarix 2023-02-14 22:02:37 +01:00 committed by Techatrix
parent c06cf6e95c
commit 4215edbc33
7 changed files with 467 additions and 294 deletions

View File

@@ -79,6 +79,7 @@ The following options are currently available.
| `include_at_in_builtins` | `bool` | `false` | Whether the @ sign should be part of the completion of builtins |
| `skip_std_references` | `bool` | `false` | When true, skips searching for references in std. Improves lookup speed for functions in user's code. Renaming and go-to-definition will continue to work as is |
| `max_detail_length` | `usize` | `1048576` | The detail field of completions is truncated to be no longer than this (in bytes) |
| `prefer_ast_check_as_child_process` | `bool` | `true` | Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork |
| `record_session` | `bool` | `false` | When true, zls will record all requests it receives and write them into `record_session_path`, so that they can be replayed with `zls replay` |
| `record_session_path` | `?[]const u8` | `null` | Output file path when `record_session` is set. The recommended file extension is *.zlsreplay |
| `replay_session_path` | `?[]const u8` | `null` | Used when calling `zls replay` for specifying the replay file. If no extra argument is given `record_session_path` is used as the default path. |
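For context, these settings come from the user's zls.json configuration. A minimal sketch (illustrative values only, not part of this commit) that keeps ast-check diagnostics enabled while opting out of spawning `zig ast-check`, so the new in-process path is used:

{
    "enable_ast_check_diagnostics": true,
    "prefer_ast_check_as_child_process": false
}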

View File

@@ -94,6 +94,11 @@
"type": "integer",
"default": "1048576"
},
"prefer_ast_check_as_child_process": {
"description": "Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork",
"type": "boolean",
"default": "true"
},
"record_session": {
"description": "When true, zls will record all requests it receives and write them into `record_session_path`, so that they can be replayed with `zls replay`",
"type": "boolean",

View File

@@ -61,6 +61,9 @@ skip_std_references: bool = false,
/// The detail field of completions is truncated to be no longer than this (in bytes)
max_detail_length: usize = 1048576,
/// Can be used in conjunction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork
prefer_ast_check_as_child_process: bool = true,
/// When true, zls will record all requests it receives and write them into `record_session_path`, so that they can be replayed with `zls replay`
record_session: bool = false,

View File

@@ -13,6 +13,8 @@ const Config = @import("Config.zig");
const ZigVersionWrapper = @import("ZigVersionWrapper.zig");
const translate_c = @import("translate_c.zig");
const ComptimeInterpreter = @import("ComptimeInterpreter.zig");
const AstGen = @import("stage2/AstGen.zig");
const Zir = @import("stage2/Zir.zig");
const DocumentStore = @This();
@@ -56,6 +58,13 @@ pub const Handle = struct {
uri: Uri,
text: [:0]const u8,
tree: Ast,
/// do not access unless `zir_status != .none`
zir: Zir = undefined,
zir_status: enum {
none,
outdated,
done,
} = .none,
/// Not null if a ComptimeInterpreter is actually used
interpreter: ?*ComptimeInterpreter = null,
document_scope: analysis.DocumentScope,
@@ -74,6 +83,7 @@ pub const Handle = struct {
allocator.destroy(interpreter);
}
self.document_scope.deinit(allocator);
if (self.zir_status != .none) self.zir.deinit(allocator);
self.tree.deinit(allocator);
allocator.free(self.text);
allocator.free(self.uri);
@@ -214,6 +224,15 @@ pub fn refreshDocument(self: *DocumentStore, uri: Uri, new_text: [:0]const u8) !
handle.tree.deinit(self.allocator);
handle.tree = new_tree;
if (self.wantZir() and handle.open and new_tree.errors.len == 0) {
const new_zir = try AstGen.generate(self.allocator, new_tree);
if (handle.zir_status != .none) handle.zir.deinit(self.allocator);
handle.zir = new_zir;
handle.zir_status = .done;
} else if (handle.zir_status == .done) {
handle.zir_status = .outdated;
}
var new_document_scope = try analysis.makeDocumentScope(self.allocator, handle.tree);
handle.document_scope.deinit(self.allocator);
handle.document_scope = new_document_scope;
@@ -696,17 +715,31 @@ fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8, open: bool
var tree = try Ast.parse(self.allocator, text, .zig);
errdefer tree.deinit(self.allocator);
// remove unused capacity
var nodes = tree.nodes.toMultiArrayList();
try nodes.setCapacity(self.allocator, nodes.len);
tree.nodes = nodes.slice();
// remove unused capacity
var tokens = tree.tokens.toMultiArrayList();
try tokens.setCapacity(self.allocator, tokens.len);
tree.tokens = tokens.slice();
const generate_zir = self.wantZir() and open and tree.errors.len == 0;
var zir: ?Zir = if (generate_zir) try AstGen.generate(self.allocator, tree) else null;
errdefer if (zir) |*code| code.deinit(self.allocator);
// remove unused capacity
if (zir) |*code| {
var instructions = code.instructions.toMultiArrayList();
try instructions.setCapacity(self.allocator, instructions.len);
code.instructions = instructions.slice();
}
var document_scope = try analysis.makeDocumentScope(self.allocator, tree);
errdefer document_scope.deinit(self.allocator);
// remove unused capacity
try document_scope.scopes.setCapacity(self.allocator, document_scope.scopes.len);
break :blk Handle{
@@ -714,6 +747,8 @@ fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8, open: bool
.uri = duped_uri,
.text = text,
.tree = tree,
.zir = if (zir) |code| code else undefined,
.zir_status = if (zir != null) .done else .none,
.document_scope = document_scope,
};
};
@@ -1082,6 +1117,12 @@ pub fn enumCompletionItems(self: DocumentStore, arena: std.mem.Allocator, handle
return try self.tagStoreCompletionItems(arena, handle, "enum_completions");
}
pub fn wantZir(self: DocumentStore) bool {
if (!self.config.enable_ast_check_diagnostics) return false;
const can_run_ast_check = std.process.can_spawn and self.config.zig_exe_path != null and self.config.prefer_ast_check_as_child_process;
return !can_run_ast_check;
}
pub fn ensureInterpreterExists(self: *DocumentStore, uri: Uri) !*ComptimeInterpreter {
var handle = self.handles.get(uri).?;
if (handle.interpreter != null) return handle.interpreter.?;
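Illustrative sketch (editor's addition, not part of this commit): how `wantZir` above and `getAstCheckDiagnostics` in the new features/diagnostics.zig file below divide the work. The `Strategy` enum and `pickStrategy` helper are hypothetical, used only to condense the decision logic into one place:

const std = @import("std");

const Strategy = enum { disabled, zig_ast_check_child_process, in_process_astgen };

fn pickStrategy(
    enable_ast_check_diagnostics: bool,
    zig_exe_path: ?[]const u8,
    prefer_ast_check_as_child_process: bool,
) Strategy {
    if (!enable_ast_check_diagnostics) return .disabled;
    // mirrors `can_run_ast_check` in DocumentStore.wantZir
    const can_run_ast_check = std.process.can_spawn and
        zig_exe_path != null and
        prefer_ast_check_as_child_process;
    if (can_run_ast_check) return .zig_ast_check_child_process;
    // otherwise wantZir() returns true: ZIR is generated and kept for open,
    // error-free files, and diagnostics come from getDiagnosticsFromZir
    return .in_process_astgen;
}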

View File

@@ -29,6 +29,7 @@ const completions = @import("features/completions.zig");
const goto = @import("features/goto.zig");
const hover_handler = @import("features/hover.zig");
const selection_range = @import("features/selection_range.zig");
const diagnostics_gen = @import("features/diagnostics.zig");
const tres = @import("tres");
@@ -209,285 +210,6 @@ fn showMessage(
});
}
fn generateDiagnostics(server: *Server, handle: DocumentStore.Handle) error{OutOfMemory}!types.PublishDiagnosticsParams {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
std.debug.assert(server.client_capabilities.supports_publish_diagnostics);
const tree = handle.tree;
var allocator = server.arena.allocator();
var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
for (tree.errors) |err| {
var mem_buffer: [256]u8 = undefined;
var fbs = std.io.fixedBufferStream(&mem_buffer);
tree.renderError(err, fbs.writer()) catch if (std.debug.runtime_safety) unreachable else continue; // if an error occurs here increase buffer size
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, err.token, server.offset_encoding),
.severity = .Error,
.code = .{ .string = @tagName(err.tag) },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, fbs.getWritten()),
// .relatedInformation = undefined
});
}
if (server.config.enable_ast_check_diagnostics and tree.errors.len == 0) {
getAstCheckDiagnostics(server, handle, &diagnostics) catch |err| {
log.err("failed to run ast-check: {}", .{err});
};
}
if (server.config.warn_style) {
var node: u32 = 0;
while (node < tree.nodes.len) : (node += 1) {
if (ast.isBuiltinCall(tree, node)) {
const builtin_token = tree.nodes.items(.main_token)[node];
const call_name = tree.tokenSlice(builtin_token);
if (!std.mem.eql(u8, call_name, "@import")) continue;
var buffer: [2]Ast.Node.Index = undefined;
const params = ast.builtinCallParams(tree, node, &buffer).?;
if (params.len != 1) continue;
const import_str_token = tree.nodes.items(.main_token)[params[0]];
const import_str = tree.tokenSlice(import_str_token);
if (std.mem.startsWith(u8, import_str, "\"./")) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, import_str_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "dot_slash_import" },
.source = "zls",
.message = "A ./ is not needed in imports",
});
}
}
}
// TODO: style warnings for types, values and declarations below root scope
if (tree.errors.len == 0) {
for (tree.rootDecls()) |decl_idx| {
const decl = tree.nodes.items(.tag)[decl_idx];
switch (decl) {
.fn_proto,
.fn_proto_multi,
.fn_proto_one,
.fn_proto_simple,
.fn_decl,
=> blk: {
var buf: [1]Ast.Node.Index = undefined;
const func = tree.fullFnProto(&buf, decl_idx).?;
if (func.extern_export_inline_token != null) break :blk;
if (func.name_token) |name_token| {
const is_type_function = Analyser.isTypeFunction(tree, func);
const func_name = tree.tokenSlice(name_token);
if (!is_type_function and !Analyser.isCamelCase(func_name)) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, name_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "bad_style" },
.source = "zls",
.message = "Functions should be camelCase",
});
} else if (is_type_function and !Analyser.isPascalCase(func_name)) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, name_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "bad_style" },
.source = "zls",
.message = "Type functions should be PascalCase",
});
}
}
},
else => {},
}
}
}
}
for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| {
const result = server.document_store.cimports.get(hash) orelse continue;
if (result != .failure) continue;
const stderr = std.mem.trim(u8, result.failure, " ");
var pos_and_diag_iterator = std.mem.split(u8, stderr, ":");
_ = pos_and_diag_iterator.next(); // skip file path
_ = pos_and_diag_iterator.next(); // skip line
_ = pos_and_diag_iterator.next(); // skip character
try diagnostics.append(allocator, .{
.range = offsets.nodeToRange(handle.tree, node, server.offset_encoding),
.severity = .Error,
.code = .{ .string = "cImport" },
.source = "zls",
.message = try allocator.dupe(u8, pos_and_diag_iterator.rest()),
});
}
if (server.config.highlight_global_var_declarations) {
const main_tokens = tree.nodes.items(.main_token);
const tags = tree.tokens.items(.tag);
for (tree.rootDecls()) |decl| {
const decl_tag = tree.nodes.items(.tag)[decl];
const decl_main_token = tree.nodes.items(.main_token)[decl];
switch (decl_tag) {
.simple_var_decl,
.aligned_var_decl,
.local_var_decl,
.global_var_decl,
=> {
if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable
// uncomment this to get a list :)
//log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)});
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, decl_main_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "highlight_global_var_declarations" },
.source = "zls",
.message = "Global var declaration",
});
},
else => {},
}
}
}
if (handle.interpreter) |int| {
try diagnostics.ensureUnusedCapacity(allocator, int.errors.count());
var err_it = int.errors.iterator();
while (err_it.next()) |err| {
diagnostics.appendAssumeCapacity(.{
.range = offsets.nodeToRange(tree, err.key_ptr.*, server.offset_encoding),
.severity = .Error,
.code = .{ .string = err.value_ptr.code },
.source = "zls",
.message = err.value_ptr.message,
});
}
}
// try diagnostics.appendSlice(allocator, handle.interpreter.?.diagnostics.items);
return .{
.uri = handle.uri,
.diagnostics = diagnostics.items,
};
}
fn getAstCheckDiagnostics(
server: *Server,
handle: DocumentStore.Handle,
diagnostics: *std.ArrayListUnmanaged(types.Diagnostic),
) !void {
var allocator = server.arena.allocator();
const zig_exe_path = server.config.zig_exe_path orelse return;
var process = std.ChildProcess.init(&[_][]const u8{ zig_exe_path, "ast-check", "--color", "off" }, server.allocator);
process.stdin_behavior = .Pipe;
process.stderr_behavior = .Pipe;
process.spawn() catch |err| {
log.warn("Failed to spawn zig ast-check process, error: {}", .{err});
return;
};
try process.stdin.?.writeAll(handle.text);
process.stdin.?.close();
process.stdin = null;
const stderr_bytes = try process.stderr.?.reader().readAllAlloc(server.allocator, std.math.maxInt(usize));
defer server.allocator.free(stderr_bytes);
const term = process.wait() catch |err| {
log.warn("Failed to await zig ast-check process, error: {}", .{err});
return;
};
if (term != .Exited) return;
var last_diagnostic: ?types.Diagnostic = null;
// we don't store DiagnosticRelatedInformation in last_diagnostic; instead
// it's stored in last_related_diagnostics because we need an ArrayList
var last_related_diagnostics: std.ArrayListUnmanaged(types.DiagnosticRelatedInformation) = .{};
// NOTE: I believe that with color off it's one diag per line; is this correct?
var line_iterator = std.mem.split(u8, stderr_bytes, "\n");
while (line_iterator.next()) |line| lin: {
if (!std.mem.startsWith(u8, line, "<stdin>")) continue;
var pos_and_diag_iterator = std.mem.split(u8, line, ":");
const maybe_first = pos_and_diag_iterator.next();
if (maybe_first) |first| {
if (first.len <= 1) break :lin;
} else break;
const utf8_position = types.Position{
.line = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1,
.character = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1,
};
// zig uses utf-8 encoding for character offsets
const position = offsets.convertPositionEncoding(handle.text, utf8_position, .@"utf-8", server.offset_encoding);
const range = offsets.tokenPositionToRange(handle.text, position, server.offset_encoding);
const msg = pos_and_diag_iterator.rest()[1..];
if (std.mem.startsWith(u8, msg, "note: ")) {
try last_related_diagnostics.append(allocator, .{
.location = .{
.uri = handle.uri,
.range = range,
},
.message = try server.arena.allocator().dupe(u8, msg["note: ".len..]),
});
continue;
}
if (last_diagnostic) |*diagnostic| {
diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator);
try diagnostics.append(allocator, diagnostic.*);
last_diagnostic = null;
}
if (std.mem.startsWith(u8, msg, "error: ")) {
last_diagnostic = types.Diagnostic{
.range = range,
.severity = .Error,
.code = .{ .string = "ast_check" },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, msg["error: ".len..]),
};
} else {
last_diagnostic = types.Diagnostic{
.range = range,
.severity = .Error,
.code = .{ .string = "ast_check" },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, msg),
};
}
}
if (last_diagnostic) |*diagnostic| {
diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator);
try diagnostics.append(allocator, diagnostic.*);
last_diagnostic = null;
}
}
fn getAutofixMode(server: *Server) enum {
on_save,
will_save_wait_until,
@@ -507,12 +229,11 @@ fn getAutofixMode(server: *Server) enum {
/// caller owns returned memory.
pub fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const DocumentStore.Handle) error{OutOfMemory}!std.ArrayListUnmanaged(types.TextEdit) {
if (!server.config.enable_ast_check_diagnostics) return .{};
if (handle.tree.errors.len != 0) return .{};
var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
getAstCheckDiagnostics(server, handle.*, &diagnostics) catch |err| {
log.err("failed to run ast-check: {}", .{err});
};
try diagnostics_gen.getAstCheckDiagnostics(server, handle.*, &diagnostics);
if (diagnostics.items.len == 0) return .{};
var builder = code_actions.Builder{
.arena = server.arena.allocator(),
@@ -765,6 +486,16 @@ fn initializeHandler(server: *Server, request: types.InitializeParams) Error!typ
, .{server.config.record_session_path});
}
if (server.config.enable_ast_check_diagnostics and
server.config.prefer_ast_check_as_child_process)
{
if (!std.process.can_spawn) {
log.info("'prefer_ast_check_as_child_process' is ignored because your OS can't spawn a child process", .{});
} else if (server.config.zig_exe_path == null) {
log.info("'prefer_ast_check_as_child_process' is ignored because Zig could not be found", .{});
}
}
return .{
.serverInfo = .{
.name = "zls",
@@ -1025,7 +756,7 @@ fn openDocumentHandler(server: *Server, notification: types.DidOpenTextDocumentP
if (server.client_capabilities.supports_publish_diagnostics) blk: {
if (!std.process.can_spawn) break :blk;
const diagnostics = try server.generateDiagnostics(handle);
const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle);
server.sendNotification("textDocument/publishDiagnostics", diagnostics);
}
}
@@ -1042,7 +773,7 @@ fn changeDocumentHandler(server: *Server, notification: types.DidChangeTextDocum
if (server.client_capabilities.supports_publish_diagnostics) blk: {
if (!std.process.can_spawn) break :blk;
const diagnostics = try server.generateDiagnostics(handle.*);
const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle.*);
server.sendNotification("textDocument/publishDiagnostics", diagnostics);
}
}
@@ -1054,7 +785,7 @@ fn saveDocumentHandler(server: *Server, notification: types.DidSaveTextDocumentP
const handle = server.document_store.getHandle(uri) orelse return;
try server.document_store.applySave(handle);
if (std.process.can_spawn and server.getAutofixMode() == .on_save) {
if (server.getAutofixMode() == .on_save) {
var text_edits = try server.autofix(allocator, handle);
var workspace_edit = types.WorkspaceEdit{ .changes = .{} };
@@ -1082,7 +813,6 @@ fn willSaveWaitUntilHandler(server: *Server, request: types.WillSaveTextDocument
const handle = server.document_store.getHandle(request.textDocument.uri) orelse return null;
if (!std.process.can_spawn) return null;
var text_edits = try server.autofix(allocator, handle);
return try text_edits.toOwnedSlice(allocator);
@@ -1185,7 +915,7 @@ pub fn hoverHandler(server: *Server, request: types.HoverParams) Error!?types.Ho
// TODO: Figure out a better solution for comptime interpreter diags
if (server.client_capabilities.supports_publish_diagnostics) blk: {
if (!std.process.can_spawn) break :blk;
const diagnostics = try server.generateDiagnostics(handle.*);
const diagnostics = try diagnostics_gen.generateDiagnostics(server, handle.*);
server.sendNotification("textDocument/publishDiagnostics", diagnostics);
}
@@ -1459,12 +1189,8 @@ fn codeActionHandler(server: *Server, request: types.CodeActionParams) Error!?[]
// as of right now, only ast-check errors may get a code action
var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
if (server.config.enable_ast_check_diagnostics and handle.tree.errors.len == 0) blk: {
if (!std.process.can_spawn) break :blk;
getAstCheckDiagnostics(server, handle.*, &diagnostics) catch |err| {
log.err("failed to run ast-check: {}", .{err});
return error.InternalError;
};
if (server.config.enable_ast_check_diagnostics and handle.tree.errors.len == 0) {
try diagnostics_gen.getAstCheckDiagnostics(server, handle.*, &diagnostics);
}
var actions = std.ArrayListUnmanaged(types.CodeAction){};

View File

@@ -107,6 +107,12 @@
"type": "usize",
"default": "1048576"
},
{
"name": "prefer_ast_check_as_child_process",
"description": "Can be used in conjuction with `enable_ast_check_diagnostics` to favor using `zig ast-check` instead of ZLS's fork",
"type": "bool",
"default": "true"
},
{
"name": "record_session",
"description": "When true, zls will record all requests it receives and write them into `record_session_path`, so that they can be replayed with `zls replay`",

View File

@@ -0,0 +1,391 @@
const std = @import("std");
const Ast = std.zig.Ast;
const log = std.log.scoped(.zls_diagnostics);
const Server = @import("../Server.zig");
const DocumentStore = @import("../DocumentStore.zig");
const types = @import("../lsp.zig");
const Analyser = @import("../analysis.zig");
const ast = @import("../ast.zig");
const offsets = @import("../offsets.zig");
const tracy = @import("../tracy.zig");
const Module = @import("../stage2/Module.zig");
const Zir = @import("../stage2/Zir.zig");
pub fn generateDiagnostics(server: *Server, handle: DocumentStore.Handle) error{OutOfMemory}!types.PublishDiagnosticsParams {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
std.debug.assert(server.client_capabilities.supports_publish_diagnostics);
const tree = handle.tree;
var allocator = server.arena.allocator();
var diagnostics = std.ArrayListUnmanaged(types.Diagnostic){};
for (tree.errors) |err| {
var mem_buffer: [256]u8 = undefined;
var fbs = std.io.fixedBufferStream(&mem_buffer);
tree.renderError(err, fbs.writer()) catch if (std.debug.runtime_safety) unreachable else continue; // if an error occurs here increase buffer size
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, err.token, server.offset_encoding),
.severity = .Error,
.code = .{ .string = @tagName(err.tag) },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, fbs.getWritten()),
// .relatedInformation = undefined
});
}
if (server.config.enable_ast_check_diagnostics and tree.errors.len == 0) {
try getAstCheckDiagnostics(server, handle, &diagnostics);
}
if (server.config.warn_style) {
var node: u32 = 0;
while (node < tree.nodes.len) : (node += 1) {
if (ast.isBuiltinCall(tree, node)) {
const builtin_token = tree.nodes.items(.main_token)[node];
const call_name = tree.tokenSlice(builtin_token);
if (!std.mem.eql(u8, call_name, "@import")) continue;
var buffer: [2]Ast.Node.Index = undefined;
const params = ast.builtinCallParams(tree, node, &buffer).?;
if (params.len != 1) continue;
const import_str_token = tree.nodes.items(.main_token)[params[0]];
const import_str = tree.tokenSlice(import_str_token);
if (std.mem.startsWith(u8, import_str, "\"./")) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, import_str_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "dot_slash_import" },
.source = "zls",
.message = "A ./ is not needed in imports",
});
}
}
}
// TODO: style warnings for types, values and declarations below root scope
if (tree.errors.len == 0) {
for (tree.rootDecls()) |decl_idx| {
const decl = tree.nodes.items(.tag)[decl_idx];
switch (decl) {
.fn_proto,
.fn_proto_multi,
.fn_proto_one,
.fn_proto_simple,
.fn_decl,
=> blk: {
var buf: [1]Ast.Node.Index = undefined;
const func = tree.fullFnProto(&buf, decl_idx).?;
if (func.extern_export_inline_token != null) break :blk;
if (func.name_token) |name_token| {
const is_type_function = Analyser.isTypeFunction(tree, func);
const func_name = tree.tokenSlice(name_token);
if (!is_type_function and !Analyser.isCamelCase(func_name)) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, name_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "bad_style" },
.source = "zls",
.message = "Functions should be camelCase",
});
} else if (is_type_function and !Analyser.isPascalCase(func_name)) {
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, name_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "bad_style" },
.source = "zls",
.message = "Type functions should be PascalCase",
});
}
}
},
else => {},
}
}
}
}
for (handle.cimports.items(.hash), handle.cimports.items(.node)) |hash, node| {
const result = server.document_store.cimports.get(hash) orelse continue;
if (result != .failure) continue;
const stderr = std.mem.trim(u8, result.failure, " ");
var pos_and_diag_iterator = std.mem.split(u8, stderr, ":");
_ = pos_and_diag_iterator.next(); // skip file path
_ = pos_and_diag_iterator.next(); // skip line
_ = pos_and_diag_iterator.next(); // skip character
try diagnostics.append(allocator, .{
.range = offsets.nodeToRange(handle.tree, node, server.offset_encoding),
.severity = .Error,
.code = .{ .string = "cImport" },
.source = "zls",
.message = try allocator.dupe(u8, pos_and_diag_iterator.rest()),
});
}
if (server.config.highlight_global_var_declarations) {
const main_tokens = tree.nodes.items(.main_token);
const tags = tree.tokens.items(.tag);
for (tree.rootDecls()) |decl| {
const decl_tag = tree.nodes.items(.tag)[decl];
const decl_main_token = tree.nodes.items(.main_token)[decl];
switch (decl_tag) {
.simple_var_decl,
.aligned_var_decl,
.local_var_decl,
.global_var_decl,
=> {
if (tags[main_tokens[decl]] != .keyword_var) continue; // skip anything immutable
// uncomment this to get a list :)
//log.debug("possible global variable \"{s}\"", .{tree.tokenSlice(decl_main_token + 1)});
try diagnostics.append(allocator, .{
.range = offsets.tokenToRange(tree, decl_main_token, server.offset_encoding),
.severity = .Hint,
.code = .{ .string = "highlight_global_var_declarations" },
.source = "zls",
.message = "Global var declaration",
});
},
else => {},
}
}
}
if (handle.interpreter) |int| {
try diagnostics.ensureUnusedCapacity(allocator, int.errors.count());
var err_it = int.errors.iterator();
while (err_it.next()) |err| {
diagnostics.appendAssumeCapacity(.{
.range = offsets.nodeToRange(tree, err.key_ptr.*, server.offset_encoding),
.severity = .Error,
.code = .{ .string = err.value_ptr.code },
.source = "zls",
.message = err.value_ptr.message,
});
}
}
// try diagnostics.appendSlice(allocator, handle.interpreter.?.diagnostics.items);
return .{
.uri = handle.uri,
.diagnostics = diagnostics.items,
};
}
pub fn getAstCheckDiagnostics(
server: *Server,
handle: DocumentStore.Handle,
diagnostics: *std.ArrayListUnmanaged(types.Diagnostic),
) error{OutOfMemory}!void {
std.debug.assert(server.config.enable_ast_check_diagnostics);
std.debug.assert(handle.tree.errors.len == 0);
if (server.config.prefer_ast_check_as_child_process and
std.process.can_spawn and
server.config.zig_exe_path != null)
{
getDiagnosticsFromAstCheck(server, handle, diagnostics) catch |err| {
log.err("failed to run ast-check: {}", .{err});
};
} else {
std.debug.assert(server.document_store.wantZir());
switch (handle.zir_status) {
.none, .outdated => {},
.done => try getDiagnosticsFromZir(server, handle, diagnostics),
}
}
}
fn getDiagnosticsFromAstCheck(
server: *Server,
handle: DocumentStore.Handle,
diagnostics: *std.ArrayListUnmanaged(types.Diagnostic),
) !void {
comptime std.debug.assert(std.process.can_spawn);
std.debug.assert(server.config.zig_exe_path != null);
var allocator = server.arena.allocator();
const zig_exe_path = server.config.zig_exe_path.?;
var process = std.ChildProcess.init(&[_][]const u8{ zig_exe_path, "ast-check", "--color", "off" }, server.allocator);
process.stdin_behavior = .Pipe;
process.stderr_behavior = .Pipe;
process.spawn() catch |err| {
log.warn("Failed to spawn zig ast-check process, error: {}", .{err});
return;
};
try process.stdin.?.writeAll(handle.text);
process.stdin.?.close();
process.stdin = null;
const stderr_bytes = try process.stderr.?.reader().readAllAlloc(server.allocator, std.math.maxInt(usize));
defer server.allocator.free(stderr_bytes);
const term = process.wait() catch |err| {
log.warn("Failed to await zig ast-check process, error: {}", .{err});
return;
};
if (term != .Exited) return;
var last_diagnostic: ?types.Diagnostic = null;
// we don't store DiagnosticRelatedInformation in last_diagnostic; instead
// it's stored in last_related_diagnostics because we need an ArrayList
var last_related_diagnostics: std.ArrayListUnmanaged(types.DiagnosticRelatedInformation) = .{};
// NOTE: I believe that with color off it's one diag per line; is this correct?
var line_iterator = std.mem.split(u8, stderr_bytes, "\n");
while (line_iterator.next()) |line| lin: {
if (!std.mem.startsWith(u8, line, "<stdin>")) continue;
var pos_and_diag_iterator = std.mem.split(u8, line, ":");
const maybe_first = pos_and_diag_iterator.next();
if (maybe_first) |first| {
if (first.len <= 1) break :lin;
} else break;
const utf8_position = types.Position{
.line = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1,
.character = (try std.fmt.parseInt(u32, pos_and_diag_iterator.next().?, 10)) - 1,
};
// zig uses utf-8 encoding for character offsets
const position = offsets.convertPositionEncoding(handle.text, utf8_position, .@"utf-8", server.offset_encoding);
const range = offsets.tokenPositionToRange(handle.text, position, server.offset_encoding);
const msg = pos_and_diag_iterator.rest()[1..];
if (std.mem.startsWith(u8, msg, "note: ")) {
try last_related_diagnostics.append(allocator, .{
.location = .{
.uri = handle.uri,
.range = range,
},
.message = try server.arena.allocator().dupe(u8, msg["note: ".len..]),
});
continue;
}
if (last_diagnostic) |*diagnostic| {
diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator);
try diagnostics.append(allocator, diagnostic.*);
last_diagnostic = null;
}
if (std.mem.startsWith(u8, msg, "error: ")) {
last_diagnostic = types.Diagnostic{
.range = range,
.severity = .Error,
.code = .{ .string = "ast_check" },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, msg["error: ".len..]),
};
} else {
last_diagnostic = types.Diagnostic{
.range = range,
.severity = .Error,
.code = .{ .string = "ast_check" },
.source = "zls",
.message = try server.arena.allocator().dupe(u8, msg),
};
}
}
if (last_diagnostic) |*diagnostic| {
diagnostic.relatedInformation = try last_related_diagnostics.toOwnedSlice(allocator);
try diagnostics.append(allocator, diagnostic.*);
last_diagnostic = null;
}
}
fn getDiagnosticsFromZir(
server: *Server,
handle: DocumentStore.Handle,
diagnostics: *std.ArrayListUnmanaged(types.Diagnostic),
) error{OutOfMemory}!void {
std.debug.assert(handle.zir_status != .none);
const allocator = server.arena.allocator();
const payload_index = handle.zir.extra[@enumToInt(Zir.ExtraIndex.compile_errors)];
if (payload_index == 0) return;
const header = handle.zir.extraData(Zir.Inst.CompileErrors, payload_index);
const items_len = header.data.items_len;
try diagnostics.ensureUnusedCapacity(allocator, items_len);
var extra_index = header.end;
for (0..items_len) |_| {
const item = handle.zir.extraData(Zir.Inst.CompileErrors.Item, extra_index);
extra_index = item.end;
const err_loc = blk: {
if (item.data.node != 0) {
break :blk offsets.nodeToLoc(handle.tree, item.data.node);
}
const loc = offsets.tokenToLoc(handle.tree, item.data.token);
break :blk offsets.Loc{
.start = loc.start + item.data.byte_offset,
.end = loc.end,
};
};
var notes: []types.DiagnosticRelatedInformation = &.{};
if (item.data.notes != 0) {
const block = handle.zir.extraData(Zir.Inst.Block, item.data.notes);
const body = handle.zir.extra[block.end..][0..block.data.body_len];
notes = try allocator.alloc(types.DiagnosticRelatedInformation, body.len);
for (notes, body) |*note, note_index| {
const note_item = handle.zir.extraData(Zir.Inst.CompileErrors.Item, note_index);
const msg = handle.zir.nullTerminatedString(note_item.data.msg);
const loc = blk: {
if (note_item.data.node != 0) {
break :blk offsets.nodeToLoc(handle.tree, note_item.data.node);
}
const loc = offsets.tokenToLoc(handle.tree, note_item.data.token);
break :blk offsets.Loc{
.start = loc.start + note_item.data.byte_offset,
.end = loc.end,
};
};
note.* = .{
.location = .{
.uri = handle.uri,
.range = offsets.locToRange(handle.text, loc, server.offset_encoding),
},
.message = msg,
};
}
}
const msg = handle.zir.nullTerminatedString(item.data.msg);
diagnostics.appendAssumeCapacity(.{
.range = offsets.locToRange(handle.text, err_loc, server.offset_encoding),
.severity = .Error,
.code = .{ .string = "ast_check" },
.source = "zls",
.message = msg,
.relatedInformation = if (notes.len != 0) notes else null,
});
}
}