Fix build runner (#1029)

* Fix build runner

* Add zls module, bump version

* Fix path from root issue in modules (thanks daremakemyday from Twitch :))

* Libraryify

* remove build_runner backward compatibility

* Remove some `= undefined`s in Server.zig

Makes library use less dangerous

* Consistent mem model + custom build runner possible

* Add build runner utils for third-party tooling

* Make logs removable in libraries with zls_ prefix

* Fix build runner CI

* Expose references

* Use new addModule
This commit is contained in:
Auguste Rame 2023-03-05 14:08:31 -05:00 committed by GitHub
parent a78950ce82
commit 121f68b970
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
19 changed files with 203 additions and 271 deletions

View File

@ -17,7 +17,7 @@ jobs:
check_build_runner:
strategy:
matrix:
zig_version: [0.9.1, 0.10.1, master]
zig_version: [master]
runs-on: ubuntu-latest
@ -33,16 +33,7 @@ jobs:
with:
version: ${{ matrix.zig_version }}
- name: Create temp zig project
run: |
mkdir $RUNNER_TEMP/TEMP_ZIG_PROJECT
cd $RUNNER_TEMP/TEMP_ZIG_PROJECT
zig init-exe
- name: Check build_runner builds on master
if: ${{ matrix.zig_version == 'master' }}
run: zig build-exe $GITHUB_WORKSPACE/src/special/build_runner.zig --mod @build@::$RUNNER_TEMP/TEMP_ZIG_PROJECT/build.zig --deps @build@
- name: Check build_runner builds on older tagged releases
if: ${{ matrix.zig_version != 'master' }}
run: zig build-exe $GITHUB_WORKSPACE/src/special/build_runner.zig --pkg-begin @build@ $RUNNER_TEMP/TEMP_ZIG_PROJECT/build.zig --pkg-end
run: |
pwd
zig build --build-runner src/special/build_runner.zig

View File

@ -17,6 +17,7 @@ The Zig Language Server (zls) is a tool that implements Microsoft's Language Ser
- [Per-build Configuration Options](#per-build-configuration-options)
- [`BuildOption`](#buildoption)
- [Features](#features)
- [Using as a library](#using-as-a-library)
- [Related Projects](#related-projects)
- [Quick Thanks :)](#quick-thanks-)
- [License](#license)
@ -129,6 +130,10 @@ The following LSP features are supported:
- Selection ranges
- Folding regions
## Using as a library
You can use zls as a library! [Check out this demo repo](https://github.com/zigtools/zls-as-lib-demo) for a good reference.
## Related Projects
- [`sublime-zig-language` by @prime31](https://github.com/prime31/sublime-zig-language)
@ -139,7 +144,7 @@ The following LSP features are supported:
- [`known-folders` by @ziglibs](https://github.com/ziglibs/known-folders)
- Provides API to access known folders on Linux, Windows and Mac OS
- [`zls` by @zigtools](https://github.com/zigtools/zls)
- Used by many zls developers to more efficently work on zls
- Used by many zls developers to more efficiently work on zls
## Quick Thanks :)

View File

@ -7,7 +7,7 @@ const zls_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
pub fn build(b: *std.build.Builder) !void {
comptime {
const current_zig = builtin.zig_version;
const min_zig = std.SemanticVersion.parse("0.11.0-dev.1817+f6c934677") catch return; // package manager hashes made consistent on windows
const min_zig = std.SemanticVersion.parse("0.11.0-dev.1836+28364166e") catch return; // package manager stuff + --build-runner + zls as a library
if (current_zig.order(min_zig) == .lt) {
@compileError(std.fmt.comptimePrint("Your Zig version v{} does not meet the minimum build requirement of v{}", .{ current_zig, min_zig }));
}
@ -160,7 +160,7 @@ pub fn build(b: *std.build.Builder) !void {
const build_options_module = exe_options.createModule();
const zls_module = b.createModule(.{
const zls_module = b.addModule("zls", .{
.source_file = .{ .path = "src/zls.zig" },
.dependencies = &.{
.{ .name = "known-folders", .module = known_folders_module },

View File

@ -4,16 +4,16 @@
.dependencies = .{
.known_folders = .{
.url = "https://github.com/ziglibs/known-folders/archive/53fe3b676f32e59d46f4fd201d7ab200e5f6cb98.tar.gz",
.hash = "12203e18503cd0fa097a4404b0c4f8535a68536886b536ae51c786455238ba5f183b",
.url = "https://github.com/ziglibs/known-folders/archive/d13ba6137084e55f873f6afb67447fe8906cc951.tar.gz",
.hash = "122028c00915d9b37296059be8a3883c718dbb5bd174350caedf152fed1f46f99607",
},
.tres = .{
.url = "https://github.com/ziglibs/tres/archive/d8b0c24a945da02fffdae731edd1903c6889e73c.tar.gz",
.hash = "12209914477ef8c4ef99accb293c4a7ec90acdd9e77d3f60f5e056449cbfad3a7fd8",
.url = "https://github.com/ziglibs/tres/archive/707a09313b42e05d6ae22d1590499eece5f968ce.tar.gz",
.hash = "1220beaae8d152baa941a10b7ef3d3a59d093b257047035e2373c3c2f876ad29ccc8",
},
.diffz = .{
.url = "https://github.com/ziglibs/diffz/archive/efc91679b000a2d7f86fb40930f0a95a0d349bff.tar.gz",
.hash = "122019f94ec81a7cf6e9810983603dbacfc65ed30aea8f277f05ba0ce7c1511fff3d",
.url = "https://github.com/ziglibs/diffz/archive/b966296b4489eb082b0831ec9a37d6f5e1906040.tar.gz",
.hash = "1220ed4aed884221108ad39f2658b69a91653e0bbc8ce429bc7f1bc4e58f6a751553",
},
},
}

View File

@ -17,7 +17,7 @@ pub const Index = InternPool.Index;
pub const Key = InternPool.Key;
pub const ComptimeInterpreter = @This();
const log = std.log.scoped(.comptime_interpreter);
const log = std.log.scoped(.zls_comptime_interpreter);
allocator: std.mem.Allocator,
ip: InternPool,

View File

@ -4,7 +4,7 @@ const types = @import("lsp.zig");
const URI = @import("uri.zig");
const analysis = @import("analysis.zig");
const offsets = @import("offsets.zig");
const log = std.log.scoped(.store);
const log = std.log.scoped(.zls_store);
const Ast = std.zig.Ast;
const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig");
const BuildConfig = @import("special/build_runner.zig").BuildConfig;
@ -115,13 +115,13 @@ pub fn deinit(self: *DocumentStore) void {
self.cimports.deinit(self.allocator);
}
/// returns a handle to the given document
/// Returns a handle to the given document
pub fn getHandle(self: *DocumentStore, uri: Uri) ?*const Handle {
return self.handles.get(uri);
}
/// returns a handle to the given document
/// will load the document from disk if it hasn't been already
/// Returns a handle to the given document
/// Will load the document from disk if it hasn't been already
pub fn getOrLoadHandle(self: *DocumentStore, uri: Uri) ?*const Handle {
return self.getOrLoadHandleInternal(uri) catch null;
}
@ -141,7 +141,9 @@ fn getOrLoadHandleInternal(self: *DocumentStore, uri: Uri) !?*const Handle {
return gop.value_ptr.*;
}
pub fn openDocument(self: *DocumentStore, uri: Uri, text: []const u8) error{OutOfMemory}!Handle {
/// Takes ownership of `new_text` which has to be allocated
/// with this DocumentStore's allocator
pub fn openDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8) error{OutOfMemory}!Handle {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -157,9 +159,7 @@ pub fn openDocument(self: *DocumentStore, uri: Uri, text: []const u8) error{OutO
var handle = try self.allocator.create(Handle);
errdefer self.allocator.destroy(handle);
const duped_text = try self.allocator.dupeZ(u8, text);
handle.* = try self.createDocument(uri, duped_text, true);
handle.* = try self.createDocument(uri, text, true);
errdefer handle.deinit(self.allocator);
try self.handles.putNoClobber(self.allocator, handle.uri, handle);
@ -189,7 +189,8 @@ pub fn closeDocument(self: *DocumentStore, uri: Uri) void {
self.garbageCollectionBuildFiles() catch {};
}
/// takes ownership of `new_text` which has to be allocated with `self.allocator`
/// Takes ownership of `new_text` which has to be allocated
/// with this DocumentStore's allocator
pub fn refreshDocument(self: *DocumentStore, uri: Uri, new_text: [:0]const u8) !void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -430,13 +431,45 @@ fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: Bu
return try std.json.parse(BuildAssociatedConfig, &token_stream, .{ .allocator = allocator });
}
/// runs the build.zig and extracts include directories and packages
/// has to be freed with `std.json.parseFree`
fn loadBuildConfiguration(
/// Appends the base `zig build` invocation to `args`:
/// `{ zig_exe_path, "build", "--build-runner", build_runner_path }`.
/// Nothing is duplicated — the appended entries alias the caller's
/// `zig_exe_path`/`build_runner_path` slices plus string literals, so the
/// caller must keep those slices alive as long as `args` is used.
/// `allocator` is only used to grow the list itself.
pub fn populateBuildConfigurationArgs(
allocator: std.mem.Allocator,
args: *std.ArrayListUnmanaged([]const u8),
zig_exe_path: []const u8,
build_runner_path: []const u8,
) error{OutOfMemory}!void {
try args.appendSlice(allocator, &.{ zig_exe_path, "build", "--build-runner", build_runner_path });
}
/// Runs the build.zig and returns the run result.
/// `args` should be the output of `populateBuildConfigurationArgs`
/// plus any additional custom arguments.
/// An arena allocator is recommended: both the temporary cwd string and the
/// returned `ExecResult`'s stdout/stderr are allocated with `allocator`,
/// and the caller owns the latter.
pub fn executeBuildRunner(
allocator: std.mem.Allocator,
build_file_path: []const u8,
args: []const []const u8,
) !std.ChildProcess.ExecResult {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
// Run the child process from the directory containing build.zig
// (resolve `<build_file_path>/../`), so relative paths in the build
// script behave as if `zig build` were invoked there.
const build_file_directory_path = try std.fs.path.resolve(allocator, &.{ build_file_path, "../" });
defer allocator.free(build_file_directory_path);
return try std.ChildProcess.exec(.{
.allocator = allocator,
.argv = args,
.cwd = build_file_directory_path,
});
}
/// Runs the build.zig and extracts include directories and packages
/// Has to be freed with `std.json.parseFree`
pub fn loadBuildConfiguration(
allocator: std.mem.Allocator,
build_file: BuildFile,
config: Config,
runtime_zig_version: ZigVersionWrapper,
_: ZigVersionWrapper,
) !BuildConfig {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -446,54 +479,14 @@ fn loadBuildConfiguration(
const arena_allocator = arena.allocator();
const build_file_path = try URI.parse(arena_allocator, build_file.uri);
const directory_path = try std.fs.path.resolve(arena_allocator, &.{ build_file_path, "../" });
// TODO extract this option from `BuildAssociatedConfig.BuildOption`
const zig_cache_root: []const u8 = try std.fs.path.join(arena_allocator, &.{ directory_path, "zig-cache" });
// NOTE: This used to be backwards compatible
// but then I came in like a wrecking ball
// introduction of modified module cli arguments https://github.com/ziglang/zig/pull/14664
const module_version = comptime std.SemanticVersion.parse("0.11.0-dev.1718+2737dce84") catch unreachable;
const use_new_module_cli = runtime_zig_version.version.order(module_version) != .lt;
const standard_args = if (use_new_module_cli) blk: {
const build_module = try std.fmt.allocPrint(arena_allocator, "@build@::{s}", .{build_file_path});
break :blk [_][]const u8{
config.zig_exe_path.?,
"run",
config.build_runner_path.?,
"--cache-dir",
config.global_cache_path.?,
"--mod",
build_module,
"--deps",
"@build@",
"--",
config.zig_exe_path.?,
directory_path,
zig_cache_root,
config.build_runner_global_cache_path.?,
};
} else [_][]const u8{
config.zig_exe_path.?,
"run",
config.build_runner_path.?,
"--cache-dir",
config.global_cache_path.?,
"--pkg-begin",
"@build@",
build_file_path,
"--pkg-end",
"--",
config.zig_exe_path.?,
directory_path,
zig_cache_root,
config.build_runner_global_cache_path.?,
};
const arg_length = standard_args.len + if (build_file.build_associated_config) |cfg| if (cfg.build_options) |options| options.len else 0 else 0;
const arg_length = 4 + if (build_file.build_associated_config) |cfg| if (cfg.build_options) |options| options.len else 0 else 0;
var args = try std.ArrayListUnmanaged([]const u8).initCapacity(arena_allocator, arg_length);
args.appendSliceAssumeCapacity(standard_args[0..]);
try populateBuildConfigurationArgs(arena_allocator, &args, config.zig_exe_path.?, config.build_runner_path.?);
if (build_file.build_associated_config) |cfg| {
if (cfg.build_options) |options| {
for (options) |opt| {
@ -502,16 +495,7 @@ fn loadBuildConfiguration(
}
}
const zig_run_result = try std.ChildProcess.exec(.{
.allocator = arena_allocator,
.argv = args.items,
.cwd = try std.fs.path.resolve(arena_allocator, &.{ config.zig_exe_path.?, "../" }),
});
defer {
arena_allocator.free(zig_run_result.stdout);
arena_allocator.free(zig_run_result.stderr);
}
var zig_run_result = try executeBuildRunner(arena_allocator, build_file_path, args.items);
errdefer blk: {
const joined = std.mem.join(arena_allocator, " ", args.items) catch break :blk;
@ -527,13 +511,20 @@ fn loadBuildConfiguration(
else => return error.RunFailed,
}
const parse_options = std.json.ParseOptions{ .allocator = allocator };
const parse_options = std.json.ParseOptions{
.allocator = allocator,
// We ignore unknown fields so people can roll
// their own build runners in libraries with
// the only requirement being general adherence
// to the BuildConfig type
.ignore_unknown_fields = true,
};
var token_stream = std.json.TokenStream.init(zig_run_result.stdout);
var build_config = std.json.parse(BuildConfig, &token_stream, parse_options) catch return error.RunFailed;
errdefer std.json.parseFree(BuildConfig, build_config, parse_options);
for (build_config.packages) |*pkg| {
const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ directory_path, pkg.path });
const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ build_file_path, "..", pkg.path });
allocator.free(pkg.path);
pkg.path = pkg_abs_path;
}
@ -688,7 +679,7 @@ fn uriInImports(
}
/// takes ownership of the text passed in.
fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]u8, open: bool) error{OutOfMemory}!Handle {
fn createDocument(self: *DocumentStore, uri: Uri, text: [:0]const u8, open: bool) error{OutOfMemory}!Handle {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();

View File

@ -29,14 +29,14 @@ const snipped_data = @import("data/snippets.zig");
const tres = @import("tres");
const log = std.log.scoped(.server);
const log = std.log.scoped(.zls_server);
// Server fields
config: *Config,
allocator: std.mem.Allocator = undefined,
arena: *std.heap.ArenaAllocator = undefined,
document_store: DocumentStore = undefined,
allocator: std.mem.Allocator,
arena: std.heap.ArenaAllocator,
document_store: DocumentStore,
builtin_completions: ?std.ArrayListUnmanaged(types.CompletionItem),
client_capabilities: ClientCapabilities = .{},
runtime_zig_version: ?ZigVersionWrapper,
@ -500,7 +500,7 @@ fn getAutofixMode(server: *Server) enum {
}
/// caller owns returned memory.
fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const DocumentStore.Handle) error{OutOfMemory}!std.ArrayListUnmanaged(types.TextEdit) {
pub fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const DocumentStore.Handle) error{OutOfMemory}!std.ArrayListUnmanaged(types.TextEdit) {
if (!server.config.enable_ast_check_diagnostics) return .{};
if (handle.tree.errors.len != 0) return .{};
@ -540,7 +540,7 @@ fn autofix(server: *Server, allocator: std.mem.Allocator, handle: *const Documen
return text_edits;
}
fn typeToCompletion(
pub fn typeToCompletion(
server: *Server,
list: *std.ArrayListUnmanaged(types.CompletionItem),
field_access: analysis.FieldAccessReturn,
@ -609,7 +609,7 @@ fn typeToCompletion(
}
}
fn nodeToCompletion(
pub fn nodeToCompletion(
server: *Server,
list: *std.ArrayListUnmanaged(types.CompletionItem),
node_handle: analysis.NodeWithHandle,
@ -834,7 +834,7 @@ pub fn identifierFromPosition(pos_index: usize, handle: DocumentStore.Handle) []
return handle.text[start_idx..end_idx];
}
fn gotoDefinitionSymbol(
pub fn gotoDefinitionSymbol(
server: *Server,
decl_handle: analysis.DeclWithHandle,
resolve_alias: bool,
@ -865,7 +865,7 @@ fn gotoDefinitionSymbol(
};
}
fn hoverSymbol(server: *Server, decl_handle: analysis.DeclWithHandle) error{OutOfMemory}!?types.Hover {
pub fn hoverSymbol(server: *Server, decl_handle: analysis.DeclWithHandle) error{OutOfMemory}!?types.Hover {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -987,7 +987,7 @@ fn hoverSymbol(server: *Server, decl_handle: analysis.DeclWithHandle) error{OutO
};
}
fn getLabelGlobal(pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?analysis.DeclWithHandle {
pub fn getLabelGlobal(pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?analysis.DeclWithHandle {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -997,7 +997,7 @@ fn getLabelGlobal(pos_index: usize, handle: *const DocumentStore.Handle) error{O
return try analysis.lookupLabel(handle, name, pos_index);
}
fn getSymbolGlobal(
pub fn getSymbolGlobal(
server: *Server,
pos_index: usize,
handle: *const DocumentStore.Handle,
@ -1011,7 +1011,7 @@ fn getSymbolGlobal(
return try analysis.lookupSymbolGlobal(&server.document_store, handle, name, pos_index);
}
fn gotoDefinitionLabel(
pub fn gotoDefinitionLabel(
server: *Server,
pos_index: usize,
handle: *const DocumentStore.Handle,
@ -1023,7 +1023,7 @@ fn gotoDefinitionLabel(
return try server.gotoDefinitionSymbol(decl, false);
}
fn gotoDefinitionGlobal(
pub fn gotoDefinitionGlobal(
server: *Server,
pos_index: usize,
handle: *const DocumentStore.Handle,
@ -1036,7 +1036,7 @@ fn gotoDefinitionGlobal(
return try server.gotoDefinitionSymbol(decl, resolve_alias);
}
fn gotoDefinitionBuiltin(
pub fn gotoDefinitionBuiltin(
server: *Server,
handle: *const DocumentStore.Handle,
loc: offsets.Loc,
@ -1068,7 +1068,7 @@ fn gotoDefinitionBuiltin(
return null;
}
fn hoverDefinitionLabel(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
pub fn hoverDefinitionLabel(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1076,7 +1076,7 @@ fn hoverDefinitionLabel(server: *Server, pos_index: usize, handle: *const Docume
return try server.hoverSymbol(decl);
}
fn hoverDefinitionBuiltin(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
pub fn hoverDefinitionBuiltin(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1126,7 +1126,7 @@ fn hoverDefinitionBuiltin(server: *Server, pos_index: usize, handle: *const Docu
};
}
fn hoverDefinitionGlobal(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
pub fn hoverDefinitionGlobal(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}!?types.Hover {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1134,7 +1134,7 @@ fn hoverDefinitionGlobal(server: *Server, pos_index: usize, handle: *const Docum
return try server.hoverSymbol(decl);
}
fn getSymbolFieldAccess(
pub fn getSymbolFieldAccess(
server: *Server,
handle: *const DocumentStore.Handle,
source_index: usize,
@ -1165,7 +1165,7 @@ fn getSymbolFieldAccess(
return null;
}
fn gotoDefinitionFieldAccess(
pub fn gotoDefinitionFieldAccess(
server: *Server,
handle: *const DocumentStore.Handle,
source_index: usize,
@ -1179,7 +1179,7 @@ fn gotoDefinitionFieldAccess(
return try server.gotoDefinitionSymbol(decl, resolve_alias);
}
fn hoverDefinitionFieldAccess(
pub fn hoverDefinitionFieldAccess(
server: *Server,
handle: *const DocumentStore.Handle,
source_index: usize,
@ -1192,7 +1192,7 @@ fn hoverDefinitionFieldAccess(
return try server.hoverSymbol(decl);
}
fn gotoDefinitionString(
pub fn gotoDefinitionString(
server: *Server,
pos_context: analysis.PositionContext,
handle: *const DocumentStore.Handle,
@ -1250,7 +1250,7 @@ const DeclToCompletionContext = struct {
parent_is_type_val: ?bool = null,
};
fn declToCompletion(context: DeclToCompletionContext, decl_handle: analysis.DeclWithHandle) error{OutOfMemory}!void {
pub fn declToCompletion(context: DeclToCompletionContext, decl_handle: analysis.DeclWithHandle) error{OutOfMemory}!void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1317,7 +1317,7 @@ fn declToCompletion(context: DeclToCompletionContext, decl_handle: analysis.Decl
}
}
fn completeLabel(
pub fn completeLabel(
server: *Server,
pos_index: usize,
handle: *const DocumentStore.Handle,
@ -1337,7 +1337,7 @@ fn completeLabel(
return completions.toOwnedSlice(server.arena.allocator());
}
fn populateSnippedCompletions(
pub fn populateSnippedCompletions(
allocator: std.mem.Allocator,
completions: *std.ArrayListUnmanaged(types.CompletionItem),
snippets: []const snipped_data.Snipped,
@ -1361,7 +1361,7 @@ fn populateSnippedCompletions(
}
}
fn completeBuiltin(server: *Server) error{OutOfMemory}!?[]types.CompletionItem {
pub fn completeBuiltin(server: *Server) error{OutOfMemory}!?[]types.CompletionItem {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1405,7 +1405,7 @@ fn completeBuiltin(server: *Server) error{OutOfMemory}!?[]types.CompletionItem {
return completions.items;
}
fn completeGlobal(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
pub fn completeGlobal(server: *Server, pos_index: usize, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1428,7 +1428,7 @@ fn completeGlobal(server: *Server, pos_index: usize, handle: *const DocumentStor
return completions.toOwnedSlice(server.arena.allocator());
}
fn completeFieldAccess(server: *Server, handle: *const DocumentStore.Handle, source_index: usize, loc: offsets.Loc) error{OutOfMemory}!?[]types.CompletionItem {
pub fn completeFieldAccess(server: *Server, handle: *const DocumentStore.Handle, source_index: usize, loc: offsets.Loc) error{OutOfMemory}!?[]types.CompletionItem {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1450,7 +1450,7 @@ fn completeFieldAccess(server: *Server, handle: *const DocumentStore.Handle, sou
return try completions.toOwnedSlice(allocator);
}
fn formatDetailledLabel(item: *types.CompletionItem, arena: std.mem.Allocator) error{OutOfMemory}!void {
pub fn formatDetailledLabel(item: *types.CompletionItem, arena: std.mem.Allocator) error{OutOfMemory}!void {
// NOTE: this is not ideal, we should build a detailed label like we do for label/detail
// because this implementation is very loose, nothing is formatted properly so we need to clean
// things a little bit, which is quite messy
@ -1616,14 +1616,14 @@ fn formatDetailledLabel(item: *types.CompletionItem, arena: std.mem.Allocator) e
// logger.info("labelDetails: {s} :: {s}", .{item.labelDetails.?.detail, item.labelDetails.?.description});
}
fn completeError(server: *Server, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
pub fn completeError(server: *Server, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
return try server.document_store.errorCompletionItems(server.arena.allocator(), handle.*);
}
fn kindToSortScore(kind: types.CompletionItemKind) ?[]const u8 {
pub fn kindToSortScore(kind: types.CompletionItemKind) ?[]const u8 {
return switch (kind) {
.Module => "1_", // use for packages
.Folder => "2_",
@ -1651,7 +1651,7 @@ fn kindToSortScore(kind: types.CompletionItemKind) ?[]const u8 {
};
}
fn completeDot(server: *Server, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
pub fn completeDot(server: *Server, handle: *const DocumentStore.Handle) error{OutOfMemory}![]types.CompletionItem {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -1660,7 +1660,7 @@ fn completeDot(server: *Server, handle: *const DocumentStore.Handle) error{OutOf
return completions;
}
fn completeFileSystemStringLiteral(
pub fn completeFileSystemStringLiteral(
arena: std.mem.Allocator,
store: DocumentStore,
handle: DocumentStore.Handle,
@ -2129,7 +2129,7 @@ fn openDocumentHandler(server: *Server, notification: types.DidOpenTextDocumentP
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
const handle = try server.document_store.openDocument(notification.textDocument.uri, notification.textDocument.text);
const handle = try server.document_store.openDocument(notification.textDocument.uri, try server.document_store.allocator.dupeZ(u8, notification.textDocument.text));
if (server.client_capabilities.supports_publish_diagnostics) blk: {
if (!std.process.can_spawn) break :blk;
@ -2213,12 +2213,12 @@ fn semanticTokensFullHandler(server: *Server, request: types.SemanticTokensParam
const handle = server.document_store.getHandle(request.textDocument.uri) orelse return null;
const token_array = try semantic_tokens.writeAllSemanticTokens(server.arena, &server.document_store, handle, server.offset_encoding);
const token_array = try semantic_tokens.writeAllSemanticTokens(server.arena.allocator(), &server.document_store, handle, server.offset_encoding);
return .{ .data = token_array };
}
fn completionHandler(server: *Server, request: types.CompletionParams) Error!?types.CompletionList {
pub fn completionHandler(server: *Server, request: types.CompletionParams) Error!?types.CompletionList {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2306,7 +2306,7 @@ fn completionHandler(server: *Server, request: types.CompletionParams) Error!?ty
return .{ .isIncomplete = false, .items = completions };
}
fn signatureHelpHandler(server: *Server, request: types.SignatureHelpParams) Error!?types.SignatureHelp {
pub fn signatureHelpHandler(server: *Server, request: types.SignatureHelpParams) Error!?types.SignatureHelp {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2319,7 +2319,7 @@ fn signatureHelpHandler(server: *Server, request: types.SignatureHelpParams) Err
const signature_info = (try getSignatureInfo(
&server.document_store,
server.arena,
server.arena.allocator(),
handle,
source_index,
data,
@ -2335,7 +2335,7 @@ fn signatureHelpHandler(server: *Server, request: types.SignatureHelpParams) Err
};
}
fn gotoHandler(server: *Server, request: types.TextDocumentPositionParams, resolve_alias: bool) Error!?types.Location {
pub fn gotoHandler(server: *Server, request: types.TextDocumentPositionParams, resolve_alias: bool) Error!?types.Location {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2379,7 +2379,7 @@ fn gotoDeclarationHandler(
return try server.gotoHandler(request, false);
}
fn hoverHandler(server: *Server, request: types.HoverParams) Error!?types.Hover {
pub fn hoverHandler(server: *Server, request: types.HoverParams) Error!?types.Hover {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2408,7 +2408,7 @@ fn hoverHandler(server: *Server, request: types.HoverParams) Error!?types.Hover
return response;
}
fn documentSymbolsHandler(server: *Server, request: types.DocumentSymbolParams) Error!?[]types.DocumentSymbol {
pub fn documentSymbolsHandler(server: *Server, request: types.DocumentSymbolParams) Error!?[]types.DocumentSymbol {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2417,7 +2417,7 @@ fn documentSymbolsHandler(server: *Server, request: types.DocumentSymbolParams)
return try analysis.getDocumentSymbols(server.arena.allocator(), handle.tree, server.offset_encoding);
}
fn formattingHandler(server: *Server, request: types.DocumentFormattingParams) Error!?[]types.TextEdit {
pub fn formattingHandler(server: *Server, request: types.DocumentFormattingParams) Error!?[]types.TextEdit {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2488,7 +2488,7 @@ fn didChangeConfigurationHandler(server: *Server, request: configuration.DidChan
}
}
fn renameHandler(server: *Server, request: types.RenameParams) Error!?types.WorkspaceEdit {
pub fn renameHandler(server: *Server, request: types.RenameParams) Error!?types.WorkspaceEdit {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2496,7 +2496,7 @@ fn renameHandler(server: *Server, request: types.RenameParams) Error!?types.Work
return if (response) |rep| rep.rename else null;
}
fn referencesHandler(server: *Server, request: types.ReferenceParams) Error!?[]types.Location {
pub fn referencesHandler(server: *Server, request: types.ReferenceParams) Error!?[]types.Location {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2504,7 +2504,7 @@ fn referencesHandler(server: *Server, request: types.ReferenceParams) Error!?[]t
return if (response) |rep| rep.references else null;
}
fn documentHighlightHandler(server: *Server, request: types.DocumentHighlightParams) Error!?[]types.DocumentHighlight {
pub fn documentHighlightHandler(server: *Server, request: types.DocumentHighlightParams) Error!?[]types.DocumentHighlight {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2540,7 +2540,7 @@ const GeneralReferencesResponse = union {
highlight: []types.DocumentHighlight,
};
fn generalReferencesHandler(server: *Server, request: GeneralReferencesRequest) Error!?GeneralReferencesResponse {
pub fn generalReferencesHandler(server: *Server, request: GeneralReferencesRequest) Error!?GeneralReferencesResponse {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -2913,14 +2913,11 @@ const Message = union(enum) {
pub fn processJsonRpc(
server: *Server,
arena: *std.heap.ArenaAllocator,
json: []const u8,
) void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
server.arena = arena;
var parser = std.json.Parser.init(server.arena.allocator(), false);
defer parser.deinit();
@ -2944,7 +2941,16 @@ pub fn processJsonRpc(
};
}
fn processMessage(server: *Server, message: Message) Error!void {
/// Poor man's garbage collection: once the per-message arena has grown
/// past a fixed retention threshold, release all of its backing memory.
/// Below the threshold this is a cheap no-op, so it is safe to call after
/// every processed message.
pub fn maybeFreeArena(server: *Server) void {
const retain_limit: usize = 128 * 1024;
if (server.arena.queryCapacity() <= retain_limit) return;
_ = server.arena.reset(.free_all);
}
pub fn processMessage(server: *Server, message: Message) Error!void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();
@ -3104,6 +3110,7 @@ pub fn create(
.config = config,
.runtime_zig_version = null,
.allocator = allocator,
.arena = std.heap.ArenaAllocator.init(allocator),
.document_store = .{
.allocator = allocator,
.config = config,
@ -3135,5 +3142,7 @@ pub fn destroy(server: *Server) void {
zig_version.free();
}
server.arena.deinit();
server.allocator.destroy(server);
}

View File

@ -4,7 +4,7 @@ const Ast = std.zig.Ast;
const types = @import("lsp.zig");
const offsets = @import("offsets.zig");
const URI = @import("uri.zig");
const log = std.log.scoped(.analysis);
const log = std.log.scoped(.zls_analysis);
const ast = @import("ast.zig");
const ComptimeInterpreter = @import("ComptimeInterpreter.zig");
const InternPool = ComptimeInterpreter.InternPool;

View File

@ -8,7 +8,7 @@ const known_folders = @import("known-folders");
const Config = @import("Config.zig");
const offsets = @import("offsets.zig");
const logger = std.log.scoped(.config);
const logger = std.log.scoped(.zls_config);
pub fn loadFromFile(allocator: std.mem.Allocator, file_path: []const u8) ?Config {
const tracy_zone = tracy.trace(@src());

View File

@ -5,7 +5,7 @@ const analysis = @import("analysis.zig");
const types = @import("lsp.zig");
const offsets = @import("offsets.zig");
const Ast = std.zig.Ast;
const log = std.log.scoped(.inlay_hint);
const log = std.log.scoped(.zls_inlay_hint);
const ast = @import("ast.zig");
const data = @import("data/data.zig");
const Config = @import("Config.zig");

View File

@ -9,7 +9,7 @@ const Server = @import("Server.zig");
const Header = @import("Header.zig");
const debug = @import("debug.zig");
const logger = std.log.scoped(.main);
const logger = std.log.scoped(.zls_main);
var actual_log_level: std.log.Level = switch (zig_builtin.mode) {
.Debug => .debug,
@ -30,8 +30,9 @@ pub const std_options = struct {
if (@enumToInt(level) > @enumToInt(actual_log_level)) return;
const level_txt = comptime level.asText();
const scope_txt = comptime @tagName(scope);
std.debug.print("{s:<5}: ({s:^6}): ", .{ level_txt, @tagName(scope) });
std.debug.print("{s:<5}: ({s:^6}): ", .{ level_txt, if (comptime std.mem.startsWith(u8, scope_txt, "zls_")) scope_txt[4..] else scope_txt });
std.debug.print(format ++ "\n", args);
}
};
@ -50,18 +51,8 @@ fn loop(
var buffered_writer = std.io.bufferedWriter(std_out);
const writer = buffered_writer.writer();
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
while (true) {
defer {
// Mom, can we have garbage collection?
// No, we already have garbage collection at home.
// at home:
if (arena.queryCapacity() > 128 * 1024) {
_ = arena.reset(.free_all);
}
}
defer server.maybeFreeArena();
// write server -> client messages
for (server.outgoing_messages.items) |outgoing_message| {
@ -76,9 +67,9 @@ fn loop(
server.outgoing_messages.clearRetainingCapacity();
// read and handle client -> server message
const header = try Header.parse(arena.allocator(), replay_file == null, reader);
const header = try Header.parse(server.arena.allocator(), replay_file == null, reader);
const json_message = try arena.allocator().alloc(u8, header.content_length);
const json_message = try server.arena.allocator().alloc(u8, header.content_length);
try reader.readNoEof(json_message);
if (record_file) |file| {
@ -86,7 +77,7 @@ fn loop(
try file.writeAll(json_message);
}
server.processJsonRpc(&arena, json_message);
server.processJsonRpc(json_message);
if (server.status == .exiting_success or server.status == .exiting_failure) return;
}

View File

@ -4,7 +4,7 @@ const DocumentStore = @import("DocumentStore.zig");
const analysis = @import("analysis.zig");
const types = @import("lsp.zig");
const offsets = @import("offsets.zig");
const log = std.log.scoped(.references);
const log = std.log.scoped(.zls_references);
const ast = @import("ast.zig");
pub fn labelReferences(

View File

@ -51,7 +51,7 @@ pub const TokenModifiers = packed struct {
};
const Builder = struct {
arena: *std.heap.ArenaAllocator,
arena: std.mem.Allocator,
store: *DocumentStore,
handle: *const DocumentStore.Handle,
previous_position: usize = 0,
@ -59,7 +59,7 @@ const Builder = struct {
arr: std.ArrayListUnmanaged(u32),
encoding: offsets.Encoding,
fn init(arena: *std.heap.ArenaAllocator, store: *DocumentStore, handle: *const DocumentStore.Handle, encoding: offsets.Encoding) Builder {
fn init(arena: std.mem.Allocator, store: *DocumentStore, handle: *const DocumentStore.Handle, encoding: offsets.Encoding) Builder {
return Builder{
.arena = arena,
.store = store,
@ -185,7 +185,7 @@ const Builder = struct {
const text = self.handle.tree.source[self.previous_position..start];
const delta = offsets.indexToPosition(text, text.len, self.encoding);
try self.arr.appendSlice(self.arena.allocator(), &.{
try self.arr.appendSlice(self.arena, &.{
@truncate(u32, delta.line),
@truncate(u32, delta.character),
@truncate(u32, length),
@ -196,7 +196,7 @@ const Builder = struct {
}
fn toOwnedSlice(self: *Builder) error{OutOfMemory}![]u32 {
return self.arr.toOwnedSlice(self.arena.allocator());
return self.arr.toOwnedSlice(self.arena);
}
};
@ -286,7 +286,7 @@ fn writeNodeTokens(builder: *Builder, maybe_node: ?Ast.Node.Index) error{OutOfMe
const main_tokens = tree.nodes.items(.main_token);
if (node == 0 or node >= node_data.len) return;
var allocator = builder.arena.allocator();
var allocator = builder.arena;
const tag = node_tags[node];
const main_token = main_tokens[node];
@ -994,7 +994,7 @@ fn writeContainerField(builder: *Builder, node: Ast.Node.Index, field_token_type
const base = tree.nodes.items(.main_token)[node];
const tokens = tree.tokens.items(.tag);
var allocator = builder.arena.allocator();
var allocator = builder.arena;
if (analysis.getDocCommentTokenIndex(tokens, base)) |docs|
try writeDocComments(builder, tree, docs);
@ -1027,7 +1027,7 @@ fn writeContainerField(builder: *Builder, node: Ast.Node.Index, field_token_type
// TODO Range version, edit version.
pub fn writeAllSemanticTokens(
arena: *std.heap.ArenaAllocator,
arena: std.mem.Allocator,
store: *DocumentStore,
handle: *const DocumentStore.Handle,
encoding: offsets.Encoding,

View File

@ -8,10 +8,9 @@ const Token = std.zig.Token;
const identifierFromPosition = @import("Server.zig").identifierFromPosition;
const ast = @import("ast.zig");
fn fnProtoToSignatureInfo(document_store: *DocumentStore, arena: *std.heap.ArenaAllocator, commas: u32, skip_self_param: bool, handle: *const DocumentStore.Handle, fn_node: Ast.Node.Index, proto: Ast.full.FnProto) !types.SignatureInformation {
fn fnProtoToSignatureInfo(document_store: *DocumentStore, alloc: std.mem.Allocator, commas: u32, skip_self_param: bool, handle: *const DocumentStore.Handle, fn_node: Ast.Node.Index, proto: Ast.full.FnProto) !types.SignatureInformation {
const tree = handle.tree;
const token_starts = tree.tokens.items(.start);
const alloc = arena.allocator();
const label = analysis.getFunctionSignature(tree, proto);
const proto_comments = (try analysis.getDocComments(alloc, tree, fn_node, .markdown)) orelse "";
@ -71,7 +70,7 @@ fn fnProtoToSignatureInfo(document_store: *DocumentStore, arena: *std.heap.Arena
};
}
pub fn getSignatureInfo(document_store: *DocumentStore, arena: *std.heap.ArenaAllocator, handle: *const DocumentStore.Handle, absolute_index: usize, comptime data: type) !?types.SignatureInformation {
pub fn getSignatureInfo(document_store: *DocumentStore, alloc: std.mem.Allocator, handle: *const DocumentStore.Handle, absolute_index: usize, comptime data: type) !?types.SignatureInformation {
const innermost_block = analysis.innermostBlockScope(handle.*, absolute_index);
const tree = handle.tree;
const token_tags = tree.tokens.items(.tag);
@ -123,7 +122,6 @@ pub fn getSignatureInfo(document_store: *DocumentStore, arena: *std.heap.ArenaAl
};
}
};
const alloc = arena.allocator();
var symbol_stack = try std.ArrayListUnmanaged(StackSymbol).initCapacity(alloc, 8);
var curr_commas: u32 = 0;
var comma_stack = try std.ArrayListUnmanaged(u32).initCapacity(alloc, 4);
@ -277,7 +275,7 @@ pub fn getSignatureInfo(document_store: *DocumentStore, arena: *std.heap.ArenaAl
if (type_handle.handle.tree.fullFnProto(&buf, node)) |proto| {
return try fnProtoToSignatureInfo(
document_store,
arena,
alloc,
paren_commas,
false,
type_handle.handle,
@ -327,7 +325,7 @@ pub fn getSignatureInfo(document_store: *DocumentStore, arena: *std.heap.ArenaAl
if (res_handle.tree.fullFnProto(&buf, node)) |proto| {
return try fnProtoToSignatureInfo(
document_store,
arena,
alloc,
paren_commas,
skip_self_param,
res_handle,

View File

@ -1,20 +1,13 @@
const root = @import("@build@");
const root = @import("@build");
const std = @import("std");
const log = std.log;
const process = std.process;
// Zig 0.11.0-dev.1524+
const Build = if (@hasDecl(std, "Build")) std.Build else std.build;
pub const dependencies = @import("@dependencies");
// Zig 0.11.0-dev.1524+
const Builder = if (@hasDecl(std, "Build")) std.Build else std.build.Builder;
// Zig 0.11.0-dev.1637+
const Cache = if (@hasDecl(Build, "Cache")) std.Build.Cache else void;
const has_cache = Cache != void;
// Zig 0.11.0-dev.1524+
const CompileStep = if (@hasDecl(Build, "CompileStep")) Build.CompileStep else Build.LibExeObjStep;
const Build = std.Build;
const Cache = std.Build.Cache;
const CompileStep = Build.CompileStep;
const InstallArtifactStep = Build.InstallArtifactStep;
const OptionsStep = Build.OptionsStep;
@ -60,31 +53,30 @@ pub fn main() !void {
return error.InvalidArgs;
};
const build_root_directory = if (has_cache) Cache.Directory{
const build_root_directory = Cache.Directory{
.path = build_root,
.handle = try std.fs.cwd().openDir(build_root, .{}),
} else build_root;
};
const local_cache_directory = if (has_cache) Cache.Directory{
const local_cache_directory = Cache.Directory{
.path = cache_root,
.handle = try std.fs.cwd().makeOpenPath(cache_root, .{}),
} else cache_root;
};
const global_cache_directory = if (has_cache) Cache.Directory{
const global_cache_directory = Cache.Directory{
.path = global_cache_root,
.handle = try std.fs.cwd().makeOpenPath(global_cache_root, .{}),
} else global_cache_root;
};
var cache = if (has_cache) Cache{
var cache = Cache{
.gpa = allocator,
.manifest_dir = try local_cache_directory.handle.makeOpenPath("h", .{}),
} else {};
if (has_cache) {
};
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
cache.addPrefix(build_root_directory);
cache.addPrefix(local_cache_directory);
cache.addPrefix(global_cache_directory);
}
const builder = blk: {
// Zig 0.11.0-dev.1524+
@ -93,8 +85,7 @@ pub fn main() !void {
const host = if (does_builder_need_host) try std.zig.system.NativeTargetInfo.detect(.{}) else {};
if (does_builder_need_host) {
if (has_cache) {
break :blk try Builder.create(
break :blk try Build.create(
allocator,
zig_exe,
build_root_directory,
@ -105,17 +96,7 @@ pub fn main() !void {
);
}
break :blk try Builder.create(
allocator,
zig_exe,
build_root_directory,
local_cache_directory,
global_cache_directory,
host,
);
}
break :blk try Builder.create(
break :blk try Build.create(
allocator,
zig_exe,
build_root,
@ -150,7 +131,7 @@ pub fn main() !void {
}
}
builder.resolveInstallPrefix(null, Builder.DirList{});
builder.resolveInstallPrefix(null, Build.DirList{});
try runBuild(builder);
var packages = std.ArrayListUnmanaged(BuildConfig.Pkg){};
@ -219,16 +200,11 @@ fn processStep(
try processIncludeDirs(allocator, include_dirs, install_exe.artifact.include_dirs.items);
try processPkgConfig(allocator, include_dirs, install_exe.artifact);
if (@hasField(CompileStep, "modules")) {
var modules_it = install_exe.artifact.modules.iterator();
while (modules_it.next()) |module_entry| {
try processModule(allocator, packages, module_entry);
}
} else { // assuming @hasField(LibExeObjStep, "packages")
for (install_exe.artifact.packages.items) |pkg| {
try processPackage(allocator, packages, pkg);
}
}
} else if (step.cast(CompileStep)) |exe| {
if (exe.root_src) |src| {
const maybe_path = switch (src) {
@ -239,16 +215,11 @@ fn processStep(
}
try processIncludeDirs(allocator, include_dirs, exe.include_dirs.items);
try processPkgConfig(allocator, include_dirs, exe);
if (@hasField(CompileStep, "modules")) {
var modules_it = exe.modules.iterator();
while (modules_it.next()) |module_entry| {
try processModule(allocator, packages, module_entry);
}
} else { // assuming @hasField(LibExeObjStep, "packages")
for (exe.packages.items) |pkg| {
try processPackage(allocator, packages, pkg);
}
}
} else {
for (step.dependencies.items) |unknown_step| {
try processStep(allocator, packages, include_dirs, unknown_step);
@ -265,13 +236,15 @@ fn processModule(
if (std.mem.eql(u8, package.name, module.key_ptr.*)) return;
}
const builder = module.value_ptr.*.builder;
const maybe_path = switch (module.value_ptr.*.source_file) {
.path => |path| path,
.generated => |generated| generated.path,
};
if (maybe_path) |path| {
try packages.append(allocator, .{ .name = module.key_ptr.*, .path = path });
try packages.append(allocator, .{ .name = module.key_ptr.*, .path = builder.pathFromRoot(path) });
}
var deps_it = module.value_ptr.*.dependencies.iterator();
@ -280,34 +253,6 @@ fn processModule(
}
}
fn processPackage(
allocator: std.mem.Allocator,
packages: *std.ArrayListUnmanaged(BuildConfig.Pkg),
pkg: Build.Pkg,
) anyerror!void {
for (packages.items) |package| {
if (std.mem.eql(u8, package.name, pkg.name)) return;
}
// Support Zig 0.9.1
const source = if (@hasField(Build.Pkg, "source")) pkg.source else pkg.path;
const maybe_path = switch (source) {
.path => |path| path,
.generated => |generated| generated.path,
};
if (maybe_path) |path| {
try packages.append(allocator, .{ .name = pkg.name, .path = path });
}
if (pkg.dependencies) |dependencies| {
for (dependencies) |dep| {
try processPackage(allocator, packages, dep);
}
}
}
fn processIncludeDirs(
allocator: std.mem.Allocator,
include_dirs: *std.StringArrayHashMapUnmanaged(void),
@ -376,7 +321,7 @@ fn getPkgConfigIncludes(
} else |err| return err;
}
fn runBuild(builder: *Builder) anyerror!void {
fn runBuild(builder: *Build) anyerror!void {
switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),

View File

@ -5,7 +5,7 @@ const Config = @import("Config.zig");
const ast = @import("ast.zig");
const Ast = std.zig.Ast;
const URI = @import("uri.zig");
const log = std.log.scoped(.translate_c);
const log = std.log.scoped(.zls_translate_c);
/// converts a `@cInclude` node into an equivalent c header file
/// which can then be handed over to `zig translate-c`

View File

@ -1,5 +1,5 @@
// Used by tests as a package, can be used by tools such as
// zigbot9001 to take advantage of zls' tools
//! Used by tests as a package, can be used by tools such as
//! zigbot9001 to take advantage of zls' tools
pub const ast = @import("ast.zig");
pub const analysis = @import("analysis.zig");
@ -15,6 +15,8 @@ pub const DocumentStore = @import("DocumentStore.zig");
pub const ComptimeInterpreter = @import("ComptimeInterpreter.zig");
pub const diff = @import("diff.zig");
pub const analyser = @import("analyser/analyser.zig");
pub const configuration = @import("configuration.zig");
pub const references = @import("references.zig");
pub const ZigVersionWrapper = @import("ZigVersionWrapper.zig");

View File

@ -82,7 +82,7 @@ pub const Context = struct {
defer allocator.free(req);
// send the request to the server
self.server.processJsonRpc(&self.arena, req);
self.server.processJsonRpc(req);
for (self.server.outgoing_messages.items) |outgoing_message| {
self.server.allocator.free(outgoing_message);
@ -103,7 +103,7 @@ pub const Context = struct {
defer allocator.free(req);
// send the request to the server
self.server.processJsonRpc(&self.arena, req);
self.server.processJsonRpc(req);
const messages = self.server.outgoing_messages.items;

View File

@ -326,7 +326,7 @@ const Context = struct {
else => "file:///test.zig",
};
const handle = try document_store.openDocument(test_uri, source);
const handle = try document_store.openDocument(test_uri, try document_store.allocator.dupeZ(u8, source));
// TODO handle handle.tree.errors