Merge pull request #589 from Techatrix/cimport-support
Add Cimport support
Commit: dcf43389cf
@@ -108,7 +108,7 @@ The following options are currently available.
| `zig_exe_path` | `?[]const u8` | `null` | zig executable path, e.g. `/path/to/zig/zig`, used to run the custom build runner. If `null`, zig is looked up in `PATH`. Will be used to infer the zig standard library path if none is provided. |
| `warn_style` | `bool` | `false` | Enables warnings for style *guideline* mismatches |
| `build_runner_path` | `?[]const u8` | `null` | Path to the build_runner.zig file provided by zls. `null` is equivalent to `${executable_directory}/build_runner.zig` |
| `build_runner_cache_path` | `?[]const u8` | `null` | Path to a directory that will be used as zig's cache when running `zig run build_runner.zig ...`. `null` is equivalent to `${KnownFolders.Cache}/zls` |
| `global_cache_path` | `?[]const u8` | `null` | Path to a directory that will be used as zig's cache. `null` is equivalent to `${KnownFolders.Cache}/zls` |
| `enable_semantic_tokens` | `bool` | `true` | Enables semantic token support when the client also supports it. |
| `enable_inlay_hints` | `bool` | `false` | Enables inlay hint support when the client also supports it. |
| `operator_completions` | `bool` | `true` | Enables `*` and `?` operators in completion lists. |
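As an illustration of the two new cache options (not part of this diff): in a `zls.json` configuration file they could be set explicitly. The paths below are placeholders.

```json
{
    "zig_exe_path": "/usr/bin/zig",
    "build_runner_cache_path": "/home/user/.cache/zls/build_runner",
    "global_cache_path": "/home/user/.cache/zls"
}
```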
@@ -32,8 +32,8 @@ warn_style: bool = false,
/// Path to the build_runner.zig file.
build_runner_path: ?[]const u8 = null,

/// Path to a directory that will be used as cache when `zig run`ning the build runner
build_runner_cache_path: ?[]const u8 = null,
/// Path to the global cache directory
global_cache_path: ?[]const u8 = null,

/// Semantic token support
enable_semantic_tokens: bool = true,
@@ -208,7 +208,7 @@ pub fn configChanged(config: *Config, allocator: std.mem.Allocator, builtin_crea
break :blk try std.fs.path.resolve(allocator, &[_][]const u8{ exe_dir_path, "build_runner.zig" });
};

config.build_runner_cache_path = if (config.build_runner_cache_path) |p|
config.global_cache_path = if (config.global_cache_path) |p|
try allocator.dupe(u8, p)
else blk: {
const cache_dir_path = (try known_folders.getPath(allocator, .cache)) orelse {
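A minimal sketch of the fallback above, assuming the `known-folders` package that zls already depends on; `defaultGlobalCachePath` is a hypothetical helper name, not part of this diff.

```zig
const std = @import("std");
const known_folders = @import("known-folders");

/// Hypothetical helper mirroring the `${KnownFolders.Cache}/zls` default above.
fn defaultGlobalCachePath(allocator: std.mem.Allocator) ![]const u8 {
    const cache_dir_path = (try known_folders.getPath(allocator, .cache)) orelse
        return error.UnknownCachePath;
    defer allocator.free(cache_dir_path);
    return std.fs.path.resolve(allocator, &[_][]const u8{ cache_dir_path, "zls" });
}
```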
@@ -6,20 +6,22 @@ const offsets = @import("offsets.zig");
const log = std.log.scoped(.doc_store);
const Ast = std.zig.Ast;
const BuildAssociatedConfig = @import("BuildAssociatedConfig.zig");
const BuildConfig = @import("special/build_runner.zig").BuildConfig;
const tracy = @import("tracy.zig");
const Config = @import("Config.zig");
const translate_c = @import("translate_c.zig");

const DocumentStore = @This();

const BuildFile = struct {
const Pkg = struct {
name: []const u8,
uri: []const u8,
};
pub const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);

/// Initial state, that can be copied.
pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);

const BuildFile = struct {
refs: usize,
uri: []const u8,
packages: std.ArrayListUnmanaged(Pkg),
config: BuildConfig,

builtin_uri: ?[]const u8 = null,
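The `Hasher` and `hasher_init` above fingerprint generated C source so that unchanged `@cImport` blocks are not re-translated (see `refreshDocumentCIncludes` further down). A self-contained sketch of the same hashing scheme:

```zig
const std = @import("std");

const Hasher = std.crypto.auth.siphash.SipHash128(1, 3);
const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);

pub fn main() void {
    var hasher = hasher_init; // copy of the shared initial state
    hasher.update("#include <stdio.h>\n");
    var hash: [Hasher.mac_length]u8 = undefined;
    hasher.final(&hash);
    std.debug.print("{}\n", .{std.fmt.fmtSliceHexLower(&hash)});
}
```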
@@ -34,7 +36,9 @@ pub const Handle = struct {
count: usize,
/// Contains one entry for every import in the document
import_uris: []const []const u8,
/// Items in this array list come from `import_uris`
/// Contains one entry for every cimport in the document
cimports: []CImportHandle,
/// Items in this array list come from `import_uris` and `cimports`
imports_used: std.ArrayListUnmanaged([]const u8),
tree: Ast,
document_scope: analysis.DocumentScope,
@@ -110,25 +114,25 @@ fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: *B
}
}

const LoadPackagesContext = struct {
const LoadBuildConfigContext = struct {
build_file: *BuildFile,
allocator: std.mem.Allocator,
build_runner_path: []const u8,
build_runner_cache_path: []const u8,
global_cache_path: []const u8,
zig_exe_path: []const u8,
build_file_path: ?[]const u8 = null,
cache_root: []const u8,
global_cache_root: []const u8,
};

fn loadPackages(context: LoadPackagesContext) !void {
fn loadBuildConfiguration(context: LoadBuildConfigContext) !void {
const tracy_zone = tracy.trace(@src());
defer tracy_zone.end();

const allocator = context.allocator;
const build_file = context.build_file;
const build_runner_path = context.build_runner_path;
const build_runner_cache_path = context.build_runner_cache_path;
const global_cache_path = context.global_cache_path;
const zig_exe_path = context.zig_exe_path;

const build_file_path = context.build_file_path orelse try URI.parse(allocator, build_file.uri);
@@ -140,7 +144,7 @@ fn loadPackages(context: LoadPackagesContext) !void {
"run",
build_runner_path,
"--cache-dir",
build_runner_cache_path,
global_cache_path,
"--pkg-begin",
"@build@",
build_file_path,
@@ -167,46 +171,24 @@ fn loadPackages(context: LoadPackagesContext) !void {
defer allocator.free(joined);

log.err(
"Failed to execute build runner to collect packages, command:\n{s}\nError: {s}",
"Failed to execute build runner to collect build configuration, command:\n{s}\nError: {s}",
.{ joined, zig_run_result.stderr },
);
}

switch (zig_run_result.term) {
.Exited => |exit_code| {
if (exit_code == 0) {
log.debug("Finished zig run for build file {s}", .{build_file.uri});
if (exit_code != 0) return error.RunFailed;

for (build_file.packages.items) |old_pkg| {
allocator.free(old_pkg.name);
allocator.free(old_pkg.uri);
}
const parse_options = std.json.ParseOptions{ .allocator = allocator };

build_file.packages.shrinkAndFree(allocator, 0);
var line_it = std.mem.split(u8, zig_run_result.stdout, "\n");
while (line_it.next()) |line| {
if (std.mem.indexOfScalar(u8, line, '\x00')) |zero_byte_idx| {
const name = line[0..zero_byte_idx];
const rel_path = line[zero_byte_idx + 1 ..];
std.json.parseFree(BuildConfig, build_file.config, parse_options);

const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ directory_path, rel_path });
defer allocator.free(pkg_abs_path);

const pkg_uri = try URI.fromPath(allocator, pkg_abs_path);
errdefer allocator.free(pkg_uri);

const duped_name = try allocator.dupe(u8, name);
errdefer allocator.free(duped_name);

(try build_file.packages.addOne(allocator)).* = .{
.name = duped_name,
.uri = pkg_uri,
};
}
}
} else {
return error.RunFailed;
}
build_file.config = std.json.parse(
BuildConfig,
&std.json.TokenStream.init(zig_run_result.stdout),
parse_options,
) catch return error.RunFailed;
},
else => return error.RunFailed,
}
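The rewrite above replaces the old `name\0path` line protocol with a single JSON document on the build runner's stdout. A self-contained sketch of the consuming side, assuming the Zig 0.9-era `std.json` API used in this diff and a simplified `BuildConfig` (the real `include_dirs` is a `union(enum)`):

```zig
const std = @import("std");

const BuildConfig = struct {
    packages: []Pkg,
    include_dirs: [][]const u8, // simplified; see src/special/build_runner.zig

    pub const Pkg = struct {
        name: []const u8,
        uri: []const u8,
    };
};

test "parse build runner stdout" {
    const stdout =
        \\{"packages":[{"name":"known-folders","uri":"/tmp/known-folders.zig"}],"include_dirs":[]}
    ;
    const parse_options = std.json.ParseOptions{ .allocator = std.testing.allocator };
    var token_stream = std.json.TokenStream.init(stdout);
    const config = try std.json.parse(BuildConfig, &token_stream, parse_options);
    defer std.json.parseFree(BuildConfig, config, parse_options);

    try std.testing.expectEqualStrings("known-folders", config.packages[0].name);
}
```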
@@ -232,6 +214,7 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
handle.* = Handle{
.count = 1,
.import_uris = &.{},
.cimports = &.{},
.imports_used = .{},
.document = .{
.uri = uri,
@@ -256,7 +239,10 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
build_file.* = .{
.refs = 1,
.uri = try self.allocator.dupe(u8, uri),
.packages = .{},
.config = .{
.packages = &.{},
.include_dirs = &.{},
},
};

const build_file_path = try URI.parse(self.allocator, build_file.uri);
@@ -274,11 +260,11 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha

// TODO: Do this in a separate thread?
// It can take quite long.
loadPackages(.{
loadBuildConfiguration(.{
.build_file = build_file,
.allocator = self.allocator,
.build_runner_path = self.config.build_runner_path.?,
.build_runner_cache_path = self.config.build_runner_cache_path.?,
.global_cache_path = self.config.global_cache_path.?,
.zig_exe_path = self.config.zig_exe_path.?,
.build_file_path = build_file_path,
.cache_root = self.zig_cache_root,
@@ -382,6 +368,14 @@ fn newDocument(self: *DocumentStore, uri: []const u8, text: [:0]u8) anyerror!*Ha
self.allocator.free(handle.import_uris);
}

handle.cimports = try self.collectCIncludes(handle);
errdefer {
for (handle.cimports) |item| {
self.allocator.free(item.uri);
}
self.allocator.free(handle.cimports);
}

try self.handles.putNoClobber(self.allocator, uri, handle);
return handle;
}
@@ -409,11 +403,8 @@ fn decrementBuildFileRefs(self: *DocumentStore, build_file: *BuildFile) void {
build_file.refs -= 1;
if (build_file.refs == 0) {
log.debug("Freeing build file {s}", .{build_file.uri});
for (build_file.packages.items) |pkg| {
self.allocator.free(pkg.name);
self.allocator.free(pkg.uri);
}
build_file.packages.deinit(self.allocator);

std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });

// Decrement count of the document since one count comes
// from the build file existing.
@@ -456,9 +447,14 @@ fn decrementCount(self: *DocumentStore, uri: []const u8) void {
self.allocator.free(import_uri);
}

for (handle.cimports) |item| {
self.allocator.free(item.uri);
}

handle.document_scope.deinit(self.allocator);
handle.imports_used.deinit(self.allocator);
self.allocator.free(handle.import_uris);
self.allocator.free(handle.cimports);
self.allocator.destroy(handle);
const uri_key = entry.key_ptr.*;
std.debug.assert(self.handles.remove(uri));
@@ -475,27 +471,149 @@ pub fn getHandle(self: *DocumentStore, uri: []const u8) ?*Handle {
}

fn collectImportUris(self: *DocumentStore, handle: *Handle) ![]const []const u8 {
var new_imports = std.ArrayList([]const u8).init(self.allocator);
const collected_imports = try analysis.collectImports(self.allocator, handle.tree);

var imports = std.ArrayList([]const u8).fromOwnedSlice(self.allocator, collected_imports);
errdefer {
for (new_imports.items) |imp| {
for (imports.items) |imp| {
self.allocator.free(imp);
}
new_imports.deinit();
imports.deinit();
}
try analysis.collectImports(&new_imports, handle.tree);

// Convert to URIs
var i: usize = 0;
while (i < new_imports.items.len) {
if (try self.uriFromImportStr(self.allocator, handle.*, new_imports.items[i])) |uri| {
while (i < imports.items.len) {
if (try self.uriFromImportStr(self.allocator, handle.*, imports.items[i])) |uri| {
// The raw import strings are owned by the document and do not need to be freed here.
new_imports.items[i] = uri;
imports.items[i] = uri;
i += 1;
} else {
_ = new_imports.swapRemove(i);
_ = imports.swapRemove(i);
}
}
return new_imports.toOwnedSlice();
return imports.toOwnedSlice();
}

pub const CImportSource = struct {
/// the `@cImport` node
node: Ast.Node.Index,
/// hash of c source file
hash: [Hasher.mac_length]u8,
/// c source file
source: []const u8,
};

/// Collects all `@cImport` nodes and converts them into c source code
/// the translation process is defined in `translate_c.convertCInclude`
/// Caller owns returned memory.
fn collectCIncludeSources(self: *DocumentStore, handle: *Handle) ![]CImportSource {
var cimport_nodes = try analysis.collectCImportNodes(self.allocator, handle.tree);
defer self.allocator.free(cimport_nodes);

var sources = try std.ArrayListUnmanaged(CImportSource).initCapacity(self.allocator, cimport_nodes.len);
errdefer {
for (sources.items) |item| {
self.allocator.free(item.source);
}
sources.deinit(self.allocator);
}

for (cimport_nodes) |node| {
const c_source = translate_c.convertCInclude(self.allocator, handle.tree, node) catch |err| switch (err) {
error.Unsupported => continue,
error.OutOfMemory => return error.OutOfMemory,
};

var hasher = hasher_init;
hasher.update(c_source);
var hash: [Hasher.mac_length]u8 = undefined;
hasher.final(&hash);

sources.appendAssumeCapacity(.{
.node = node,
.hash = hash,
.source = c_source,
});
}

return sources.toOwnedSlice(self.allocator);
}

pub const CImportHandle = struct {
/// the `@cImport` node
node: Ast.Node.Index,
/// hash of the c source file
hash: [Hasher.mac_length]u8,
/// uri to a zig source file generated with translate-c
uri: []const u8,
};

/// Collects all `@cImport` nodes and converts them into zig files using translate-c
/// Caller owns returned memory.
fn collectCIncludes(self: *DocumentStore, handle: *Handle) ![]CImportHandle {
var cimport_nodes = try analysis.collectCImportNodes(self.allocator, handle.tree);
defer self.allocator.free(cimport_nodes);

var uris = try std.ArrayListUnmanaged(CImportHandle).initCapacity(self.allocator, cimport_nodes.len);
errdefer {
for (uris.items) |item| {
self.allocator.free(item.uri);
}
uris.deinit(self.allocator);
}

for (cimport_nodes) |node| {
const c_source = translate_c.convertCInclude(self.allocator, handle.tree, node) catch |err| switch (err) {
error.Unsupported => continue,
error.OutOfMemory => return error.OutOfMemory,
};
defer self.allocator.free(c_source);

const uri = self.translate(handle, c_source) catch |err| {
std.log.warn("failed to translate cInclude: {}", .{err});
continue;
} orelse continue;
errdefer self.allocator.free(uri);

var hasher = hasher_init;
hasher.update(c_source);
var hash: [Hasher.mac_length]u8 = undefined;
hasher.final(&hash);

uris.appendAssumeCapacity(.{
.node = node,
.hash = hash,
.uri = uri,
});
}

return uris.toOwnedSlice(self.allocator);
}

fn translate(self: *DocumentStore, handle: *Handle, source: []const u8) !?[]const u8 {
const dirs: []BuildConfig.IncludeDir = if (handle.associated_build_file) |build_file| build_file.config.include_dirs else &.{};
const include_dirs = blk: {
var result = try self.allocator.alloc([]const u8, dirs.len);
errdefer self.allocator.free(result);

for (dirs) |dir, i| {
result[i] = dir.getPath();
}

break :blk result;
};
defer self.allocator.free(include_dirs);

const file_path = (try translate_c.translate(
self.allocator,
self.config,
include_dirs,
source,
)) orelse return null;
defer self.allocator.free(file_path);

return try URI.fromPath(self.allocator, file_path);
}
fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
@@ -506,28 +624,37 @@ fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
handle.document_scope.deinit(self.allocator);
handle.document_scope = try analysis.makeDocumentScope(self.allocator, handle.tree);

const new_imports = try self.collectImportUris(handle);
errdefer {
for (new_imports) |imp| {
self.allocator.free(imp);
}
self.allocator.free(new_imports);
}
var old_imports = handle.import_uris;
var old_cimports = handle.cimports;

handle.import_uris = try self.collectImportUris(handle);

handle.cimports = try self.refreshDocumentCIncludes(handle);

const old_imports = handle.import_uris;
handle.import_uris = new_imports;
defer {
for (old_imports) |uri| {
self.allocator.free(uri);
}
self.allocator.free(old_imports);

for (old_cimports) |old_cimport| {
self.allocator.free(old_cimport.uri);
}
self.allocator.free(old_cimports);
}

var i: usize = 0;
while (i < handle.imports_used.items.len) {
const old = handle.imports_used.items[i];
still_exists: {
for (new_imports) |new| {
for (handle.import_uris) |new| {
if (std.mem.eql(u8, new, old)) {
handle.imports_used.items[i] = new;
break :still_exists;
}
}
for (handle.cimports) |cimport| {
const new = cimport.uri;
if (std.mem.eql(u8, new, old)) {
handle.imports_used.items[i] = new;
break :still_exists;
@@ -542,18 +669,104 @@ fn refreshDocument(self: *DocumentStore, handle: *Handle) !void {
}
}

fn refreshDocumentCIncludes(self: *DocumentStore, handle: *Handle) ![]CImportHandle {
const new_sources: []CImportSource = try self.collectCIncludeSources(handle);
defer {
for (new_sources) |new_source| {
self.allocator.free(new_source.source);
}
self.allocator.free(new_sources);
}

var old_cimports = handle.cimports;
var new_cimports = try std.ArrayListUnmanaged(CImportHandle).initCapacity(self.allocator, new_sources.len);
errdefer {
for (new_cimports.items) |new_cimport| {
self.allocator.free(new_cimport.uri);
}
new_cimports.deinit(self.allocator);
}

for (new_sources) |new_source| {
const maybe_old_cimport: ?CImportHandle = blk: {
const old_cimport: CImportHandle = found: {
for (old_cimports) |old_cimport| {
if (new_source.node == old_cimport.node) {
break :found old_cimport;
}
}
break :blk null;
};

// avoid re-translating if the source didn't change
if (std.mem.eql(u8, &new_source.hash, &old_cimport.hash)) {
break :blk CImportHandle{
.node = old_cimport.node,
.hash = old_cimport.hash,
.uri = try self.allocator.dupe(u8, old_cimport.uri),
};
}

const new_uri = self.translate(handle, new_source.source) catch |err| {
std.log.warn("failed to translate cInclude: {}", .{err});
continue;
} orelse continue;
errdefer self.allocator.free(new_uri);

break :blk CImportHandle{
.node = old_cimport.node,
.hash = old_cimport.hash,
.uri = new_uri,
};
};

if (maybe_old_cimport) |cimport| {
new_cimports.appendAssumeCapacity(cimport);
continue;
}

const c_source = translate_c.convertCInclude(self.allocator, handle.tree, new_source.node) catch |err| switch (err) {
error.Unsupported => continue,
error.OutOfMemory => return error.OutOfMemory,
};
defer self.allocator.free(c_source);

var hasher = hasher_init;
var hash: [Hasher.mac_length]u8 = undefined;
hasher.update(c_source);
hasher.final(&hash);

const new_uri = self.translate(
handle,
c_source,
) catch |err| {
std.log.warn("failed to translate cInclude: {}", .{err});
continue;
} orelse continue;
errdefer self.allocator.free(new_uri);

new_cimports.appendAssumeCapacity(.{
.node = new_source.node,
.hash = hash,
.uri = new_uri,
});
}

return new_cimports.toOwnedSlice(self.allocator);
}

pub fn applySave(self: *DocumentStore, handle: *Handle) !void {
if (handle.is_build_file) |build_file| {
loadPackages(.{
loadBuildConfiguration(.{
.build_file = build_file,
.allocator = self.allocator,
.build_runner_path = self.config.build_runner_path.?,
.build_runner_cache_path = self.config.build_runner_cache_path.?,
.global_cache_path = self.config.global_cache_path.?,
.zig_exe_path = self.config.zig_exe_path.?,
.cache_root = self.zig_cache_root,
.global_cache_root = self.zig_global_cache_root,
}) catch |err| {
log.err("Failed to load packages of build file {s} (error: {})", .{ build_file.uri, err });
log.err("Failed to load build configuration for {s} (error: {})", .{ build_file.uri, err });
};
}
}
@@ -643,7 +856,7 @@ pub fn uriFromImportStr(self: *DocumentStore, allocator: std.mem.Allocator, hand
return null;
} else if (!std.mem.endsWith(u8, import_str, ".zig")) {
if (handle.associated_build_file) |build_file| {
for (build_file.packages.items) |pkg| {
for (build_file.config.packages) |pkg| {
if (std.mem.eql(u8, import_str, pkg.name)) {
return try allocator.dupe(u8, pkg.uri);
}
@@ -686,7 +899,7 @@ pub fn resolveImport(self: *DocumentStore, handle: *Handle, import_str: []const
}
}
if (handle.associated_build_file) |bf| {
for (bf.packages.items) |pkg| {
for (bf.config.packages) |pkg| {
if (std.mem.eql(u8, pkg.uri, final_uri)) {
break :find_uri pkg.uri;
}
@@ -706,39 +919,66 @@ pub fn resolveImport(self: *DocumentStore, handle: *Handle, import_str: []const
}

// New document, read the file then call into openDocument.
const file_path = try URI.parse(allocator, final_uri);
defer allocator.free(file_path);
var document_handle = try self.newDocumentFromUri(final_uri);

var file = std.fs.cwd().openFile(file_path, .{}) catch {
log.debug("Cannot open import file {s}", .{file_path});
// Add to import table of current handle.
try handle.imports_used.append(allocator, handle_uri);

return document_handle;
}

pub fn resolveCImport(self: *DocumentStore, handle: *Handle, node: Ast.Node.Index) !?*Handle {
const uri = blk: {
for (handle.cimports) |item| {
if (item.node == node) break :blk item.uri;
}
return null;
};

defer file.close();
{
const file_contents = file.readToEndAllocOptions(
allocator,
std.math.maxInt(usize),
null,
@alignOf(u8),
0,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
log.debug("Could not read from file {s}", .{file_path});
return null;
},
};
errdefer allocator.free(file_contents);

// Add to import table of current handle.
try handle.imports_used.append(self.allocator, handle_uri);
// Swap handles.
// This takes ownership of the passed uri and text.
const duped_final_uri = try allocator.dupe(u8, final_uri);
errdefer allocator.free(duped_final_uri);
return try self.newDocument(duped_final_uri, file_contents);
// Check if the import is already opened by others.
if (self.getHandle(uri)) |new_handle| {
// If it is, append it to our imports, increment the count, set our new handle
// and return the parsed tree root node.
try handle.imports_used.append(self.allocator, uri);
new_handle.count += 1;
return new_handle;
}

// New document, read the file then call into openDocument.
var document_handle = try self.newDocumentFromUri(uri);

// Add to cimport table of current handle.
try handle.imports_used.append(self.allocator, uri);

return document_handle;
}

fn newDocumentFromUri(self: *DocumentStore, uri: []const u8) !?*Handle {
const file_path = try URI.parse(self.allocator, uri);
defer self.allocator.free(file_path);

var file = std.fs.openFileAbsolute(file_path, .{}) catch |err| {
log.debug("Cannot open file '{s}': {}", .{ file_path, err });
return null;
};
defer file.close();

const file_contents = file.readToEndAllocOptions(
self.allocator,
std.math.maxInt(usize),
null,
@alignOf(u8),
0,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
log.debug("Could not read from file {s}", .{file_path});
return null;
},
};
errdefer self.allocator.free(file_contents);

return try self.newDocument(try self.allocator.dupe(u8, uri), file_contents);
}

fn stdUriFromLibPath(allocator: std.mem.Allocator, zig_lib_path: ?[]const u8) !?[]const u8 {
@@ -768,6 +1008,10 @@ pub fn deinit(self: *DocumentStore) void {
self.allocator.free(uri);
}
self.allocator.free(entry.value_ptr.*.import_uris);
for (entry.value_ptr.*.cimports) |cimport| {
self.allocator.free(cimport.uri);
}
self.allocator.free(entry.value_ptr.*.cimports);
entry.value_ptr.*.imports_used.deinit(self.allocator);
self.allocator.free(entry.key_ptr.*);
self.allocator.destroy(entry.value_ptr.*);
@@ -775,11 +1019,7 @@ pub fn deinit(self: *DocumentStore) void {

self.handles.deinit(self.allocator);
for (self.build_files.items) |build_file| {
for (build_file.packages.items) |pkg| {
self.allocator.free(pkg.name);
self.allocator.free(pkg.uri);
}
build_file.packages.deinit(self.allocator);
std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });
self.allocator.free(build_file.uri);
build_file.destroy(self.allocator);
}
@@ -808,6 +1048,7 @@ fn tagStoreCompletionItems(self: DocumentStore, arena: *std.heap.ArenaAllocator,
result_set.putAssumeCapacity(completion, {});
}
}

return result_set.entries.items(.key);
}
@@ -2063,9 +2063,9 @@ fn completionHandler(server: *Server, writer: anytype, id: types.RequestId, req:

if (!subpath_present and pos_context == .import_string_literal) {
if (handle.associated_build_file) |bf| {
try fsl_completions.ensureUnusedCapacity(server.arena.allocator(), bf.packages.items.len);
try fsl_completions.ensureUnusedCapacity(server.arena.allocator(), bf.config.packages.len);

for (bf.packages.items) |pkg| {
for (bf.config.packages) |pkg| {
try fsl_completions.append(server.arena.allocator(), .{
.label = pkg.name,
.kind = .Module,
@@ -337,7 +337,8 @@ fn resolveVarDeclAliasInternal(store: *DocumentStore, arena: *std.heap.ArenaAllo
const lhs = datas[node_handle.node].lhs;

const container_node = if (ast.isBuiltinCall(tree, lhs)) block: {
if (!std.mem.eql(u8, tree.tokenSlice(main_tokens[lhs]), "@import"))
const name = tree.tokenSlice(main_tokens[lhs]);
if (!std.mem.eql(u8, name, "@import") and !std.mem.eql(u8, name, "@cImport"))
return null;

const inner_node = (try resolveTypeOfNode(store, arena, .{ .node = lhs, .handle = handle })) orelse return null;
@@ -904,20 +905,28 @@ pub fn resolveTypeOfNodeInternal(store: *DocumentStore, arena: *std.heap.ArenaAl
return resolved_type;
}

if (!std.mem.eql(u8, call_name, "@import")) return null;
if (params.len == 0) return null;
if (std.mem.eql(u8, call_name, "@import")) {
if (params.len == 0) return null;
const import_param = params[0];
if (node_tags[import_param] != .string_literal) return null;

const import_param = params[0];
if (node_tags[import_param] != .string_literal) return null;
const import_str = tree.tokenSlice(main_tokens[import_param]);
const new_handle = (store.resolveImport(handle, import_str[1 .. import_str.len - 1]) catch |err| {
log.debug("Error {} while processing import {s}", .{ err, import_str });
return null;
}) orelse return null;

const import_str = tree.tokenSlice(main_tokens[import_param]);
const new_handle = (store.resolveImport(handle, import_str[1 .. import_str.len - 1]) catch |err| {
log.debug("Error {} while processing import {s}", .{ err, import_str });
return null;
}) orelse return null;
// reference to node '0' which is root
return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle });
} else if (std.mem.eql(u8, call_name, "@cImport")) {
const new_handle = (store.resolveCImport(handle, node) catch |err| {
log.debug("Error {} while processing cImport", .{err}); // TODO improve
return null;
}) orelse return null;

// reference to node '0' which is root
return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle });
// reference to node '0' which is root
return TypeWithHandle.typeVal(.{ .node = 0, .handle = new_handle });
}
},
.fn_proto,
.fn_proto_multi,
@@ -1077,8 +1086,17 @@ pub fn resolveTypeOfNode(store: *DocumentStore, arena: *std.heap.ArenaAllocator,
return resolveTypeOfNodeInternal(store, arena, node_handle, &bound_type_params);
}

/// Collects all imports we can find into a slice of import paths (without quotes).
pub fn collectImports(import_arr: *std.ArrayList([]const u8), tree: Ast) !void {
/// Collects all `@import`'s we can find into a slice of import paths (without quotes).
/// Caller owns returned memory.
pub fn collectImports(allocator: std.mem.Allocator, tree: Ast) error{OutOfMemory}![][]const u8 {
var imports = std.ArrayListUnmanaged([]const u8){};
errdefer {
for (imports.items) |imp| {
allocator.free(imp);
}
imports.deinit(allocator);
}

const tags = tree.tokens.items(.tag);

var i: usize = 0;
@@ -1098,9 +1116,33 @@ pub fn collectImports(import_arr: *std.ArrayList([]const u8), tree: Ast) !void {
continue;

const str = tree.tokenSlice(@intCast(u32, i + 2));
try import_arr.append(str[1 .. str.len - 1]);
try imports.append(allocator, str[1 .. str.len - 1]);
}
}

return imports.toOwnedSlice(allocator);
}

/// Collects all `@cImport` nodes
/// Caller owns returned memory.
pub fn collectCImportNodes(allocator: std.mem.Allocator, tree: Ast) error{OutOfMemory}![]Ast.Node.Index {
var import_nodes = std.ArrayListUnmanaged(Ast.Node.Index){};
errdefer import_nodes.deinit(allocator);

const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);

var i: usize = 0;
while (i < node_tags.len) : (i += 1) {
const node = @intCast(Ast.Node.Index, i);
if (!ast.isBuiltinCall(tree, node)) continue;

if (!std.mem.eql(u8, Ast.tokenSlice(tree, main_tokens[node]), "@cImport")) continue;

try import_nodes.append(allocator, node);
}

return import_nodes.toOwnedSlice(allocator);
}
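A usage sketch for the collector above, assuming it sits next to `collectCImportNodes` so the function is in scope:

```zig
test "collectCImportNodes finds @cImport calls" {
    const allocator = std.testing.allocator;
    var tree = try std.zig.parse(allocator,
        \\const c = @cImport({
        \\    @cInclude("stdio.h");
        \\});
    );
    defer tree.deinit(allocator);

    const nodes = try collectCImportNodes(allocator, tree);
    defer allocator.free(nodes);
    // exactly one @cImport in the source above
    try std.testing.expectEqual(@as(usize, 1), nodes.len);
}
```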

pub const NodeWithHandle = struct {
@@ -1341,26 +1383,22 @@ pub fn getImportStr(tree: Ast, node: Ast.Node.Index, source_index: usize) ?[]con
return getImportStr(tree, tree.nodes.items(.data)[node].lhs, source_index);
}

if (!nodeContainsSourceIndex(tree, node, source_index)) {
return null;
}
if (!nodeContainsSourceIndex(tree, node, source_index)) return null;

if (ast.isBuiltinCall(tree, node)) {
const builtin_token = tree.nodes.items(.main_token)[node];
const call_name = tree.tokenSlice(builtin_token);
if (!ast.isBuiltinCall(tree, node)) return null;

if (!std.mem.eql(u8, call_name, "@import")) return null;
const builtin_token = tree.nodes.items(.main_token)[node];
const call_name = tree.tokenSlice(builtin_token);

var buffer: [2]Ast.Node.Index = undefined;
const params = ast.builtinCallParams(tree, node, &buffer).?;
if (!std.mem.eql(u8, call_name, "@import")) return null;

if (params.len != 1) return null;
var buffer: [2]Ast.Node.Index = undefined;
const params = ast.builtinCallParams(tree, node, &buffer).?;

const import_str = tree.tokenSlice(tree.nodes.items(.main_token)[params[0]]);
return import_str[1 .. import_str.len - 1];
}
if (params.len != 1) return null;

return null;
const import_str = tree.tokenSlice(tree.nodes.items(.main_token)[params[0]]);
return import_str[1 .. import_str.len - 1];
}

pub const SourceRange = std.zig.Token.Loc;
@@ -282,7 +282,7 @@ pub const Configuration = struct {
zig_exe_path: ?[]const u8,
warn_style: ?bool,
build_runner_path: ?[]const u8,
build_runner_cache_path: ?[]const u8,
global_cache_path: ?[]const u8,
enable_semantic_tokens: ?bool,
enable_inlay_hints: ?bool,
inlay_hints_show_builtin: ?bool,
@@ -5,14 +5,39 @@ const io = std.io;
const log = std.log;
const process = std.process;
const Builder = std.build.Builder;
const Pkg = std.build.Pkg;
const InstallArtifactStep = std.build.InstallArtifactStep;
const LibExeObjStep = std.build.LibExeObjStep;
const ArrayList = std.ArrayList;

pub const BuildConfig = struct {
packages: []Pkg,
include_dirs: []IncludeDir,

pub const Pkg = struct {
name: []const u8,
uri: []const u8,
};

pub const IncludeDir = union(enum) {
raw_path: []const u8,
raw_path_system: []const u8,

pub fn getPath(self: IncludeDir) []const u8 {
return switch (self) {
.raw_path => |path| return path,
.raw_path_system => |path| return path,
};
}

pub fn eql(a: IncludeDir, b: IncludeDir) bool {
return @enumToInt(a) == @enumToInt(b) and
std.mem.eql(u8, a.getPath(), b.getPath());
}
};
};
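To make the emitted wire format concrete, here is a self-contained sketch that serializes a `BuildConfig`-like value the same way `main` does below; `include_dirs` is omitted because the JSON shape of the `union(enum)` depends on `std.json.stringify`'s union handling:

```zig
const std = @import("std");

const Pkg = struct { name: []const u8, uri: []const u8 };
const Example = struct { packages: []const Pkg };

pub fn main() !void {
    const config = Example{
        .packages = &[_]Pkg{
            // placeholder name and path
            .{ .name = "known-folders", .uri = "/tmp/known-folders.zig" },
        },
    };
    try std.json.stringify(config, .{ .whitespace = .{} }, std.io.getStdOut().writer());
}
```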

///! This is a modified build runner to extract information out of build.zig
///! Modified from the std.special.build_runner
///! Modified version of lib/build_runner.zig
pub fn main() !void {
var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
defer arena.deinit();
@@ -55,48 +80,93 @@ pub fn main() !void {
builder.resolveInstallPrefix(null, Builder.DirList{});
try runBuild(builder);

const stdout_stream = io.getStdOut().writer();
var packages = ArrayList(BuildConfig.Pkg).init(allocator);
defer packages.deinit();

var include_dirs = ArrayList(BuildConfig.IncludeDir).init(allocator);
defer include_dirs.deinit();

// TODO: We currently add packages from every LibExeObj step that the install step depends on.
// Should we error out or keep one step or something similar?
// We also flatten them, we should probably keep the nested structure.
for (builder.top_level_steps.items) |tls| {
for (tls.step.dependencies.items) |step| {
try processStep(stdout_stream, step);
try processStep(&packages, &include_dirs, step);
}
}

try std.json.stringify(
BuildConfig{
.packages = packages.items,
.include_dirs = include_dirs.items,
},
.{ .whitespace = .{} },
io.getStdOut().writer(),
);
}

fn processStep(stdout_stream: anytype, step: *std.build.Step) anyerror!void {
fn processStep(
packages: *ArrayList(BuildConfig.Pkg),
include_dirs: *ArrayList(BuildConfig.IncludeDir),
step: *std.build.Step,
) anyerror!void {
if (step.cast(InstallArtifactStep)) |install_exe| {
try processIncludeDirs(include_dirs, install_exe.artifact.include_dirs.items);
for (install_exe.artifact.packages.items) |pkg| {
try processPackage(stdout_stream, pkg);
try processPackage(packages, pkg);
}
} else if (step.cast(LibExeObjStep)) |exe| {
try processIncludeDirs(include_dirs, exe.include_dirs.items);
for (exe.packages.items) |pkg| {
try processPackage(stdout_stream, pkg);
try processPackage(packages, pkg);
}
} else {
for (step.dependencies.items) |unknown_step| {
try processStep(stdout_stream, unknown_step);
try processStep(packages, include_dirs, unknown_step);
}
}
}

fn processPackage(out_stream: anytype, pkg: Pkg) anyerror!void {
const source = if (@hasField(Pkg, "source")) pkg.source else pkg.path;
fn processPackage(
packages: *ArrayList(BuildConfig.Pkg),
pkg: std.build.Pkg,
) anyerror!void {
for (packages.items) |package| {
if (std.mem.eql(u8, package.name, pkg.name)) return;
}

const source = if (@hasField(std.build.Pkg, "source")) pkg.source else pkg.path;
switch (source) {
.path => |path| try out_stream.print("{s}\x00{s}\n", .{ pkg.name, path }),
.generated => |generated| if (generated.path != null) try out_stream.print("{s}\x00{s}\n", .{ pkg.name, generated.path.? }),
.path => |path| try packages.append(.{ .name = pkg.name, .uri = path }),
.generated => |generated| if (generated.path != null) try packages.append(.{ .name = pkg.name, .uri = generated.path.? }),
}

if (pkg.dependencies) |dependencies| {
for (dependencies) |dep| {
try processPackage(out_stream, dep);
try processPackage(packages, dep);
}
}
}

fn processIncludeDirs(
include_dirs: *ArrayList(BuildConfig.IncludeDir),
dirs: []std.build.LibExeObjStep.IncludeDir,
) !void {
outer: for (dirs) |dir| {
const candidate: BuildConfig.IncludeDir = switch (dir) {
.raw_path => |path| .{ .raw_path = path },
.raw_path_system => |path| .{ .raw_path_system = path },
else => continue,
};

for (include_dirs.items) |include_dir| {
if (candidate.eql(include_dir)) continue :outer;
}

try include_dirs.append(candidate);
}
}

fn runBuild(builder: *Builder) anyerror!void {
switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
src/translate_c.zig (new file, 183 lines)
@@ -0,0 +1,183 @@
const std = @import("std");
const builtin = @import("builtin");
const Config = @import("Config.zig");
const ast = @import("ast.zig");
const Ast = std.zig.Ast;
const URI = @import("uri.zig");

/// converts a `@cInclude` node into an equivalent c header file
/// which can then be handed over to `zig translate-c`
/// Caller owns returned memory.
///
/// **Example**
/// ```zig
/// const glfw = @cImport({
///     @cDefine("GLFW_INCLUDE_VULKAN", {});
///     @cInclude("GLFW/glfw3.h");
/// });
/// ```
/// gets converted into:
/// ```c
/// #define GLFW_INCLUDE_VULKAN
/// #include <GLFW/glfw3.h>
/// ```
pub fn convertCInclude(allocator: std.mem.Allocator, tree: Ast, node: Ast.Node.Index) error{ OutOfMemory, Unsupported }![]const u8 {
const main_tokens = tree.nodes.items(.main_token);

std.debug.assert(ast.isBuiltinCall(tree, node));
std.debug.assert(std.mem.eql(u8, Ast.tokenSlice(tree, main_tokens[node]), "@cImport"));

var output = std.ArrayList(u8).init(allocator);
errdefer output.deinit();

var stack_allocator = std.heap.stackFallback(512, allocator);

var buffer: [2]Ast.Node.Index = undefined;
for (ast.builtinCallParams(tree, node, &buffer).?) |child| {
try convertCIncludeInternal(stack_allocator.get(), tree, child, &output);
}

return output.toOwnedSlice();
}

fn convertCIncludeInternal(allocator: std.mem.Allocator, tree: Ast, node: Ast.Node.Index, output: *std.ArrayList(u8)) error{ OutOfMemory, Unsupported }!void {
const node_tags = tree.nodes.items(.tag);
const main_tokens = tree.nodes.items(.main_token);

var buffer: [2]Ast.Node.Index = undefined;
if (ast.isBlock(tree, node)) {
const FrameSize = @sizeOf(@Frame(convertCIncludeInternal));
var child_frame = try allocator.alignedAlloc(u8, std.Target.stack_align, FrameSize);
defer allocator.free(child_frame);

for (ast.blockStatements(tree, node, &buffer).?) |statement| {
try await @asyncCall(child_frame, {}, convertCIncludeInternal, .{ allocator, tree, statement, output });
}
} else if (ast.builtinCallParams(tree, node, &buffer)) |params| {
if (params.len < 1) return;

const call_name = Ast.tokenSlice(tree, main_tokens[node]);

if (node_tags[params[0]] != .string_literal) return error.Unsupported;
const first = extractString(Ast.tokenSlice(tree, main_tokens[params[0]]));

if (std.mem.eql(u8, call_name, "@cInclude")) {
try output.writer().print("#include <{s}>\n", .{first});
} else if (std.mem.eql(u8, call_name, "@cDefine")) {
if (params.len < 2) return;

var buffer2: [2]Ast.Node.Index = undefined;
const is_void = if (ast.blockStatements(tree, params[1], &buffer2)) |block| block.len == 0 else false;

if (is_void) {
try output.writer().print("#define {s}\n", .{first});
} else {
if (node_tags[params[1]] != .string_literal) return error.Unsupported;
const second = extractString(Ast.tokenSlice(tree, main_tokens[params[1]]));
try output.writer().print("#define {s} {s}\n", .{ first, second });
}
} else if (std.mem.eql(u8, call_name, "@cUndef")) {
try output.writer().print("#undef {s}\n", .{first});
} else {
return error.Unsupported;
}
}
}
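`convertCIncludeInternal` recurses into nested blocks through `@asyncCall` with a heap-allocated frame, so deeply nested ASTs cannot overflow the stack. A stripped-down sketch of that pattern, assuming a stage1 compiler with async support as this file does; `countDown` is a made-up stand-in for the recursive function:

```zig
const std = @import("std");

fn countDown(allocator: std.mem.Allocator, n: u32) error{OutOfMemory}!void {
    if (n == 0) return;
    // Put the child's frame on the heap instead of the stack.
    const FrameSize = @sizeOf(@Frame(countDown));
    var child_frame = try allocator.alignedAlloc(u8, std.Target.stack_align, FrameSize);
    defer allocator.free(child_frame);
    try await @asyncCall(child_frame, {}, countDown, .{ allocator, n - 1 });
}
```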

/// takes a c header file and returns the result from calling `zig translate-c`
/// returns the file path to the generated zig file
/// Caller owns returned memory.
pub fn translate(allocator: std.mem.Allocator, config: Config, include_dirs: []const []const u8, source: []const u8) error{OutOfMemory}!?[]const u8 {
const file_path = try std.fs.path.join(allocator, &[_][]const u8{ config.global_cache_path.?, "cimport.h" });
defer allocator.free(file_path);

var file = std.fs.createFileAbsolute(file_path, .{}) catch |err| {
std.log.warn("failed to create file '{s}': {}", .{ file_path, err });
return null;
};
defer file.close();
defer std.fs.deleteFileAbsolute(file_path) catch |err| {
std.log.warn("failed to delete file '{s}': {}", .{ file_path, err });
};

_ = file.write(source) catch |err| {
std.log.warn("failed to write to '{s}': {}", .{ file_path, err });
};

const base_include_dirs = blk: {
const target_info = std.zig.system.NativeTargetInfo.detect(allocator, .{}) catch break :blk null;
var native_paths = std.zig.system.NativePaths.detect(allocator, target_info) catch break :blk null;
defer native_paths.deinit();

break :blk native_paths.include_dirs.toOwnedSlice();
};
defer if (base_include_dirs) |dirs| {
for (dirs) |path| {
allocator.free(path);
}
allocator.free(dirs);
};

const base_args = &[_][]const u8{
config.zig_exe_path.?,
"translate-c",
"--enable-cache",
"--zig-lib-dir",
config.zig_lib_path.?,
"--cache-dir",
config.global_cache_path.?,
};

const argc = base_args.len + 2 * (include_dirs.len + if (base_include_dirs) |dirs| dirs.len else 0) + 1;
var argv = try std.ArrayListUnmanaged([]const u8).initCapacity(allocator, argc);
defer argv.deinit(allocator);

argv.appendSliceAssumeCapacity(base_args);

if (base_include_dirs) |dirs| {
for (dirs) |include_dir| {
argv.appendAssumeCapacity("-I");
argv.appendAssumeCapacity(include_dir);
}
}

for (include_dirs) |include_dir| {
argv.appendAssumeCapacity("-I");
argv.appendAssumeCapacity(include_dir);
}

argv.appendAssumeCapacity(file_path);

const result = std.ChildProcess.exec(.{
.allocator = allocator,
.argv = argv.items,
}) catch |err| {
std.log.err("Failed to execute zig translate-c process, error: {}", .{err});
return null;
};

defer allocator.free(result.stdout);
defer allocator.free(result.stderr);

return switch (result.term) {
.Exited => |code| if (code == 0) {
return try allocator.dupe(u8, std.mem.sliceTo(result.stdout, '\n'));
} else {
// TODO convert failure to `textDocument/publishDiagnostics`
std.log.err("zig translate-c process failed, code: {}, stderr: '{s}'", .{ code, result.stderr });
return null;
},
else => {
std.log.err("zig translate-c process terminated '{}'", .{result.term});
return null;
},
};
}

fn extractString(str: []const u8) []const u8 {
if (std.mem.startsWith(u8, str, "\"") and std.mem.endsWith(u8, str, "\"")) {
return str[1 .. str.len - 1];
} else {
return str;
}
}
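A quick test sketch for `extractString`, assuming it is appended to this file so the helper is in scope:

```zig
test "extractString strips surrounding quotes" {
    try std.testing.expectEqualStrings("GLFW/glfw3.h", extractString("\"GLFW/glfw3.h\""));
    try std.testing.expectEqualStrings("no quotes", extractString("no quotes"));
}
```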