From d0c62157b46c6a5e84171462305407ebe2f69e8b Mon Sep 17 00:00:00 2001
From: FalsePattern <30945458+FalsePattern@users.noreply.github.com>
Date: Wed, 21 Jun 2023 17:24:29 +0200
Subject: [PATCH] Added legacy-style json api wrapper

---
 src/DocumentStore.zig               |  20 ++--
 src/Server.zig                      |  13 +-
 src/configuration.zig               |  10 +-
 src/legacy_json.zig                 | 176 ++++++++++++++++++++++++++++
 src/main.zig                        |   9 +-
 tests/context.zig                   |  15 +--
 tests/language_features/cimport.zig |   2 +-
 7 files changed, 209 insertions(+), 36 deletions(-)
 create mode 100644 src/legacy_json.zig

diff --git a/src/DocumentStore.zig b/src/DocumentStore.zig
index 4646015..afe607b 100644
--- a/src/DocumentStore.zig
+++ b/src/DocumentStore.zig
@@ -17,6 +17,8 @@ const AstGen = @import("stage2/AstGen.zig");
 const Zir = @import("stage2/Zir.zig");
 const InternPool = @import("analyser/InternPool.zig");
 
+const legacy_json = @import("legacy_json.zig");
+
 const DocumentStore = @This();
 
 pub const Uri = []const u8;
@@ -43,10 +45,10 @@ const BuildFile = struct {
 
     pub fn deinit(self: *BuildFile, allocator: std.mem.Allocator) void {
         allocator.free(self.uri);
-        std.json.parseFree(BuildConfig, allocator, self.config);
+        legacy_json.parseFree(BuildConfig, allocator, self.config);
         if (self.builtin_uri) |builtin_uri| allocator.free(builtin_uri);
         if (self.build_associated_config) |cfg| {
-            std.json.parseFree(BuildAssociatedConfig, allocator, cfg);
+            legacy_json.parseFree(BuildAssociatedConfig, allocator, cfg);
         }
     }
 };
@@ -301,7 +303,7 @@ pub fn applySave(self: *DocumentStore, handle: *const Handle) !void {
             return;
         };
 
-        std.json.parseFree(BuildConfig, self.allocator, build_file.config);
+        legacy_json.parseFree(BuildConfig, self.allocator, build_file.config);
         build_file.config = build_config;
     }
 }
@@ -326,7 +328,7 @@ pub fn invalidateBuildFiles(self: *DocumentStore) void {
             return;
         };
 
-        std.json.parseFree(BuildConfig, self.allocator, build_file.config);
+        legacy_json.parseFree(BuildConfig, self.allocator, build_file.config);
         build_file.config = build_config;
     }
 }
@@ -457,7 +459,7 @@ pub fn isInStd(uri: Uri) bool {
 }
 
 /// looks for a `zls.build.json` file in the build file directory
-/// has to be freed with `std.json.parseFree`
+/// has to be freed with `legacy_json.parseFree`
 fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: BuildFile) !BuildAssociatedConfig {
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
@@ -473,7 +475,7 @@ fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: Bu
     const file_buf = try config_file.readToEndAlloc(allocator, std.math.maxInt(usize));
    defer allocator.free(file_buf);
 
-    return try std.json.parseFromSlice(BuildAssociatedConfig, allocator, file_buf, .{});
+    return try legacy_json.parseFromSlice(BuildAssociatedConfig, allocator, file_buf, .{});
 }
 
 /// Caller owns returned memory!
@@ -510,7 +512,7 @@ pub fn executeBuildRunner(
 }
 
 /// Runs the build.zig and extracts include directories and packages
-/// Has to be freed with `std.json.parseFree`
+/// Has to be freed with `legacy_json.parseFree`
 pub fn loadBuildConfiguration(
     allocator: std.mem.Allocator,
     build_file: BuildFile,
@@ -564,13 +566,13 @@ pub fn loadBuildConfiguration(
         // to the BuildConfig type
         .ignore_unknown_fields = true,
     };
-    const build_config = std.json.parseFromSlice(
+    const build_config = legacy_json.parseFromSlice(
         BuildConfig,
         allocator,
         zig_run_result.stdout,
         parse_options,
     ) catch return error.RunFailed;
-    errdefer std.json.parseFree(BuildConfig, allocator, build_config);
+    errdefer legacy_json.parseFree(BuildConfig, allocator, build_config);
 
     for (build_config.packages) |*pkg| {
         const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ build_file_path, "..", pkg.path });
diff --git a/src/Server.zig b/src/Server.zig
index 8cf0592..f1a3831 100644
--- a/src/Server.zig
+++ b/src/Server.zig
@@ -1286,12 +1286,12 @@ const Message = union(enum) {
         };
     }
 
-    pub fn fromJsonValueTree(tree: std.json.ValueTree) error{InvalidRequest}!Message {
+    pub fn fromJsonValueTree(root: std.json.Value) error{InvalidRequest}!Message {
         const tracy_zone = tracy.trace(@src());
         defer tracy_zone.end();
 
-        if (tree.root != .object) return error.InvalidRequest;
-        const object = tree.root.object;
+        if (root != .object) return error.InvalidRequest;
+        const object = root.object;
 
         if (object.get("id")) |id_obj| {
             comptime std.debug.assert(!tres.isAllocatorRequired(types.RequestId));
@@ -1348,16 +1348,13 @@ pub fn processJsonRpc(
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
-    var parser = std.json.Parser.init(server.arena.allocator(), .alloc_always);
-    defer parser.deinit();
-
-    var tree = parser.parse(json) catch |err| {
+    var tree = std.json.parseFromSlice(std.json.Value, server.arena.allocator(), json, .{}) catch |err| {
         log.err("failed to parse message: {}", .{err});
         return; // maybe panic?
     };
     defer tree.deinit();
 
-    const message = Message.fromJsonValueTree(tree) catch |err| {
+    const message = Message.fromJsonValueTree(tree.value) catch |err| {
         log.err("failed to parse message: {}", .{err});
         return; // maybe panic?
     };
diff --git a/src/configuration.zig b/src/configuration.zig
index e81d717..18707bf 100644
--- a/src/configuration.zig
+++ b/src/configuration.zig
@@ -10,6 +10,8 @@ const offsets = @import("offsets.zig");
 
 const logger = std.log.scoped(.zls_config);
 
+const legacy_json = @import("legacy_json.zig");
+
 pub fn loadFromFile(allocator: std.mem.Allocator, file_path: []const u8) ?Config {
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
@@ -34,7 +36,7 @@ pub fn loadFromFile(allocator: std.mem.Allocator, file_path: []const u8) ?Config
     scanner.enableDiagnostics(&parse_diagnostics);
 
     // TODO: report errors using "textDocument/publishDiagnostics"
-    var config = std.json.parseFromTokenSource(Config, allocator, &scanner, parse_options) catch |err| {
+    var config = legacy_json.parseFromTokenSource(Config, allocator, &scanner, parse_options) catch |err| {
         logger.warn(
             "{s}:{d}:{d}: Error while parsing configuration file {}",
             .{ file_path, parse_diagnostics.getLine(), parse_diagnostics.getColumn(), err },
@@ -82,7 +84,7 @@ pub fn configChanged(config: *Config, runtime_zig_version: *?ZigVersionWrapper,
         logger.info("Using zig executable '{s}'", .{exe_path});
 
         var env = getZigEnv(allocator, exe_path) orelse break :blk;
-        defer std.json.parseFree(Env, allocator, env);
+        defer legacy_json.parseFree(Env, allocator, env);
 
         if (config.zig_lib_path) |lib_path| allocator.free(lib_path);
         // Make sure the path is absolute
@@ -177,7 +179,7 @@ pub const Env = struct {
     target: ?[]const u8 = null,
 };
 
-/// result has to be freed with `std.json.parseFree`
+/// result has to be freed with `legacy_json.parseFree`
 pub fn getZigEnv(allocator: std.mem.Allocator, zig_exe_path: []const u8) ?Env {
     const zig_env_result = std.ChildProcess.exec(.{
         .allocator = allocator,
@@ -202,7 +204,7 @@ pub fn getZigEnv(allocator: std.mem.Allocator, zig_exe_path: []const u8) ?Env {
         else => logger.err("zig env invocation failed", .{}),
     }
 
-    return std.json.parseFromSlice(
+    return legacy_json.parseFromSlice(
         Env,
         allocator,
         zig_env_result.stdout,
diff --git a/src/legacy_json.zig b/src/legacy_json.zig
new file mode 100644
index 0000000..41006ec
--- /dev/null
+++ b/src/legacy_json.zig
@@ -0,0 +1,176 @@
+const std = @import("std");
+const Allocator = std.mem.Allocator;
+const ParseError = std.json.ParseError;
+const Scanner = std.json.Scanner;
+const ParseOptions = std.json.ParseOptions;
+pub fn parseFromSlice(
+    comptime T: type,
+    allocator: Allocator,
+    s: []const u8,
+    options: ParseOptions,
+) (ParseError(Scanner) || std.mem.Allocator.Error)!T {
+    const json = try std.json.parseFromSlice(T, allocator, s, options);
+    defer json.deinit();
+    return deepCopy(T, allocator, json.value);
+}
+pub fn parseFromTokenSource(
+    comptime T: type,
+    allocator: Allocator,
+    scanner_or_reader: anytype,
+    options: ParseOptions,
+) (ParseError(@TypeOf(scanner_or_reader.*)) || std.mem.Allocator.Error)!T {
+    const json = try std.json.parseFromTokenSource(T, allocator, scanner_or_reader, options);
+    defer json.deinit();
+    return try deepCopy(T, allocator, json.value);
+}
+
+/// Recursively copies a struct, reallocating pointers and slices
+fn deepCopy(comptime T: type, allocator: Allocator, value: T) !T {
+    switch (@typeInfo(T)) {
+        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => return value,
+        .Optional => {
+            if (value) |v| {
+                return try deepCopy(@TypeOf(v), allocator, v);
+            }
+            return null;
+        },
+        .Union => |unionInfo| {
+            if (unionInfo.tag_type) |UnionTagType| {
+                inline for (unionInfo.fields) |u_field| {
+                    if (value == @field(UnionTagType, u_field.name)) {
+                        return @unionInit(T, u_field.name, try deepCopy(u_field.type, allocator, @field(value, u_field.name)));
+                    }
+                }
+            } else {
+                unreachable;
+            }
+        },
+        .Struct => |structInfo| {
+            var result: T = undefined;
+            inline for (structInfo.fields) |field| {
+                if (field.is_comptime) @compileError("comptime fields are not supported: " ++ @typeName(T) ++ "." ++ field.name);
+                const field_value = @field(value, field.name);
+                @field(result, field.name) = try deepCopy(@TypeOf(field_value), allocator, field_value);
+            }
+            return result;
+        },
+        .Array, .Vector => {
+            var r: T = undefined;
+            for (value, 0..) |v, i| {
+                r[i] = try deepCopy(@TypeOf(v), allocator, v);
+            }
+            return r;
+        },
+        .Pointer => |ptrInfo| {
+            switch (ptrInfo.size) {
+                .One => {
+                    const r: *ptrInfo.child = try allocator.create(ptrInfo.child);
+                    errdefer allocator.destroy(r);
+                    r.* = try deepCopy(ptrInfo.child, allocator, value.*);
+                    return r;
+                },
+                .Slice => {
+                    var result = std.ArrayList(ptrInfo.child).init(allocator);
+                    errdefer result.deinit();
+                    for (value) |v| {
+                        try result.append(try deepCopy(ptrInfo.child, allocator, v));
+                    }
+                    if (ptrInfo.sentinel) |some| {
+                        const sentinel_value = @ptrCast(*align(1) const ptrInfo.child, some).*;
+                        return try result.toOwnedSliceSentinel(sentinel_value);
+                    }
+                    return try result.toOwnedSlice();
+                },
+
+                else => @compileError("Unable to deepCopy type '" ++ @typeName(T) ++ "'"),
+            }
+        },
+
+        else => @compileError("Unable to deepCopy type '" ++ @typeName(T) ++ "'"),
+    }
+}
+/// Releases resources created by parseFromSlice() or parseFromTokenSource().
+pub fn parseFree(comptime T: type, allocator: Allocator, value: T) void {
+    switch (@typeInfo(T)) {
+        .Bool, .Float, .ComptimeFloat, .Int, .ComptimeInt, .Enum => {},
+        .Optional => {
+            if (value) |v| {
+                return parseFree(@TypeOf(v), allocator, v);
+            }
+        },
+        .Union => |unionInfo| {
+            if (unionInfo.tag_type) |UnionTagType| {
+                inline for (unionInfo.fields) |u_field| {
+                    if (value == @field(UnionTagType, u_field.name)) {
+                        parseFree(u_field.type, allocator, @field(value, u_field.name));
+                        break;
+                    }
+                }
+            } else {
+                unreachable;
+            }
+        },
+        .Struct => |structInfo| {
+            inline for (structInfo.fields) |field| {
+                var should_free = true;
+                if (field.default_value) |default| {
+                    switch (@typeInfo(field.type)) {
+                        // We must not attempt to free pointers to struct default values
+                        .Pointer => |fieldPtrInfo| {
+                            const field_value = @field(value, field.name);
+                            const field_ptr = switch (fieldPtrInfo.size) {
+                                .One => field_value,
+                                .Slice => field_value.ptr,
+                                else => unreachable, // Other pointer types are not parseable
+                            };
+                            const field_addr = @intFromPtr(field_ptr);
+
+                            const casted_default = @ptrCast(*const field.type, @alignCast(@alignOf(field.type), default)).*;
+                            const default_ptr = switch (fieldPtrInfo.size) {
+                                .One => casted_default,
+                                .Slice => casted_default.ptr,
+                                else => unreachable, // Other pointer types are not parseable
+                            };
+                            const default_addr = @intFromPtr(default_ptr);
+
+                            if (field_addr == default_addr) {
+                                should_free = false;
+                            }
+                        },
+                        else => {},
+                    }
+                }
+                if (should_free) {
+                    parseFree(field.type, allocator, @field(value, field.name));
+                }
+            }
+        },
+        .Array => |arrayInfo| {
+            for (value) |v| {
+                parseFree(arrayInfo.child, allocator, v);
+            }
+        },
+        .Vector => |vecInfo| {
+            var i: usize = 0;
+            while (i < vecInfo.len) : (i += 1) {
+                parseFree(vecInfo.child, allocator, value[i]);
+            }
+        },
+        .Pointer => |ptrInfo| {
+            switch (ptrInfo.size) {
+                .One => {
+                    parseFree(ptrInfo.child, allocator, value.*);
+                    allocator.destroy(value);
+                },
+                .Slice => {
+                    for (value) |v| {
+                        parseFree(ptrInfo.child, allocator, v);
+                    }
+                    allocator.free(value);
+                },
+                else => unreachable,
+            }
+        },
+        else => unreachable,
+    }
+}
diff --git a/src/main.zig b/src/main.zig
index 4d51d96..442c33d 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -10,6 +10,7 @@ const Server = @import("Server.zig");
 const Header = @import("Header.zig");
 const debug = @import("debug.zig");
 const binned_allocator = @import("binned_allocator");
+const legacy_json = @import("legacy_json.zig");
 
 const logger = std.log.scoped(.zls_main);
 const message_logger = std.log.scoped(.message);
@@ -147,8 +148,8 @@ fn updateConfig(
         defer allocator.free(json_message);
         try file.reader().readNoEof(json_message);
 
-        const new_config = try std.json.parseFromSlice(Config, allocator, json_message, .{});
-        std.json.parseFree(Config, allocator, config.*);
+        const new_config = try legacy_json.parseFromSlice(Config, allocator, json_message, .{});
+        legacy_json.parseFree(Config, allocator, config.*);
         config.* = new_config;
     }
 }
@@ -334,7 +335,7 @@ fn parseArgs(allocator: std.mem.Allocator) !ParseArgsResult {
     if (specified.get(.@"show-config-path")) {
         const new_config = try getConfig(allocator, result.config_path);
         defer if (new_config.config_path) |path| allocator.free(path);
-        defer std.json.parseFree(Config, allocator, new_config.config);
+        defer legacy_json.parseFree(Config, allocator, new_config.config);
 
         const full_path = if (new_config.config_path) |path| blk: {
             break :blk try std.fs.path.resolve(allocator, &.{ path, "zls.json" });
@@ -392,7 +393,7 @@ pub fn main() !void {
     logger.info("Starting ZLS {s} @ '{s}'", .{ build_options.version, result.zls_exe_path });
 
     var config = try getConfig(allocator, result.config_path);
-    defer std.json.parseFree(Config, allocator, config.config);
+    defer legacy_json.parseFree(Config, allocator, config.config);
     defer if (config.config_path) |path| allocator.free(path);
 
     if (result.replay_enabled and config.config.replay_session_path == null and config.config.record_session_path == null) {
diff --git a/tests/context.zig b/tests/context.zig
index df21fa2..e523ba0 100644
--- a/tests/context.zig
+++ b/tests/context.zig
@@ -60,7 +60,7 @@ pub const Context = struct {
     }
 
     pub fn deinit(self: *Context) void {
-        std.json.parseFree(Config, allocator, self.config.*);
+        @import("../src/legacy_json.zig").parseFree(Config, allocator, self.config.*);
         allocator.destroy(self.config);
 
         self.request("shutdown", "{}", null) catch {};
@@ -129,14 +129,10 @@ pub const Context = struct {
 
         const expected = expect orelse return;
 
-        // parse the response
-        var parser = std.json.Parser.init(allocator, .alloc_always);
-        defer parser.deinit();
-
-        var tree = try parser.parse(response_bytes);
+        var tree = try std.json.parseFromSlice(std.json.Value, allocator, response_bytes, .{});
         defer tree.deinit();
 
-        const response = tree.root.object;
+        const response = tree.value.object;
 
         // assertions
         try std.testing.expectEqualStrings("2.0", response.get("jsonrpc").?.string);
@@ -195,11 +191,10 @@ pub const Context = struct {
         const response_bytes = try self.requestAlloc(method, buffer.items);
         defer self.server.allocator.free(response_bytes);
 
-        var parser = std.json.Parser.init(self.arena.allocator(), .alloc_always);
-        var tree = try parser.parse(try self.arena.allocator().dupe(u8, response_bytes));
+        var tree = try std.json.parseFromSlice(std.json.Value, self.arena.allocator(), try self.arena.allocator().dupe(u8, response_bytes), .{});
 
         // TODO validate jsonrpc and id
 
-        return tres.parse(Response(Result), tree.root, self.arena.allocator());
+        return tres.parse(Response(Result), tree.value, self.arena.allocator());
     }
 };
diff --git a/tests/language_features/cimport.zig b/tests/language_features/cimport.zig
index 31ce0cb..069b519 100644
--- a/tests/language_features/cimport.zig
+++ b/tests/language_features/cimport.zig
@@ -108,7 +108,7 @@ fn testTranslate(c_source: []const u8) !translate_c.Result {
     if (!std.process.can_spawn) return error.SkipZigTest;
 
     var config: zls.Config = .{};
-    defer std.json.parseFree(zls.Config, allocator, config);
+    defer @import("../../src/legacy_json.zig").parseFree(zls.Config, allocator, config);
 
     var runtime_zig_version: ?zls.ZigVersionWrapper = null;
     defer if (runtime_zig_version) |*v| v.free();
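
Usage sketch (not part of the patch): the wrapper restores the old std.json calling convention, where a parse call returns a plain value allocated with the caller's allocator and `parseFree` releases it, instead of handing out a `std.json.Parsed(T)` arena handle. The example below is a minimal sketch assuming Zig 0.11-era std.json and a caller that can import `legacy_json.zig` from within `src/`; the `Example` type and the JSON string are made up for illustration.

    const std = @import("std");
    const legacy_json = @import("legacy_json.zig");

    const Example = struct {
        name: []const u8,
        count: u32,
    };

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        const allocator = gpa.allocator();

        // parseFromSlice deep-copies the parsed value out of std.json's
        // temporary arena, so only the returned value has to be kept and freed.
        const value = try legacy_json.parseFromSlice(
            Example,
            allocator,
            "{\"name\": \"zls\", \"count\": 3}",
            .{},
        );
        // Old std.json contract: the caller releases the result explicitly.
        defer legacy_json.parseFree(Example, allocator, value);

        std.debug.print("{s}: {d}\n", .{ value.name, value.count });
    }

The deep copy is what makes this safe: the `Parsed(Example)` returned by `std.json.parseFromSlice` is deinitialized before the wrapper returns, so callers never hold a pointer into its arena.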
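
The subtlest part of `parseFree` is the `@intFromPtr` comparison against a struct field's declared default: a field that still aliases its default (for example a string-literal default) points at constant program data that must never reach `allocator.free`, and the old std.json.parseFree contained the same guard. A small sketch of the case it covers, using a made-up config type rather than any real ZLS type:

    const std = @import("std");
    const legacy_json = @import("legacy_json.zig");

    const FakeConfig = struct {
        record_session_path: ?[]const u8 = null,
        // Default aliases a string literal in constant memory.
        builtin_path: []const u8 = "builtin",
    };

    test "parseFree skips slices still aliasing their default" {
        const allocator = std.testing.allocator;
        const cfg = FakeConfig{}; // builtin_path still points at the literal
        // parseFree compares builtin_path.ptr with the default's address and,
        // because they match, skips allocator.free for that field.
        legacy_json.parseFree(FakeConfig, allocator, cfg);
    }

A value that actually came out of `parseFromSlice` never aliases a default, because `deepCopy` reallocates every slice, so such fields are freed as usual.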