update to new std.json api (#1191)
* update lsp.zig
* update to new `std.json` api
* update min zig version
* fix json api
parent 16453c31b0
commit 3500aa7a76
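In short, the new `std.json` API drops `TokenStream`: parsing goes through `std.json.parseFromSlice` (or `std.json.parseFromTokenSource`), the allocator is passed explicitly instead of living in `std.json.ParseOptions`, and `std.json.parseFree` now takes the allocator as its second argument. A minimal before/after sketch of that migration, using a made-up `Example` type rather than any real zls type (at the `std.json` stage this commit targets, `parseFromSlice` still returns the value directly rather than a wrapper):

```zig
const std = @import("std");

// Illustrative type only; not part of the zls code base.
const Example = struct { name: []const u8 };

test "std.json API migration sketch" {
    const allocator = std.testing.allocator;
    const bytes =
        \\{"name": "zls"}
    ;

    // Old API (removed by this commit):
    //   var token_stream = std.json.TokenStream.init(bytes);
    //   const old = try std.json.parse(Example, &token_stream, .{ .allocator = allocator });
    //   defer std.json.parseFree(Example, old, .{ .allocator = allocator });

    // New API, as used throughout this diff:
    const parsed = try std.json.parseFromSlice(Example, allocator, bytes, .{});
    defer std.json.parseFree(Example, allocator, parsed);
    try std.testing.expectEqualStrings("zls", parsed.name);
}
```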
@@ -7,7 +7,7 @@ const zls_version = std.builtin.Version{ .major = 0, .minor = 11, .patch = 0 };
 pub fn build(b: *std.build.Builder) !void {
     comptime {
         const current_zig = builtin.zig_version;
-        const min_zig = std.SemanticVersion.parse("0.11.0-dev.3003+e1f5ad3cc") catch unreachable; // std.http: do -> wait, fix redirects
+        const min_zig = std.SemanticVersion.parse("0.11.0-dev.3134+018b743c7") catch unreachable; // std.http: do -> wait, fix redirects
         if (current_zig.order(min_zig) == .lt) {
             @compileError(std.fmt.comptimePrint("Your Zig version v{} does not meet the minimum build requirement of v{}", .{ current_zig, min_zig }));
         }
@@ -8,8 +8,8 @@
             .hash = "122028c00915d9b37296059be8a3883c718dbb5bd174350caedf152fed1f46f99607",
         },
         .tres = .{
-            .url = "https://github.com/ziglibs/tres/archive/707a09313b42e05d6ae22d1590499eece5f968ce.tar.gz",
-            .hash = "1220beaae8d152baa941a10b7ef3d3a59d093b257047035e2373c3c2f876ad29ccc8",
+            .url = "https://github.com/ziglibs/tres/archive/220d01f3931595e3a2e2a6a0693363c0bfaf47e9.tar.gz",
+            .hash = "12204d32c44b494c0cbe553811dedfb7a73da37b13b492872dd4edc0340983072697",
         },
         .diffz = .{
             .url = "https://github.com/ziglibs/diffz/archive/b966296b4489eb082b0831ec9a37d6f5e1906040.tar.gz",
@@ -43,10 +43,10 @@ const BuildFile = struct {
 
     pub fn deinit(self: *BuildFile, allocator: std.mem.Allocator) void {
         allocator.free(self.uri);
-        std.json.parseFree(BuildConfig, self.config, .{ .allocator = allocator });
+        std.json.parseFree(BuildConfig, allocator, self.config);
         if (self.builtin_uri) |builtin_uri| allocator.free(builtin_uri);
         if (self.build_associated_config) |cfg| {
-            std.json.parseFree(BuildAssociatedConfig, cfg, .{ .allocator = allocator });
+            std.json.parseFree(BuildAssociatedConfig, allocator, cfg);
         }
     }
 };
@@ -301,7 +301,7 @@ pub fn applySave(self: *DocumentStore, handle: *const Handle) !void {
             return;
         };
 
-        std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });
+        std.json.parseFree(BuildConfig, self.allocator, build_file.config);
         build_file.config = build_config;
     }
 }
@@ -326,7 +326,7 @@ pub fn invalidateBuildFiles(self: *DocumentStore) void {
             return;
         };
 
-        std.json.parseFree(BuildConfig, build_file.config, .{ .allocator = self.allocator });
+        std.json.parseFree(BuildConfig, self.allocator, build_file.config);
         build_file.config = build_config;
     }
 }
@@ -473,8 +473,7 @@ fn loadBuildAssociatedConfiguration(allocator: std.mem.Allocator, build_file: Bu
     const file_buf = try config_file.readToEndAlloc(allocator, std.math.maxInt(usize));
     defer allocator.free(file_buf);
 
-    var token_stream = std.json.TokenStream.init(file_buf);
-    return try std.json.parse(BuildAssociatedConfig, &token_stream, .{ .allocator = allocator });
+    return try std.json.parseFromSlice(BuildAssociatedConfig, allocator, file_buf, .{});
 }
 
 /// Caller owns returned memory!
@@ -558,16 +557,19 @@ pub fn loadBuildConfiguration(
     }
 
     const parse_options = std.json.ParseOptions{
-        .allocator = allocator,
         // We ignore unknown fields so people can roll
         // their own build runners in libraries with
         // the only requirement being general adherance
        // to the BuildConfig type
         .ignore_unknown_fields = true,
     };
-    var token_stream = std.json.TokenStream.init(zig_run_result.stdout);
-    var build_config = std.json.parse(BuildConfig, &token_stream, parse_options) catch return error.RunFailed;
-    errdefer std.json.parseFree(BuildConfig, build_config, parse_options);
+    const build_config = std.json.parseFromSlice(
+        BuildConfig,
+        allocator,
+        zig_run_result.stdout,
+        parse_options,
+    ) catch return error.RunFailed;
+    errdefer std.json.parseFree(BuildConfig, allocator, build_config);
 
     for (build_config.packages) |*pkg| {
         const pkg_abs_path = try std.fs.path.resolve(allocator, &[_][]const u8{ build_file_path, "..", pkg.path });
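For reference, the hunk above leans on `ignore_unknown_fields` so third-party build runners can emit extra JSON fields without breaking the parse; only the option stays in `ParseOptions`, since the allocator is no longer part of it. A hedged sketch of that call shape, with a stand-in `BuildConfig` whose field is invented for illustration:

```zig
const std = @import("std");

// Stand-in for zls's BuildConfig; the field here is invented.
const BuildConfig = struct { include_dirs: []const []const u8 = &.{} };

test "ignore_unknown_fields with the new parseFromSlice" {
    const allocator = std.testing.allocator;
    const stdout =
        \\{"include_dirs": ["src"], "not_in_build_config": true}
    ;
    const parse_options = std.json.ParseOptions{ .ignore_unknown_fields = true };
    const cfg = try std.json.parseFromSlice(BuildConfig, allocator, stdout, parse_options);
    defer std.json.parseFree(BuildConfig, allocator, cfg);
    try std.testing.expectEqualStrings("src", cfg.include_dirs[0]);
}
```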
@@ -686,7 +686,7 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory}
     // but not sure how standard this "standard" really is
 
     var new_zig_exe = false;
-    const result = json.Array;
+    const result = json.array;
 
     inline for (std.meta.fields(Config), result.items) |field, value| {
         const ft = if (@typeInfo(field.type) == .Optional)
@@ -695,10 +695,10 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory}
             field.type;
         const ti = @typeInfo(ft);
 
-        if (value != .Null) {
+        if (value != .null) {
             const new_value: field.type = switch (ft) {
                 []const u8 => switch (value) {
-                    .String => |s| blk: {
+                    .string => |s| blk: {
                         const trimmed = std.mem.trim(u8, s, " ");
                         if (trimmed.len == 0 or std.mem.eql(u8, trimmed, "nil")) {
                             log.warn("Ignoring new value for \"zls.{s}\": the given new value is invalid", .{field.name});
@@ -723,7 +723,7 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory}
                 },
                 else => switch (ti) {
                     .Int => switch (value) {
-                        .Integer => |val| std.math.cast(ft, val) orelse blk: {
+                        .integer => |val| std.math.cast(ft, val) orelse blk: {
                             log.warn("Ignoring new value for \"zls.{s}\": the given new value is invalid", .{field.name});
                             break :blk @field(server.config, field.name);
                         },
@@ -733,14 +733,14 @@ fn handleConfiguration(server: *Server, json: std.json.Value) error{OutOfMemory}
                         },
                     },
                     .Bool => switch (value) {
-                        .Bool => |b| b,
+                        .bool => |b| b,
                         else => blk: {
                             log.warn("Ignoring new value for \"zls.{s}\": the given new value has an invalid type", .{field.name});
                             break :blk @field(server.config, field.name);
                         },
                     },
                     .Enum => switch (value) {
-                        .String => |s| blk: {
+                        .string => |s| blk: {
                             const trimmed = std.mem.trim(u8, s, " ");
                             break :blk std.meta.stringToEnum(field.type, trimmed) orelse inner: {
                                 log.warn("Ignoring new value for \"zls.{s}\": the given new value is invalid", .{field.name});
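The `handleConfiguration` hunks are mostly mechanical: the `std.json.Value` union tags were lowercased (`.Null`, `.String`, `.Integer`, `.Bool`, `.Array`, `.Object` became `.null`, `.string`, `.integer`, `.bool`, `.array`, `.object`), while `@typeInfo` tags such as `.Int`, `.Bool`, and `.Enum` are unrelated and keep their capitalization. A small sketch of switching on a `Value` under the renamed tags (the helper below is invented, not zls code):

```zig
const std = @import("std");

// Hedged sketch: classify a dynamically typed JSON value using the
// renamed (lowercase) std.json.Value tags.
fn describe(value: std.json.Value) []const u8 {
    return switch (value) {
        .null => "null",
        .bool => |b| if (b) "true" else "false",
        .integer => "integer",
        .float => "float",
        .string => |s| s,
        .array => "array",
        .object => "object",
        else => "other",
    };
}
```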
@@ -1298,8 +1298,8 @@ const Message = union(enum) {
         const tracy_zone = tracy.trace(@src());
         defer tracy_zone.end();
 
-        if (tree.root != .Object) return error.InvalidRequest;
-        const object = tree.root.Object;
+        if (tree.root != .object) return error.InvalidRequest;
+        const object = tree.root.object;
 
         if (object.get("id")) |id_obj| {
             comptime std.debug.assert(!tres.isAllocatorRequired(types.RequestId));
@@ -1307,11 +1307,11 @@ const Message = union(enum) {
 
             if (object.get("method")) |method_obj| {
                 const msg_method = switch (method_obj) {
-                    .String => |str| str,
+                    .string => |str| str,
                     else => return error.InvalidRequest,
                 };
 
-                const msg_params = object.get("params") orelse .Null;
+                const msg_params = object.get("params") orelse .null;
 
                 return .{ .RequestMessage = .{
                     .id = msg_id,
@@ -1319,13 +1319,13 @@ const Message = union(enum) {
                     .params = msg_params,
                 } };
             } else {
-                const result = object.get("result") orelse .Null;
-                const error_obj = object.get("error") orelse .Null;
+                const result = object.get("result") orelse .null;
+                const error_obj = object.get("error") orelse .null;
 
                 comptime std.debug.assert(!tres.isAllocatorRequired(?types.ResponseError));
                 const err = tres.parse(?types.ResponseError, error_obj, null) catch return error.InvalidRequest;
 
-                if (result != .Null and err != null) return error.InvalidRequest;
+                if (result != .null and err != null) return error.InvalidRequest;
 
                 return .{ .ResponseMessage = .{
                     .id = msg_id,
@@ -1335,11 +1335,11 @@ const Message = union(enum) {
             }
         } else {
             const msg_method = switch (object.get("method") orelse return error.InvalidRequest) {
-                .String => |str| str,
+                .string => |str| str,
                 else => return error.InvalidRequest,
             };
 
-            const msg_params = object.get("params") orelse .Null;
+            const msg_params = object.get("params") orelse .null;
 
             return .{ .NotificationMessage = .{
                 .method = msg_method,
@@ -1356,7 +1356,7 @@ pub fn processJsonRpc(
     const tracy_zone = tracy.trace(@src());
     defer tracy_zone.end();
 
-    var parser = std.json.Parser.init(server.arena.allocator(), false);
+    var parser = std.json.Parser.init(server.arena.allocator(), .alloc_always);
     defer parser.deinit();
 
     var tree = parser.parse(json) catch |err| {
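`std.json.Parser`'s second parameter also changed from a bool (whether to copy strings) to the `std.json.AllocWhen` enum (`.alloc_if_needed` or `.alloc_always`); this commit opts for `.alloc_always`. A hedged sketch of the `ValueTree` round trip as it looks after this change, with an invented JSON-RPC payload:

```zig
const std = @import("std");

test "parse a JSON-RPC message into a ValueTree" {
    const allocator = std.testing.allocator;

    // The second argument is now std.json.AllocWhen instead of a bool;
    // .alloc_always copies every string into the parser's allocator.
    var parser = std.json.Parser.init(allocator, .alloc_always);
    defer parser.deinit();

    var tree = try parser.parse(
        \\{"jsonrpc": "2.0", "method": "initialized", "params": {}}
    );
    defer tree.deinit();

    // Value union tags are lowercase now: .object, .string, ...
    const object = tree.root.object;
    try std.testing.expectEqualStrings("2.0", object.get("jsonrpc").?.string);
}
```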
@@ -1050,12 +1050,8 @@ pub fn main() !void {
         }
     }
 
-    const parse_options = std.json.ParseOptions{
-        .allocator = gpa,
-    };
-    var token_stream = std.json.TokenStream.init(@embedFile("config.json"));
-    const config = try std.json.parse(Config, &token_stream, parse_options);
-    defer std.json.parseFree(Config, config, parse_options);
+    const config = try std.json.parseFromSlice(Config, gpa, @embedFile("config.json"), .{});
+    defer std.json.parseFree(Config, config, .{});
 
     try generateConfigFile(gpa, config, config_path);
     try generateSchemaFile(gpa, config, schema_path);
@@ -26,20 +26,19 @@ pub fn loadFromFile(allocator: std.mem.Allocator, file_path: []const u8) ?Config
     defer allocator.free(file_buf);
     @setEvalBranchQuota(10000);
 
-    var token_stream = std.json.TokenStream.init(file_buf);
-    const parse_options = std.json.ParseOptions{ .allocator = allocator, .ignore_unknown_fields = true };
+    const parse_options = std.json.ParseOptions{ .ignore_unknown_fields = true };
+    var parse_diagnostics: std.json.Diagnostics = undefined;
+
+    var scanner = std.json.Scanner.initCompleteInput(allocator, file_buf);
+    defer scanner.deinit();
+    scanner.enableDiagnostics(&parse_diagnostics);
 
     // TODO: use a better error reporting system or use ZON instead of JSON
     // TODO: report errors using "textDocument/publishDiagnostics"
-    var config = std.json.parse(Config, &token_stream, parse_options) catch |err| {
-        const loc = if (token_stream.slice.len == 0)
-            std.zig.Loc{ .line = 0, .column = 0, .source_line = "" }
-        else
-            std.zig.findLineColumn(file_buf, token_stream.i);
-        logger.warn("{s}:{d}:{d}: Error while parsing configuration file {}", .{ file_path, loc.line + 1, loc.column, err });
-        if (err == error.InvalidValueBegin) {
-            logger.warn("Maybe your configuration file contains a trailing comma", .{});
-        }
+    var config = std.json.parseFromTokenSource(Config, allocator, &scanner, parse_options) catch |err| {
+        logger.warn(
+            "{s}:{d}:{d}: Error while parsing configuration file {}",
+            .{ file_path, parse_diagnostics.getLine(), parse_diagnostics.getColumn(), err },
+        );
         return null;
     };
 
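The rewritten `loadFromFile` above wires a `std.json.Diagnostics` through a `Scanner` so a failed parse can be reported with a line and column instead of the old `std.zig.findLineColumn` dance. A minimal sketch of that pattern under the same API; the `Settings` type, `loadSettings` helper, and log format are invented for illustration:

```zig
const std = @import("std");

// Illustrative type only; not zls's Config.
const Settings = struct { enable_snippets: bool = true };

fn loadSettings(allocator: std.mem.Allocator, file_buf: []const u8) ?Settings {
    var diagnostics: std.json.Diagnostics = undefined;

    var scanner = std.json.Scanner.initCompleteInput(allocator, file_buf);
    defer scanner.deinit();
    scanner.enableDiagnostics(&diagnostics);

    return std.json.parseFromTokenSource(Settings, allocator, &scanner, .{
        .ignore_unknown_fields = true,
    }) catch |err| {
        // The scanner keeps the diagnostics up to date, so the error can be
        // reported with a position in the input.
        std.log.warn("{d}:{d}: {}", .{ diagnostics.getLine(), diagnostics.getColumn(), err });
        return null;
    };
}
```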
@@ -83,7 +82,7 @@ pub fn configChanged(config: *Config, runtime_zig_version: *?ZigVersionWrapper,
         logger.info("Using zig executable '{s}'", .{exe_path});
 
         var env = getZigEnv(allocator, exe_path) orelse break :blk;
-        defer std.json.parseFree(Env, env, .{ .allocator = allocator });
+        defer std.json.parseFree(Env, allocator, env);
 
         if (config.zig_lib_path) |lib_path| allocator.free(lib_path);
         // Make sure the path is absolute
@@ -203,14 +202,11 @@ pub fn getZigEnv(allocator: std.mem.Allocator, zig_exe_path: []const u8) ?Env {
         else => logger.err("zig env invocation failed", .{}),
     }
 
-    var token_stream = std.json.TokenStream.init(zig_env_result.stdout);
-    return std.json.parse(
+    return std.json.parseFromSlice(
         Env,
-        &token_stream,
-        .{
-            .allocator = allocator,
-            .ignore_unknown_fields = true,
-        },
+        allocator,
+        zig_env_result.stdout,
+        .{ .ignore_unknown_fields = true },
     ) catch {
         logger.err("Failed to parse zig env JSON result", .{});
         return null;
src/lsp.zig: 2837 changed lines (file diff suppressed because it is too large).
@@ -146,9 +146,8 @@ fn updateConfig(
         defer allocator.free(json_message);
         try file.reader().readNoEof(json_message);
 
-        var token_stream = std.json.TokenStream.init(json_message);
-        const new_config = try std.json.parse(Config, &token_stream, .{ .allocator = allocator });
-        std.json.parseFree(Config, config.*, .{ .allocator = allocator });
+        const new_config = try std.json.parseFromSlice(Config, allocator, json_message, .{});
+        std.json.parseFree(Config, allocator, config.*);
         config.* = new_config;
     }
 }
@@ -334,7 +333,7 @@ fn parseArgs(allocator: std.mem.Allocator) !ParseArgsResult {
     if (specified.get(.@"show-config-path")) {
         const new_config = try getConfig(allocator, result.config_path);
         defer if (new_config.config_path) |path| allocator.free(path);
-        defer std.json.parseFree(Config, new_config.config, .{ .allocator = allocator });
+        defer std.json.parseFree(Config, allocator, new_config.config);
 
         const full_path = if (new_config.config_path) |path| blk: {
             break :blk try std.fs.path.resolve(allocator, &.{ path, "zls.json" });
@@ -383,7 +382,7 @@ pub fn main() !void {
     logger.info("Starting ZLS {s} @ '{s}'", .{ build_options.version, result.zls_exe_path });
 
     var config = try getConfig(allocator, result.config_path);
-    defer std.json.parseFree(Config, config.config, .{ .allocator = allocator });
+    defer std.json.parseFree(Config, allocator, config.config);
     defer if (config.config_path) |path| allocator.free(path);
 
     if (result.replay_enabled and config.config.replay_session_path == null and config.config.record_session_path == null) {
@@ -60,7 +60,7 @@ pub const Context = struct {
     }
 
     pub fn deinit(self: *Context) void {
-        std.json.parseFree(Config, self.config.*, .{ .allocator = allocator });
+        std.json.parseFree(Config, allocator, self.config.*);
         allocator.destroy(self.config);
 
         self.request("shutdown", "{}", null) catch {};
@@ -130,17 +130,17 @@ pub const Context = struct {
         const expected = expect orelse return;
 
         // parse the response
-        var parser = std.json.Parser.init(allocator, false);
+        var parser = std.json.Parser.init(allocator, .alloc_always);
         defer parser.deinit();
 
         var tree = try parser.parse(response_bytes);
         defer tree.deinit();
 
-        const response = tree.root.Object;
+        const response = tree.root.object;
 
         // assertions
-        try std.testing.expectEqualStrings("2.0", response.get("jsonrpc").?.String);
-        try std.testing.expectEqual(self.request_id, @intCast(u32, response.get("id").?.Integer));
+        try std.testing.expectEqualStrings("2.0", response.get("jsonrpc").?.string);
+        try std.testing.expectEqual(self.request_id, @intCast(u32, response.get("id").?.integer));
         try std.testing.expect(!response.contains("error"));
 
         const result_json = try std.json.stringifyAlloc(allocator, response.get("result").?, .{});
@@ -195,7 +195,7 @@ pub const Context = struct {
         const response_bytes = try self.requestAlloc(method, buffer.items);
         defer self.server.allocator.free(response_bytes);
 
-        var parser = std.json.Parser.init(self.arena.allocator(), false);
+        var parser = std.json.Parser.init(self.arena.allocator(), .alloc_always);
         var tree = try parser.parse(try self.arena.allocator().dupe(u8, response_bytes));
 
         // TODO validate jsonrpc and id
@@ -229,7 +229,7 @@ fn testFoldingRange(source: []const u8, expect: []const types.FoldingRange) !voi
     var expected = std.ArrayListUnmanaged(u8){};
     defer expected.deinit(allocator);
 
-    const options = std.json.StringifyOptions{ .emit_null_optional_fields = false, .whitespace = .{ .indent = .None } };
+    const options = std.json.StringifyOptions{ .emit_null_optional_fields = false };
     try tres.stringify(response.result, options, actual.writer(allocator));
     try tres.stringify(expect, options, expected.writer(allocator));
 