Removed DebugAllocator; use std.heap.GeneralPurposeAllocator instead

This commit is contained in:
Alexandros Naskos 2020-10-10 12:44:37 +03:00
parent 162f1b2fbe
commit 27f9f14ab3
4 changed files with 12 additions and 233 deletions

View File

@ -38,8 +38,6 @@ The `zls` executable will be saved to `zls\zig-cache\bin`.
| Option | Type | Default Value | What it Does |
| --- | --- | --- | --- |
| `-Ddata_version` | `string` (master or 0.6.0) | 0.6.0 | The data file version. This selects the files in the `src/data` folder that correspond to the Zig version being served.|
| `-Dallocation_info` | `bool` | `false` | Enable the use of the debug allocator that will print out information in debug mode and track memory leaks.|
| `-Dmax_bytes_allocated` | `usize` | `0` | When `allocation_info` is true, enables a maximum allowed allocation size (excluding stacktraces) before the program panics.|
Then, you can use the `zls` executable in an editor of your choice that has a Zig language server client!

View File

@ -142,19 +142,6 @@ pub fn build(b: *std.build.Builder) !void {
b.option([]const u8, "data_version", "The data version - either 0.6.0 or master.") orelse "0.6.0",
);
exe.addBuildOption(
bool,
"allocation_info",
b.option(bool, "allocation_info", "Enable use of debugging allocator and info logging.") orelse false,
);
const max_bytes_str = b.option([]const u8, "max_bytes_allocated", "Maximum amount of bytes to allocate before we exit. Zero for unlimited allocations. Only takes effect when allocation_info=true") orelse "0";
exe.addBuildOption(
usize,
"max_bytes_allocated",
try std.fmt.parseInt(usize, max_bytes_str, 10),
);
exe.addPackage(.{ .name = "known-folders", .path = "src/known-folders/known-folders.zig" });
exe.setTarget(target);

View File

@ -1,192 +0,0 @@
//! This allocator collects information about allocation sizes
const std = @import("std");
// All diagnostics from this allocator go to the `.debug_alloc` log scope.
const log = std.log.scoped(.debug_alloc);
// File-as-struct: this file's top-level fields form the DebugAllocator type.
const DebugAllocator = @This();
/// Streaming statistics over a sequence of usize samples.
/// Keeps a running mean and mean-of-squares so `stdDev()` can be derived
/// without storing individual samples.
const Stats = struct {
    mean: f64 = 0,
    mean_of_squares: f64 = 0,
    total: usize = 0,
    count: usize = 0,

    /// Fold one sample into the running statistics.
    fn addSample(self: *Stats, value: usize) void {
        const count_f64 = @intToFloat(f64, self.count);
        const value_f64 = @intToFloat(f64, value);
        self.mean = (self.mean * count_f64 + value_f64) / (count_f64 + 1);
        // Square in f64 rather than usize: `value * value` overflows (and
        // traps in safe build modes) once value exceeds sqrt(maxInt(usize)).
        self.mean_of_squares = (self.mean_of_squares * count_f64 + value_f64 * value_f64) / (count_f64 + 1);
        self.total += value;
        self.count += 1;
    }

    /// Population standard deviation: sqrt(E[x^2] - E[x]^2).
    fn stdDev(self: Stats) f64 {
        return std.math.sqrt(self.mean_of_squares - self.mean * self.mean);
    }
};
/// Aggregate statistics gathered by the DebugAllocator; implements
/// `format` so the whole struct can be printed directly with `std.fmt`.
pub const AllocationInfo = struct {
    /// Size samples for every allocation.
    allocation_stats: Stats = Stats{},
    /// Number of frees (resize-to-zero calls).
    deallocation_count: usize = 0,
    /// Total bytes released through frees.
    deallocation_total: usize = 0,
    /// High-water mark of `currentlyAllocated()`.
    peak_allocated: usize = 0,
    /// Size samples of the growth part of growing resizes.
    reallocation_stats: Stats = Stats{},
    /// Size samples of the trimmed part of shrinking resizes.
    shrink_stats: Stats = Stats{},

    /// Live byte count: bytes handed out minus bytes handed back.
    fn currentlyAllocated(self: AllocationInfo) usize {
        return self.allocation_stats.total + self.reallocation_stats.total - self.deallocation_total - self.shrink_stats.total;
    }

    /// Render the summary table. NOTE(review): `{Bi:.2}` appears to be the
    /// era's binary-size format specifier — verify against the targeted Zig
    /// version. The argument tuple below must stay in the exact order of the
    /// placeholders in the format string.
    pub fn format(
        self: AllocationInfo,
        comptime fmt: []const u8,
        options: std.fmt.FormatOptions,
        out_stream: anytype,
    ) !void {
        // The long format string exceeds the default comptime branch quota.
        @setEvalBranchQuota(2000);
        return std.fmt.format(
            out_stream,
            \\------------------------------------------ Allocation info ------------------------------------------
            \\{} total allocations (total: {Bi:.2}, mean: {Bi:.2}, std. dev: {Bi:.2} MB), {} deallocations
            \\{} current allocations ({Bi:.2}), peak mem usage: {Bi:.2}
            \\{} reallocations (total: {Bi:.2}, mean: {Bi:.2}, std. dev: {Bi:.2})
            \\{} shrinks (total: {Bi:.2}, mean: {Bi:.2}, std. dev: {Bi:.2})
            \\-----------------------------------------------------------------------------------------------------
        ,
            .{
                self.allocation_stats.count,
                self.allocation_stats.total,
                self.allocation_stats.mean,
                self.allocation_stats.stdDev(),
                self.deallocation_count,
                // Allocations minus frees = number of live allocations.
                self.allocation_stats.count - self.deallocation_count,
                self.currentlyAllocated(),
                self.peak_allocated,
                self.reallocation_stats.count,
                self.reallocation_stats.total,
                self.reallocation_stats.mean,
                self.reallocation_stats.stdDev(),
                self.shrink_stats.count,
                self.shrink_stats.total,
                self.shrink_stats.mean,
                self.shrink_stats.stdDev(),
            },
        );
    }
};
/// Number of stack frames kept per allocation (two extra frames are captured
/// and then sliced off in `alloc` so the wrapper's own frames are excluded).
const stack_addresses_size = 15;

/// Allocator that actually services requests; also backs the bookkeeping map.
base_allocator: *std.mem.Allocator,
/// Running statistics for this allocator instance.
info: AllocationInfo,
/// When non-zero, reaching this many live bytes exits the process.
max_bytes: usize,
/// Map from allocation address to its captured stack addresses.
/// (sic: "strack" is the original field spelling — kept for compatibility.)
allocation_strack_addresses: std.AutoHashMap(usize, [stack_addresses_size]usize),
// Interface implementation
allocator: std.mem.Allocator,
/// Wrap `base_allocator`, tracking every request made through the returned
/// value's `allocator` field. A `max_bytes` of 0 disables the allocation cap.
pub fn init(base_allocator: *std.mem.Allocator, max_bytes: usize) DebugAllocator {
    const TraceMap = std.AutoHashMap(usize, [stack_addresses_size]usize);
    return DebugAllocator{
        .allocator = .{
            .allocFn = alloc,
            .resizeFn = resize,
        },
        .base_allocator = base_allocator,
        .info = AllocationInfo{},
        .max_bytes = max_bytes,
        .allocation_strack_addresses = TraceMap.init(base_allocator),
    };
}
/// Release the stack-trace bookkeeping map. Does not free outstanding
/// allocations — those are only reported via `printRemainingStackTraces`.
pub fn deinit(self: *DebugAllocator) void {
    self.allocation_strack_addresses.deinit();
}
/// `std.mem.Allocator.allocFn` implementation: forwards to the base
/// allocator, records the allocation size, and captures the caller's
/// stack trace keyed by the returned address.
fn alloc(allocator: *std.mem.Allocator, len: usize, ptr_align: u29, len_align: u29, ret_addr: usize) std.mem.Allocator.Error![]u8 {
    // Recover the DebugAllocator from its embedded interface field.
    const self = @fieldParentPtr(DebugAllocator, "allocator", allocator);
    const ptr = try self.base_allocator.allocFn(self.base_allocator, len, ptr_align, len_align, ret_addr);
    self.info.allocation_stats.addSample(ptr.len);
    // Capture two extra frames, then store the trace starting at [2] so
    // the wrapper's own frames are dropped from the report.
    var stack_addresses = std.mem.zeroes([stack_addresses_size + 2]usize);
    var stack_trace = std.builtin.StackTrace{
        .instruction_addresses = &stack_addresses,
        .index = 0,
    };
    std.debug.captureStackTrace(@returnAddress(), &stack_trace);
    try self.allocation_strack_addresses.putNoClobber(@ptrToInt(ptr.ptr), stack_addresses[2..].*);
    const curr_allocs = self.info.currentlyAllocated();
    // NOTE(review): exceeding max_bytes exits the process rather than
    // returning error.OutOfMemory to the caller.
    if (self.max_bytes != 0 and curr_allocs >= self.max_bytes) {
        std.debug.print("Exceeded maximum bytes {}, exiting.\n", .{self.max_bytes});
        std.process.exit(1);
    }
    if (curr_allocs > self.info.peak_allocated) {
        self.info.peak_allocated = curr_allocs;
    }
    return ptr;
}
// TODO: Check if this complies with the new allocator interface.
/// `std.mem.Allocator.resizeFn` implementation: validates the block is one
/// we handed out, updates statistics, then forwards to the base allocator.
fn resize(allocator: *std.mem.Allocator, old_mem: []u8, buf_align: u29, new_size: usize, len_align: u29, ret_addr: usize) std.mem.Allocator.Error!usize {
    const self = @fieldParentPtr(DebugAllocator, "allocator", allocator);
    if (old_mem.len == 0) {
        log.debug("Trying to resize empty slice\n", .{});
        std.process.exit(1);
    }
    // Resizing a pointer we never tracked means the caller mixed allocators.
    if (self.allocation_strack_addresses.get(@ptrToInt(old_mem.ptr)) == null) {
        @panic("error - resize call on block not allocated by debug allocator");
    }
    // NOTE(review): statistics are recorded *before* the base allocator is
    // invoked, so a failing base resize still mutates the counters — confirm
    // this is acceptable for a debug-only allocator.
    if (new_size == 0) {
        // In this allocator interface, resize-to-zero is a free.
        if (self.info.allocation_stats.count == self.info.deallocation_count) {
            @panic("error - too many calls to free, most likely double free");
        }
        self.info.deallocation_total += old_mem.len;
        self.info.deallocation_count += 1;
        self.allocation_strack_addresses.removeAssertDiscard(@ptrToInt(old_mem.ptr));
    } else if (new_size > old_mem.len) {
        // Only the grown portion is counted as a reallocation sample.
        self.info.reallocation_stats.addSample(new_size - old_mem.len);
    } else if (new_size < old_mem.len) {
        // Only the trimmed portion is counted as a shrink sample.
        self.info.shrink_stats.addSample(old_mem.len - new_size);
    }
    const curr_allocs = self.info.currentlyAllocated();
    if (self.max_bytes != 0 and curr_allocs >= self.max_bytes) {
        log.debug("Exceeded maximum bytes {}, exiting.\n", .{self.max_bytes});
        std.process.exit(1);
    }
    if (curr_allocs > self.info.peak_allocated) {
        self.info.peak_allocated = curr_allocs;
    }
    // NOTE(review): `catch |e| return e` is equivalent to plain error
    // propagation; kept byte-identical here.
    return self.base_allocator.resizeFn(self.base_allocator, old_mem, buf_align, new_size, len_align, ret_addr) catch |e| {
        return e;
    };
}
/// Dump a stack trace for every allocation still present in the tracking
/// map — i.e. everything that was never freed.
pub fn printRemainingStackTraces(self: DebugAllocator) void {
    std.debug.print(
        \\{} allocations - stack traces follow
        \\------------------------------------
    , .{self.allocation_strack_addresses.count()});

    var remaining = self.allocation_strack_addresses.iterator();
    var allocation_number: usize = 1;
    while (remaining.next()) |entry| : (allocation_number += 1) {
        std.debug.print("\nAllocation {}\n-------------\n", .{allocation_number});

        // Addresses were zero-initialized; the first zero entry marks the
        // end of the captured trace.
        var trace_len: usize = 0;
        while (trace_len < stack_addresses_size) : (trace_len += 1) {
            if (entry.value[trace_len] == 0) break;
        }

        std.debug.dumpStackTrace(std.builtin.StackTrace{
            .instruction_addresses = &entry.value,
            .index = trace_len,
        });
    }
}

View File

@ -3,7 +3,6 @@ const build_options = @import("build_options");
const Config = @import("config.zig");
const DocumentStore = @import("document_store.zig");
const DebugAllocator = @import("debug_allocator.zig");
const readRequestHeader = @import("header.zig").readRequestHeader;
const data = @import("data/" ++ build_options.data_version ++ ".zig");
const requests = @import("requests.zig");
@ -27,6 +26,12 @@ pub fn log(
comptime format: []const u8,
args: anytype,
) void {
// After shutdown, pipe output to stderr
if (!keep_running) {
std.debug.print("[{}-{}] " ++ format, .{ @tagName(message_level), @tagName(scope) } ++ args);
return;
}
var arena = std.heap.ArenaAllocator.init(allocator);
defer arena.deinit();
@ -34,6 +39,7 @@ pub fn log(
std.debug.print("Failed to allocPrint message.", .{});
return;
};
if (@enumToInt(message_level) <= @enumToInt(std.log.Level.notice)) {
const message_type: types.MessageType = switch (message_level) {
.info => .Log,
@ -1108,6 +1114,8 @@ fn initializeHandler(arena: *std.heap.ArenaAllocator, id: types.RequestId, req:
var keep_running = true;
fn shutdownHandler(arena: *std.heap.ArenaAllocator, id: types.RequestId, config: Config) !void {
logger.notice("Server closing...", .{});
keep_running = false;
// Technically we should deinitialize first and send possible errors to the client
try respondGeneric(id, null_result_response);
@ -1495,28 +1503,10 @@ fn processJsonRpc(arena: *std.heap.ArenaAllocator, parser: *std.json.Parser, jso
logger.debug("Method without return value not implemented: {}", .{method});
}
var debug_alloc_state: DebugAllocator = undefined;
// We can now use if(leak_count_alloc) |alloc| { ... } as a comptime check.
const debug_alloc: ?*DebugAllocator = if (build_options.allocation_info) &debug_alloc_state else null;
var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
pub fn main() anyerror!void {
// TODO: Use a better purpose general allocator once std has one.
// Probably after the generic composable allocators PR?
// This is not too bad for now since most allocations happen in local arenas.
allocator = std.heap.page_allocator;
if (build_options.allocation_info) {
// Initialize the leak counting allocator.
debug_alloc_state = DebugAllocator.init(allocator, build_options.max_bytes_allocated);
allocator = &debug_alloc_state.allocator;
}
defer if (debug_alloc) |dbg| {
std.debug.print("Finished cleanup, last allocation info.\n", .{});
std.debug.print("\n{}\n", .{dbg.info});
dbg.printRemainingStackTraces();
dbg.deinit();
};
defer _ = gpa_state.deinit();
allocator = &gpa_state.allocator;
// Init global vars
const reader = std.io.getStdIn().reader();
@ -1653,9 +1643,5 @@ pub fn main() anyerror!void {
json_parser.reset();
arena.deinit();
arena.state = .{};
if (debug_alloc) |dbg| {
logger.debug("\n{}\n", .{dbg.info});
}
}
}