Compare commits

..

4 Commits

Author SHA1 Message Date
kayomn 155645a308 Fix allocation size info not updating in reallocs 2023-06-04 13:44:27 +00:00
kayomn d378939f30 Remove allocation checks in optimized release builds 2023-06-04 13:37:26 +00:00
kayomn 341710bfbc Fix so, so many memory leaks 2023-06-04 13:09:30 +00:00
kayomn 9c871aac97 Clarify panic wording in Ona heap implementation 2023-06-04 12:34:48 +00:00
6 changed files with 243 additions and 145 deletions

View File

@@ -134,47 +134,6 @@ pub const FixedBuffer = struct {
     }
 };
 
-pub const GrowingBuffer = struct {
-    allocator: Allocator,
-    appender: Appender,
-
-    const AppendOptions = struct {
-        allocator: Allocator,
-        bytes: []const u8,
-    };
-
-    const Appender = Generator(AllocationError!void, AppendOptions);
-
-    pub fn as_writer(self: *GrowingBuffer) Writer {
-        return Writer.bind(GrowingBuffer, self, struct {
-            fn write(growing_buffer: *GrowingBuffer, bytes: []const u8) ?usize {
-                growing_buffer.write(bytes) catch return null;
-
-                return bytes.len;
-            }
-        }.write);
-    }
-
-    pub fn bind(comptime State: type, allocator: Allocator, state: *State, comptime appender: fn (capture: *State, allocator: Allocator, bytes: []const u8) AllocationError!void) GrowingBuffer {
-        return .{
-            .appender = Appender.bind(State, state, struct {
-                fn append(self: *State, options: AppendOptions) AllocationError!void {
-                    return appender(self, options.allocator, options.bytes);
-                }
-            }.append),
-
-            .allocator = allocator,
-        };
-    }
-
-    pub fn write(self: GrowingBuffer, bytes: []const u8) AllocationError!void {
-        return self.appender.invoke(.{
-            .allocator = self.allocator,
-            .bytes = bytes,
-        });
-    }
-};
-
 pub const Writer = Generator(?usize, []const u8);
 
 pub fn allocate_many(allocator: Allocator, amount: usize, comptime Type: type) AllocationError![]Type {

View File

@@ -17,19 +17,6 @@ pub fn Stack(comptime Value: type) type {
         ///
         const Self = @This();
 
-        ///
-        /// Returns a [io.GrowableBuffer] bound to `self` and `allocator`.
-        ///
-        /// The returned buffer may be used to write to the stack without needing to explicitly pass an allocator
-        /// context, as well decay further into a generic [io.Writer] type.
-        ///
-        /// *Note* if `capacity` is a non-zero value, `allocator` must reference the same allocation strategy as the one
-        /// originally used to allocate the current internal buffer.
-        ///
-        pub fn as_buffer(self: *Self, allocator: io.Allocator) io.GrowingBuffer {
-            return io.GrowingBuffer.bind(Self, allocator, self, push_all);
-        }
-
         ///
         /// Clears all elements from `self` while preserving the current internal buffer.
         ///

@@ -194,3 +181,49 @@ pub fn Stack(comptime Value: type) type {
         }
     };
 }
+
+///
+/// Bridge context between a list type implement as part of the list module and an allocator, allowing the list resource
+/// referenced by the [Writable] instance to be written to directly or virtually via the [io.Writer] interface.
+///
+/// *Note* if the given list contains an existing allocation, the provided [io.Allocator] instance must reference the
+/// same allocation strategy as the one originally used to allocate the list type memory.
+///
+pub const Writable = struct {
+    allocator: io.Allocator,
+
+    list: union (enum) {
+        stack: *ByteStack,
+    },
+
+    ///
+    /// Stack of bytes.
+    ///
+    const ByteStack = Stack(u8);
+
+    ///
+    /// Returns a [io.Writer] instance that binds a reference of `self` to the [write] operation.
+    ///
+    pub fn as_writer(self: *Writable) io.Writer {
+        return io.Writer.bind(Writable, self, struct {
+            fn write(writable: *Writable, bytes: []const u8) ?usize {
+                writable.write(bytes) catch return null;
+
+                return bytes.len;
+            }
+        }.write);
+    }
+
+    ///
+    /// Attempts to call the appropriate multi-element writing function for the current list referenced by `self`,
+    /// passing `bytes` along.
+    ///
+    /// The function returns [io.AllocationError] if `allocator` could not commit the memory by the list implementation
+    /// referenced by `self`. See the specific implementation details of the respective list type for more information.
+    ///
+    pub fn write(self: *Writable, bytes: []const u8) io.AllocationError!void {
+        return switch (self.list) {
+            .stack => |stack| stack.push_all(self.allocator, bytes),
+        };
+    }
+};
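A minimal usage sketch of the new Writable bridge; `allocator` stands for whatever io.Allocator value the caller already has (it is hypothetical here), and the compile and execute_file changes further down in this compare follow the same pattern:

var message_data = coral.list.Stack(u8){};

defer message_data.deinit(allocator);

var writable_data = coral.list.Writable{
    .allocator = allocator,
    .list = .{.stack = &message_data},
};

// Writes now go through the bound allocator without passing it at every call site...
try writable_data.write("formatted output goes here");

// ...and the same value can decay into a generic io.Writer for stream-style APIs.
const writer = writable_data.as_writer();
_ = writer;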

View File

@@ -65,6 +65,30 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
         }
     };
 
+    ///
+    /// Iterable wrapper for [Hashed] instances to make unordered traversal of key-value entries relatively trivial.
+    ///
+    pub const Iterable = struct {
+        hashed_map: *Self,
+        iterations: usize = 0,
+
+        ///
+        /// Attempts to move past the current iteration of `self` and onto the next key-value entry, returning it or
+        /// `null` if there are no more elements in the referenced map.
+        ///
+        pub fn next(self: *Iterable) ?Entry {
+            while (self.iterations < self.hashed_map.table.len) {
+                defer self.iterations += 1;
+
+                if (self.hashed_map.table[self.iterations]) |entry| {
+                    return entry;
+                }
+            }
+
+            return null;
+        }
+    };
+
     ///
     /// Table type.
     ///
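For reference, the virtual machine's deinit change later in this compare drives the new iterator like so (names are from that file):

var interned_iterable = InternTable.Iterable{.hashed_map = &self.interned};

while (interned_iterable.next()) |entry| {
    // Entries come back in table order, i.e. effectively unordered.
    self.discard(.{.object = entry.value});
}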

View File

@@ -1,3 +1,5 @@
+const builtin = @import("builtin");
+
 const coral = @import("coral");
 
 const ext = @import("./ext.zig");
@@ -5,16 +7,26 @@ const ext = @import("./ext.zig");
 const std = @import("std");
 
 ///
-///
+/// Recorded allocation info state.
 ///
 const AllocationInfo = struct {
-    trace: std.debug.Trace,
+    trace: AllocationTrace,
     next_info: ?*AllocationInfo,
     size: usize,
 };
 
 ///
-///
+/// Recorded stack trace of allocation call site.
+///
+/// *Note* this structure is reduced to zero bytes in released builds optimized for speed or size.
+///
+const AllocationTrace = std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
+    .Debug, .ReleaseSafe => true,
+    .ReleaseFast, .ReleaseSmall => false,
+});
+
+///
+/// Heap allocation context.
 ///
 const Context = struct {
     allocation_info_head: ?*AllocationInfo = null,
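For orientation, the trace type is driven by just two calls in this file; a minimal sketch of that flow (not part of the diff):

var trace: AllocationTrace = .{};

// allocate() records the caller's return address; in ReleaseFast and ReleaseSmall the
// trace configured above is zero-sized, so this is effectively a no-op.
trace.addAddr(@returnAddress(), "");

// trace_leaks() later replays the captured frames to stderr.
trace.dump();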
@@ -29,23 +41,40 @@ const Context = struct {
     /// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
     /// leak.
     ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
     fn allocate(self: *Context, size: usize, return_address: usize) ?[]u8 {
-        const allocation_info_size = @sizeOf(AllocationInfo);
-        const total_allocation_size = allocation_info_size + size;
-        const allocation = ext.SDL_malloc(total_allocation_size) orelse return null;
-        const allocation_info = @ptrCast(*AllocationInfo, @alignCast(@alignOf(AllocationInfo), allocation));
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const allocation_info_size = @sizeOf(AllocationInfo);
+                const total_allocation_size = allocation_info_size + size;
+                const allocation = ext.SDL_malloc(total_allocation_size) orelse return null;
+                const allocation_info = @ptrCast(*AllocationInfo, @alignCast(@alignOf(AllocationInfo), allocation));
 
-        allocation_info.* = .{
-            .size = size,
-            .next_info = self.allocation_info_head,
-            .trace = .{},
-        };
+                allocation_info.* = .{
+                    .size = size,
+                    .next_info = self.allocation_info_head,
+                    .trace = .{},
+                };
 
-        allocation_info.trace.addAddr(return_address, "");
+                allocation_info.trace.addAddr(return_address, "");
 
-        self.allocation_info_head = allocation_info;
+                self.allocation_info_head = allocation_info;
 
-        return @ptrCast([*]u8, allocation)[allocation_info_size .. total_allocation_size];
+                return @ptrCast([*]u8, allocation)[allocation_info_size .. total_allocation_size];
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                return @ptrCast([*]u8, ext.SDL_malloc(size) orelse return null)[0 .. size];
+            },
+        }
+    }
+
+    ///
+    /// Returns the assumed pointer to the [AllocationInfo] address of `allocation`.
+    ///
+    fn allocation_info_of(allocation: [*]u8) *AllocationInfo {
+        return @intToPtr(*AllocationInfo, @ptrToInt(allocation) - @sizeOf(AllocationInfo));
     }
 
     ///
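The helper above pins down the allocation layout that deallocate and reallocate below rely on; roughly (a sketch, not part of the diff):

// Debug/ReleaseSafe layout of one tracked allocation:
//
//   base pointer from SDL_malloc
//   v
//   [ AllocationInfo header ][ size bytes handed to the caller ]
//                             ^ slice returned by allocate()
//
// allocation_info_of() steps back @sizeOf(AllocationInfo) bytes from the caller-visible
// pointer, so for any slice `s` returned by allocate(): allocation_info_of(s.ptr).size == s.len.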
@@ -54,40 +83,50 @@ const Context = struct {
     /// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
     /// checked behavior will occur.
     ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
     fn deallocate(self: *Context, allocation: []u8) void {
-        const target_allocation_info = @intToPtr(*AllocationInfo, @ptrToInt(allocation.ptr) - @sizeOf(AllocationInfo));
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const target_allocation_info = allocation_info_of(allocation.ptr);
 
-        if (target_allocation_info.size != allocation.len) {
-            @panic("incorrect allocation length");
-        }
+                if (target_allocation_info.size != allocation.len) {
+                    @panic("incorrect allocation length for deallocating");
+                }
 
-        if (self.allocation_info_head) |allocation_info_head| {
-            if (target_allocation_info == allocation_info_head) {
-                self.allocation_info_head = allocation_info_head.next_info;
-                ext.SDL_free(target_allocation_info);
-                return;
-            }
+                if (self.allocation_info_head) |allocation_info_head| {
+                    if (target_allocation_info == allocation_info_head) {
+                        self.allocation_info_head = allocation_info_head.next_info;
+                        ext.SDL_free(target_allocation_info);
+                        return;
+                    }
 
-            var previous_allocation_info = allocation_info_head;
-            var current_allocation_info = allocation_info_head.next_info;
+                    var previous_allocation_info = allocation_info_head;
+                    var current_allocation_info = allocation_info_head.next_info;
 
-            while (current_allocation_info) |allocation_info| {
-                if (allocation_info == target_allocation_info) {
-                    previous_allocation_info.next_info = allocation_info.next_info;
-                    ext.SDL_free(target_allocation_info);
-                    return;
-                }
+                    while (current_allocation_info) |allocation_info| {
+                        if (allocation_info == target_allocation_info) {
+                            previous_allocation_info.next_info = allocation_info.next_info;
+                            ext.SDL_free(target_allocation_info);
+                            return;
+                        }
 
-                previous_allocation_info = allocation_info;
-                current_allocation_info = allocation_info.next_info;
-            }
-        }
+                        previous_allocation_info = allocation_info;
+                        current_allocation_info = allocation_info.next_info;
+                    }
+                }
 
-        @panic("double-free detected");
+                @panic("incorrect allocation address for deallocating");
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                ext.SDL_free(allocation.ptr);
+            },
+        }
     }
 
     ///
@@ -105,41 +144,58 @@ const Context = struct {
     /// *Note* the allocation referenced by `allocation` should be considered invalid once the function returns,
     /// discarding it in favor of the return value.
    ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
     fn reallocate(self: *Context, allocation: []u8, size: usize) ?[]u8 {
-        const allocation_info_size = @sizeOf(AllocationInfo);
-        const target_allocation_info = @intToPtr(*AllocationInfo, @ptrToInt(allocation.ptr) - allocation_info_size);
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const target_allocation_info = allocation_info_of(allocation.ptr);
 
-        if (target_allocation_info.size != allocation.len) {
-            @panic("incorrect allocation length");
-        }
+                if (target_allocation_info.size != allocation.len) {
+                    @panic("incorrect allocation length for reallocating");
+                }
 
-        if (self.allocation_info_head) |allocation_info_head| {
-            if (target_allocation_info == allocation_info_head) {
-                self.allocation_info_head = allocation_info_head.next_info;
+                const allocation_info_size = @sizeOf(AllocationInfo);
 
-                return @ptrCast([*]u8, ext.SDL_realloc(target_allocation_info, size) orelse {
-                    return null;
-                })[allocation_info_size .. allocation_info_size + size];
-            }
+                if (self.allocation_info_head) |allocation_info_head| {
+                    if (target_allocation_info == allocation_info_head) {
+                        self.allocation_info_head = allocation_info_head.next_info;
 
-            var previous_allocation_info = allocation_info_head;
-            var current_allocation_info = allocation_info_head.next_info;
+                        const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
 
-            while (current_allocation_info) |allocation_info| {
-                if (allocation_info == target_allocation_info) {
-                    previous_allocation_info.next_info = allocation_info.next_info;
+                        target_allocation_info.size = size;
 
-                    return @ptrCast([*]u8, ext.SDL_realloc(target_allocation_info, size) orelse {
-                        return null;
-                    })[allocation_info_size .. allocation_info_size + size];
-                }
+                        return @ptrCast([*]u8, allocation_address)[
+                            allocation_info_size .. (allocation_info_size + size)];
+                    }
 
-                previous_allocation_info = allocation_info;
-                current_allocation_info = allocation_info.next_info;
-            }
-        }
+                    var previous_allocation_info = allocation_info_head;
+                    var current_allocation_info = allocation_info_head.next_info;
 
-        @panic("use-after-free detected");
+                    while (current_allocation_info) |allocation_info| {
+                        if (allocation_info == target_allocation_info) {
+                            previous_allocation_info.next_info = allocation_info.next_info;
+
+                            const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
+
+                            target_allocation_info.size = size;
+
+                            return @ptrCast([*]u8, allocation_address)[
+                                allocation_info_size .. (allocation_info_size + size)];
+                        }
+
+                        previous_allocation_info = allocation_info;
+                        current_allocation_info = allocation_info.next_info;
+                    }
+                }
+
+                @panic("incorrect allocation address for reallocating");
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                return @ptrCast([*]u8, ext.SDL_realloc(allocation.ptr, size) orelse return null)[0 .. size];
+            },
+        }
     }
 };
@@ -176,14 +232,23 @@ pub const allocator = coral.io.Allocator.bind(Context, &context, struct {
 /// alive and reports the stack traces of any detected allocations to stderr along with the allocation address and
 /// length.
 ///
+/// *Note* this function becomes a no-op in release builds optimized for speed or size.
+///
 pub fn trace_leaks() void {
-    var current_allocation_info = context.allocation_info_head;
+    switch (builtin.mode) {
+        .Debug, .ReleaseSafe => {
+            var current_allocation_info = context.allocation_info_head;
 
-    while (current_allocation_info) |allocation_info| : (current_allocation_info = allocation_info.next_info) {
-        std.debug.print("{d} byte leak at 0x{x} detected: {}", .{
-            allocation_info.size,
-            @ptrToInt(allocation_info) + @sizeOf(AllocationInfo),
-            allocation_info.trace
-        });
-    }
+            while (current_allocation_info) |allocation_info| : (current_allocation_info = allocation_info.next_info) {
+                std.debug.print("{d} byte leak at 0x{x} detected:\n", .{
+                    allocation_info.size,
+                    @ptrToInt(allocation_info) + @sizeOf(AllocationInfo),
+                });
+
+                allocation_info.trace.dump();
+            }
+        },
+
+        .ReleaseFast, .ReleaseSmall => {},
+    }
 }
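An end-to-end sketch of how this allocator module is consumed; the import path and module name are assumptions, but allocate_many and deallocate are the coral.io helpers visible elsewhere in this compare:

const coral = @import("coral");
const heap = @import("./heap.zig"); // hypothetical path

pub fn main() coral.io.AllocationError!void {
    // Goes through heap.allocator, so safe builds prepend an AllocationInfo header
    // and link the allocation into the context's list.
    const bytes = try coral.io.allocate_many(heap.allocator, 64, u8);

    // Skipping this deallocate would leave the entry linked...
    coral.io.deallocate(heap.allocator, bytes);

    // ...and trace_leaks() would then report its size, address, and recorded stack trace.
    // In ReleaseFast and ReleaseSmall builds the call does nothing.
    heap.trace_leaks();
}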

View File

@@ -53,7 +53,7 @@ fn clear_error_details(self: *Self) void {
 pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
     var ast = try Ast.init(self.env.allocator);
 
-    errdefer ast.deinit();
+    defer ast.deinit();
 
     {
         var tokenizer = tokens.Tokenizer{.source = data};

@@ -62,9 +62,12 @@ pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
         if (init_error == error.BadSyntax) {
             self.clear_error_details();
 
-            var message_buffer = self.message_data.as_buffer(self.env.allocator);
+            var writable_data = coral.list.Writable{
+                .allocator = self.env.allocator,
+                .list = .{.stack = &self.message_data},
+            };
 
-            coral.utf8.print_formatted(message_buffer.as_writer(), "@({line}): {name}", .{
+            coral.utf8.print_formatted(writable_data.as_writer(), "@({line}): {name}", .{
                 .line = tokenizer.lines_stepped,
                 .name = ast.error_message,
             }) catch return error.OutOfMemory;

View File

@@ -171,6 +171,16 @@ pub fn check(self: *Self, condition: bool, failure_message: []const u8) !void {
 pub fn deinit(self: *Self) void {
     self.discard(.{.object = self.global_object});
 
+    {
+        var interned_iterable = InternTable.Iterable{.hashed_map = &self.interned};
+
+        while (interned_iterable.next()) |entry| {
+            self.discard(.{.object = entry.value});
+        }
+    }
+
+    self.interned.deinit(self.allocator);
     self.values.deinit(self.allocator);
     self.calls.deinit(self.allocator);
     self.heap.deinit(self.allocator);

@@ -191,9 +201,9 @@ pub fn discard(self: *Self, val: types.Val) void {
                 .obj = val.as_ref(),
             });
 
-            coral.io.deallocate(self.allocator, data.state.userdata);
+            // TODO: Free individual key-value pairs of fields
             data.state.fields.deinit(self.allocator);
+            coral.io.deallocate(self.allocator, data.state.userdata);
 
             self.heap.remove(object);
         } else {
             self.heap.assign(object, data);

@@ -242,25 +252,29 @@ pub fn execute_file(self: *Self, fs: file.System, file_path: file.Path) ExecuteF
     defer readable_file.close();
 
-    var file_source = coral.list.Stack(u8){};
+    var file_data = coral.list.Stack(u8){};
 
     const file_size = (try fs.query_info(file_path)).size;
 
-    try file_source.grow(self.allocator, file_size);
+    try file_data.grow(self.allocator, file_size);
 
-    defer file_source.deinit(self.allocator);
+    defer file_data.deinit(self.allocator);
 
     {
-        var file_buffer = file_source.as_buffer(self.allocator);
+        var writable_data = coral.list.Writable{
+            .allocator = self.allocator,
+            .list = .{.stack = &file_data},
+        };
+
         var stream_buffer = [_]u8{0} ** 4096;
 
-        if ((try coral.io.stream(file_buffer.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
+        if ((try coral.io.stream(writable_data.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
             return error.ReadFailure;
         }
     }
 
     return try self.execute_data(.{
         .name = try file_path.to_string(),
-        .data = file_source.values,
+        .data = file_data.values,
     });
 }