Add Leak Detection to Ona Heap Allocator #15

Merged
kayomn merged 12 commits from ona-allocator-safety-tracker into main 2023-06-04 16:07:47 +02:00
5 changed files with 98 additions and 65 deletions
Showing only changes of commit 341710bfbc

View File

@@ -134,47 +134,6 @@ pub const FixedBuffer = struct {
     }
 };
 
-pub const GrowingBuffer = struct {
-    allocator: Allocator,
-    appender: Appender,
-
-    const AppendOptions = struct {
-        allocator: Allocator,
-        bytes: []const u8,
-    };
-
-    const Appender = Generator(AllocationError!void, AppendOptions);
-
-    pub fn as_writer(self: *GrowingBuffer) Writer {
-        return Writer.bind(GrowingBuffer, self, struct {
-            fn write(growing_buffer: *GrowingBuffer, bytes: []const u8) ?usize {
-                growing_buffer.write(bytes) catch return null;
-
-                return bytes.len;
-            }
-        }.write);
-    }
-
-    pub fn bind(comptime State: type, allocator: Allocator, state: *State, comptime appender: fn (capture: *State, allocator: Allocator, bytes: []const u8) AllocationError!void) GrowingBuffer {
-        return .{
-            .appender = Appender.bind(State, state, struct {
-                fn append(self: *State, options: AppendOptions) AllocationError!void {
-                    return appender(self, options.allocator, options.bytes);
-                }
-            }.append),
-
-            .allocator = allocator,
-        };
-    }
-
-    pub fn write(self: GrowingBuffer, bytes: []const u8) AllocationError!void {
-        return self.appender.invoke(.{
-            .allocator = self.allocator,
-            .bytes = bytes,
-        });
-    }
-};
-
 pub const Writer = Generator(?usize, []const u8);
 
 pub fn allocate_many(allocator: Allocator, amount: usize, comptime Type: type) AllocationError![]Type {

View File

@@ -17,19 +17,6 @@ pub fn Stack(comptime Value: type) type {
         ///
         const Self = @This();
 
-        ///
-        /// Returns a [io.GrowingBuffer] bound to `self` and `allocator`.
-        ///
-        /// The returned buffer may be used to write to the stack without needing to explicitly pass an allocator
-        /// context, as well as decay further into a generic [io.Writer] type.
-        ///
-        /// *Note* if `capacity` is a non-zero value, `allocator` must reference the same allocation strategy as the one
-        /// originally used to allocate the current internal buffer.
-        ///
-        pub fn as_buffer(self: *Self, allocator: io.Allocator) io.GrowingBuffer {
-            return io.GrowingBuffer.bind(Self, allocator, self, push_all);
-        }
-
         ///
         /// Clears all elements from `self` while preserving the current internal buffer.
         ///
@@ -194,3 +181,49 @@ pub fn Stack(comptime Value: type) type {
         }
     };
 }
+
+///
+/// Bridge context between a list type implemented as part of the list module and an allocator, allowing the list
+/// resource referenced by the [Writable] instance to be written to directly or virtually via the [io.Writer]
+/// interface.
+///
+/// *Note* if the given list contains an existing allocation, the provided [io.Allocator] instance must reference the
+/// same allocation strategy as the one originally used to allocate the list type memory.
+///
+pub const Writable = struct {
+    allocator: io.Allocator,
+
+    list: union (enum) {
+        stack: *ByteStack,
+    },
+
+    ///
+    /// Stack of bytes.
+    ///
+    const ByteStack = Stack(u8);
+
+    ///
+    /// Returns a [io.Writer] instance that binds a reference of `self` to the [write] operation.
+    ///
+    pub fn as_writer(self: *Writable) io.Writer {
+        return io.Writer.bind(Writable, self, struct {
+            fn write(writable: *Writable, bytes: []const u8) ?usize {
+                writable.write(bytes) catch return null;
+
+                return bytes.len;
+            }
+        }.write);
+    }
+
+    ///
+    /// Attempts to call the appropriate multi-element writing function for the list currently referenced by `self`,
+    /// passing `bytes` along.
+    ///
+    /// The function returns [io.AllocationError] if `allocator` could not commit the memory required by the list
+    /// implementation referenced by `self`. See the implementation details of the respective list type for more
+    /// information.
+    ///
+    pub fn write(self: *Writable, bytes: []const u8) io.AllocationError!void {
+        return switch (self.list) {
+            .stack => |stack| stack.push_all(self.allocator, bytes),
+        };
+    }
+};
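
For context, a minimal usage sketch of the new Writable bridge, built only from calls visible in this diff; the standalone `allocator` handle is an assumption (in the real call sites it comes from the VM environment):

    // `allocator` is assumed to be an io.Allocator obtained elsewhere.
    var message_data = coral.list.Stack(u8){};

    defer message_data.deinit(allocator);

    var writable_data = coral.list.Writable{
        .allocator = allocator,
        .list = .{.stack = &message_data},
    };

    // Writes can go through the bridge directly...
    try writable_data.write("hello");

    // ...or virtually, through the type-erased io.Writer interface.
    _ = writable_data.as_writer().invoke("world");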

View File

@@ -65,6 +65,30 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Key)) type {
             }
         };
 
+        ///
+        /// Iterable wrapper for [Hashed] instances to make unordered traversal of key-value entries relatively
+        /// trivial.
+        ///
+        pub const Iterable = struct {
+            hashed_map: *Self,
+            iterations: usize = 0,
+
+            ///
+            /// Attempts to move past the current iteration of `self` and onto the next key-value entry, returning it
+            /// or `null` if there are no more elements in the referenced map.
+            ///
+            pub fn next(self: *Iterable) ?Entry {
+                while (self.iterations < self.hashed_map.table.len) {
+                    defer self.iterations += 1;
+
+                    if (self.hashed_map.table[self.iterations]) |entry| {
+                        return entry;
+                    }
+                }
+
+                return null;
+            }
+        };
+
         ///
         /// Table type.
         ///
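
For context, a minimal usage sketch of the new Iterable wrapper; `map` stands in for an instance of the surrounding Hashed type:

    var iterable = Iterable{.hashed_map = &map};

    // Traversal is unordered: `next` scans the backing table in storage order,
    // skipping empty slots, and yields each occupied entry exactly once.
    while (iterable.next()) |entry| {
        // ... use `entry` here ...
    }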

View File

@@ -53,7 +53,7 @@ fn clear_error_details(self: *Self) void {
 pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
     var ast = try Ast.init(self.env.allocator);
 
-    errdefer ast.deinit();
+    defer ast.deinit();
 
     {
         var tokenizer = tokens.Tokenizer{.source = data};
@@ -62,9 +62,12 @@ pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
         if (init_error == error.BadSyntax) {
             self.clear_error_details();
 
-            var message_buffer = self.message_data.as_buffer(self.env.allocator);
+            var writable_data = coral.list.Writable{
+                .allocator = self.env.allocator,
+                .list = .{.stack = &self.message_data},
+            };
 
-            coral.utf8.print_formatted(message_buffer.as_writer(), "@({line}): {name}", .{
+            coral.utf8.print_formatted(writable_data.as_writer(), "@({line}): {name}", .{
                 .line = tokenizer.lines_stepped,
                 .name = ast.error_message,
             }) catch return error.OutOfMemory;

View File

@@ -171,6 +171,16 @@ pub fn check(self: *Self, condition: bool, failure_message: []const u8) !void {
 pub fn deinit(self: *Self) void {
     self.discard(.{.object = self.global_object});
 
+    {
+        var interned_iterable = InternTable.Iterable{.hashed_map = &self.interned};
+
+        while (interned_iterable.next()) |entry| {
+            self.discard(.{.object = entry.value});
kayomn marked this conversation as resolved

Perhaps `discard` could have an internal complementary function specifically for discarding objects to save on the effort of wrapping a known type in a discriminated union only to unwrap it again back to the known type.
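
A minimal sketch of what that suggested helper might look like; the name `discard_object` and the `ObjectRef` stand-in (whatever type the `.object` field of types.Val actually holds) are hypothetical:

    fn discard_object(self: *Self, object: ObjectRef) void {
        // The object-specific teardown currently in the `.object` branch of
        // `discard` (fields.deinit, deallocate, heap bookkeeping) moves here.
        _ = self;
        _ = object;
    }

    pub fn discard(self: *Self, val: types.Val) void {
        switch (val) {
            // Known-object call sites, such as the deinit loop above, can then
            // call `discard_object` directly, skipping the union round-trip.
            .object => |object| self.discard_object(object),
            else => {},
        }
    }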
+        }
+    }
+
     self.interned.deinit(self.allocator);
     self.values.deinit(self.allocator);
     self.calls.deinit(self.allocator);
     self.heap.deinit(self.allocator);
kayomn marked this conversation as resolved

May be worth adding an assert to check that the VM environment heap is empty by now before calling deinit.
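
A minimal sketch of the suggested assert, using Zig's standard std.debug.assert; the `count` accessor on the heap is hypothetical, standing in for however the Ona heap exposes the number of live allocations:

    // With leak detection in place, every object should already have been
    // discarded; a non-empty heap at this point indicates a leak.
    std.debug.assert(self.heap.count() == 0);

    self.heap.deinit(self.allocator);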
@@ -191,9 +201,9 @@ pub fn discard(self: *Self, val: types.Val) void {
                 .obj = val.as_ref(),
             });
 
-            coral.io.deallocate(self.allocator, data.state.userdata);
-
+            // TODO: Free individual key-value pairs of fields
             data.state.fields.deinit(self.allocator);
 
+            coral.io.deallocate(self.allocator, data.state.userdata);
+
             self.heap.remove(object);
         } else {
             self.heap.assign(object, data);
@@ -242,25 +252,29 @@ pub fn execute_file(self: *Self, fs: file.System, file_path: file.Path) ExecuteF
     defer readable_file.close();
 
-    var file_source = coral.list.Stack(u8){};
+    var file_data = coral.list.Stack(u8){};
 
     const file_size = (try fs.query_info(file_path)).size;
 
-    try file_source.grow(self.allocator, file_size);
-
-    defer file_source.deinit(self.allocator);
+    try file_data.grow(self.allocator, file_size);
+
+    defer file_data.deinit(self.allocator);
 
     {
-        var file_buffer = file_source.as_buffer(self.allocator);
+        var writable_data = coral.list.Writable{
+            .allocator = self.allocator,
+            .list = .{.stack = &file_data},
+        };
 
         var stream_buffer = [_]u8{0} ** 4096;
 
-        if ((try coral.io.stream(file_buffer.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
+        if ((try coral.io.stream(writable_data.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
             return error.ReadFailure;
         }
     }
 
     return try self.execute_data(.{
         .name = try file_path.to_string(),
-        .data = file_source.values,
+        .data = file_data.values,
     });
 }