Merge pull request 'Add Leak Detection to Ona Heap Allocator' (#15) from ona-allocator-safety-tracker into main
continuous-integration/drone/push Build is passing
Reviewed-on: #15
Commit 7714dae4d8
@@ -49,7 +49,7 @@ pub const Stacking = struct {
 	}

 	fn allocate_page(self: *Stacking, page_size: usize) io.AllocationError!*Page {
-		var buffer = try io.allocate_many(u8, page_size, self.base_allocator);
+		var buffer = try io.allocate_many(self.base_allocator, page_size, u8);

 		errdefer io.deallocate(self.base_allocator, buffer);
@@ -7,6 +7,7 @@ pub const AllocationError = error {
 };

 pub const AllocationOptions = struct {
+	return_address: usize,
 	allocation: ?[]u8 = null,
 	size: usize,
 };
@@ -133,57 +134,17 @@ pub const FixedBuffer = struct {
 	}
 };

-pub const GrowingBuffer = struct {
-	allocator: Allocator,
-	appender: Appender,
-
-	const AppendOptions = struct {
-		allocator: Allocator,
-		bytes: []const u8,
-	};
-
-	const Appender = Generator(AllocationError!void, AppendOptions);
-
-	pub fn as_writer(self: *GrowingBuffer) Writer {
-		return Writer.bind(GrowingBuffer, self, struct {
-			fn write(growing_buffer: *GrowingBuffer, bytes: []const u8) ?usize {
-				growing_buffer.write(bytes) catch return null;
-
-				return bytes.len;
-			}
-		}.write);
-	}
-
-	pub fn bind(comptime State: type, allocator: Allocator, state: *State, comptime appender: fn (capture: *State, allocator: Allocator, bytes: []const u8) AllocationError!void) GrowingBuffer {
-		return .{
-			.appender = Appender.bind(State, state, struct {
-				fn append(self: *State, options: AppendOptions) AllocationError!void {
-					return appender(self, options.allocator, options.bytes);
-				}
-			}.append),
-
-			.allocator = allocator,
-		};
-	}
-
-	pub fn write(self: GrowingBuffer, bytes: []const u8) AllocationError!void {
-		return self.appender.invoke(.{
-			.allocator = self.allocator,
-			.bytes = bytes,
-		});
-	}
-};
-
 pub const Writer = Generator(?usize, []const u8);

-pub fn allocate_many(comptime Type: type, amount: usize, allocator: Allocator) AllocationError![]Type {
+pub fn allocate_many(allocator: Allocator, amount: usize, comptime Type: type) AllocationError![]Type {
 	if (@sizeOf(Type) == 0) {
 		@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
 	}

-	return @ptrCast([*]Type, @alignCast(@alignOf(Type), allocator.invoke(.{.size = @sizeOf(Type) * amount}) orelse {
-		return error.OutOfMemory;
-	}))[0 .. amount];
+	return @ptrCast([*]Type, @alignCast(@alignOf(Type), allocator.invoke(.{
+		.size = @sizeOf(Type) * amount,
+		.return_address = @returnAddress(),
+	}) orelse return error.OutOfMemory))[0 .. amount];
 }

 pub fn allocate_one(allocator: Allocator, value: anytype) AllocationError!*@TypeOf(value) {
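The reworked allocate_many above now takes the allocator first, the element count second, and the element type last, and it forwards @returnAddress() so the heap allocator can record the call site of every allocation. A minimal caller-side sketch under those assumptions (the function name is illustrative, not part of the change):

    fn fill_zeroes(allocator: Allocator) AllocationError!void {
        // New argument order: allocator, element count, element type.
        const scalars = try allocate_many(allocator, 16, u32);

        // Deallocation still goes through the same allocator binding.
        defer deallocate(allocator, scalars);

        for (scalars) |*scalar| scalar.* = 0;
    }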
@@ -193,9 +154,10 @@ pub fn allocate_one(allocator: Allocator, value: anytype) AllocationError!*@Type
 		@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
 	}

-	const allocation = @ptrCast(*Type, @alignCast(@alignOf(Type), allocator.invoke(.{.size = @sizeOf(Type)}) orelse {
-		return error.OutOfMemory;
-	}));
+	const allocation = @ptrCast(*Type, @alignCast(@alignOf(Type), allocator.invoke(.{
+		.size = @sizeOf(Type),
+		.return_address = @returnAddress(),
+	}) orelse return error.OutOfMemory));

 	allocation.* = value;
@@ -232,17 +194,16 @@ pub fn copy(target: []u8, source: []const u8) void {
 }

 pub fn deallocate(allocator: Allocator, allocation: anytype) void {
-	const Allocation = @TypeOf(allocation);
-
-	switch (@typeInfo(Allocation)) {
-		.Pointer => |allocation_pointer| {
+	switch (@typeInfo(@TypeOf(allocation))) {
+		.Pointer => |pointer| {
 			_ = allocator.invoke(.{
-				.allocation = switch (allocation_pointer.size) {
-					.One => @ptrCast([*]u8, allocation)[0 .. @sizeOf(Allocation)],
-					.Slice => @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(Allocation) * allocation.len)],
+				.allocation = switch (pointer.size) {
+					.One => @ptrCast([*]u8, allocation)[0 .. @sizeOf(pointer.child)],
+					.Slice => @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(pointer.child) * allocation.len)],
 					.Many, .C => @compileError("length of allocation must be known to deallocate"),
 				},

+				.return_address = @returnAddress(),
 				.size = 0,
 			});
 		},
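Besides threading through the return address, the deallocate rewrite above fixes the byte-length computation: the old code used @sizeOf(Allocation), which measures the pointer or slice header itself, while the new code uses @sizeOf(pointer.child), so the length handed back to the allocator matches what was originally requested. A rough illustration of the difference, assuming a 64-bit target:

    // For a slice allocated as 4 u64 values:
    //   @sizeOf(@TypeOf(slice))  == 16  (slice header: pointer + length)
    //   @sizeOf(u64) * slice.len == 32  (the allocation actually made)
    // The leak tracker added later in this change compares the reported length
    // against its recorded size, so only the second form passes its check.
    const slice = try allocate_many(allocator, 4, u64);
    deallocate(allocator, slice);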
@@ -17,19 +17,6 @@ pub fn Stack(comptime Value: type) type {
 		///
 		const Self = @This();

-		///
-		/// Returns a [io.GrowableBuffer] bound to `self` and `allocator`.
-		///
-		/// The returned buffer may be used to write to the stack without needing to explicitly pass an allocator
-		/// context, as well decay further into a generic [io.Writer] type.
-		///
-		/// *Note* if `capacity` is a non-zero value, `allocator` must reference the same allocation strategy as the one
-		/// originally used to allocate the current internal buffer.
-		///
-		pub fn as_buffer(self: *Self, allocator: io.Allocator) io.GrowingBuffer {
-			return io.GrowingBuffer.bind(Self, allocator, self, push_all);
-		}
-
 		///
 		/// Clears all elements from `self` while preserving the current internal buffer.
 		///
@@ -52,7 +39,7 @@ pub fn Stack(comptime Value: type) type {
 				return;
 			}

-			io.deallocate(allocator, self.values);
+			io.deallocate(allocator, self.values.ptr[0 .. self.capacity]);

 			self.values = &.{};
 			self.capacity = 0;
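The deinit change above (repeated in grow further down) exists because the stack's backing buffer is allocated with self.capacity elements, while self.values is only the sub-slice currently in use. Passing the full-capacity slice keeps the reported length consistent with the original allocation, which the leak-checking heap allocator now verifies. A sketch of the invariant, with element counts chosen purely for illustration:

    // self.values.len == 3  (elements currently pushed)
    // self.capacity   == 8  (elements actually allocated)
    // Freeing must therefore cover the whole allocation:
    io.deallocate(allocator, self.values.ptr[0 .. self.capacity]);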
@@ -87,7 +74,7 @@ pub fn Stack(comptime Value: type) type {
 		///
 		pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
 			const grown_capacity = self.capacity + growth_amount;
-			const values = (try io.allocate_many(Value, grown_capacity, allocator))[0 .. self.values.len];
+			const values = (try io.allocate_many(allocator, grown_capacity, Value))[0 .. self.values.len];

 			errdefer io.deallocate(allocator, values);
@@ -96,7 +83,7 @@ pub fn Stack(comptime Value: type) type {
 					values[index] = self.values[index];
 				}

-				io.deallocate(allocator, self.values);
+				io.deallocate(allocator, self.values.ptr[0 .. self.capacity]);
 			}

 			self.values = values;
@@ -132,7 +119,7 @@ pub fn Stack(comptime Value: type) type {
 		pub fn push_all(self: *Self, allocator: io.Allocator, values: []const Value) io.AllocationError!void {
 			const new_length = self.values.len + values.len;

-			if (new_length >= self.capacity) {
+			if (new_length > self.capacity) {
 				try self.grow(allocator, values.len + values.len);
 			}
@@ -194,3 +181,49 @@ pub fn Stack(comptime Value: type) type {
 		}
 	};
 }
+
+///
+/// Bridge context between a list type implemented as part of the list module and an allocator, allowing the list
+/// resource referenced by the [Writable] instance to be written to directly or virtually via the [io.Writer] interface.
+///
+/// *Note* if the given list contains an existing allocation, the provided [io.Allocator] instance must reference the
+/// same allocation strategy as the one originally used to allocate the list type memory.
+///
+pub const Writable = struct {
+	allocator: io.Allocator,
+
+	list: union (enum) {
+		stack: *ByteStack,
+	},
+
+	///
+	/// Stack of bytes.
+	///
+	const ByteStack = Stack(u8);
+
+	///
+	/// Returns a [io.Writer] instance that binds a reference of `self` to the [write] operation.
+	///
+	pub fn as_writer(self: *Writable) io.Writer {
+		return io.Writer.bind(Writable, self, struct {
+			fn write(writable: *Writable, bytes: []const u8) ?usize {
+				writable.write(bytes) catch return null;
+
+				return bytes.len;
+			}
+		}.write);
+	}
+
+	///
+	/// Attempts to call the appropriate multi-element writing function for the list currently referenced by `self`,
+	/// passing `bytes` along.
+	///
+	/// The function returns [io.AllocationError] if `allocator` could not commit the memory required by the list
+	/// implementation referenced by `self`. See the specific implementation details of the respective list type for
+	/// more information.
+	///
+	pub fn write(self: *Writable, bytes: []const u8) io.AllocationError!void {
+		return switch (self.list) {
+			.stack => |stack| stack.push_all(self.allocator, bytes),
+		};
+	}
+};
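Writable replaces the removed io.GrowingBuffer / Stack.as_buffer pairing: the allocator is captured once in the struct instead of being re-bound per call. A hedged usage sketch mirroring the call sites changed later in this pull request (the function and variable names here are illustrative):

    fn collect_message(allocator: io.Allocator) io.AllocationError!void {
        var message_data = Stack(u8){};

        defer message_data.deinit(allocator);

        var writable_data = Writable{
            .allocator = allocator,
            .list = .{.stack = &message_data},
        };

        // Writes go through the captured allocator; as_writer() further decays
        // the binding into a generic io.Writer for APIs that expect one.
        try writable_data.write("hello, world");
    }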
@@ -52,17 +52,6 @@ pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type
 			entry.value = value;
 		}

-		///
-		/// Fetches the value referenced by `index` in `self`, returning it.
-		///
-		pub fn fetch(self: *Self, index: Index) Value {
-			const entry = &self.table[index];
-
-			debug.assert(entry.* == .value);
-
-			return entry.value;
-		}
-
 		///
 		/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
 		///
@@ -81,6 +70,17 @@ pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type
 			self.free_index = 0;
 		}

+		///
+		/// Fetches the value referenced by `index` in `self`, returning it.
+		///
+		pub fn fetch(self: *Self, index: Index) Value {
+			const entry = &self.table[index];
+
+			debug.assert(entry.* == .value);
+
+			return entry.value;
+		}
+
 		///
 		/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
 		///
@@ -95,7 +95,7 @@ pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type
 		///
 		pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
 			const grown_capacity = self.table.len + growth_amount;
-			const entries = try io.allocate_many(Entry, grown_capacity, allocator);
+			const entries = try io.allocate_many(allocator, grown_capacity, Entry);

 			errdefer io.deallocate(allocator, entries);
@@ -147,12 +147,20 @@ pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type

 			debug.assert(entry.* == .free_index);

+			self.count += 1;
 			self.free_index = entry.free_index;
 			entry.* = .{.value = value};

 			return entry_index;
 		}

+		///
+		/// Returns `true` if `self` contains no values, otherwise `false`.
+		///
+		pub fn is_empty(self: Self) bool {
+			return self.count == 0;
+		}
+
 		///
 		/// Removes the value referenced by `index` from `self`.
 		///
@@ -161,6 +169,7 @@ pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type

 			debug.assert(entry.* == .value);

+			self.count -= 1;
 			entry.* = .{.free_index = self.free_index};
 			self.free_index = index;
 		}
@@ -65,6 +65,30 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
 		}
 	};

+	///
+	/// Iterable wrapper for [Hashed] instances to make unordered traversal of key-value entries relatively trivial.
+	///
+	pub const Iterable = struct {
+		hashed_map: *Self,
+		iterations: usize = 0,
+
+		///
+		/// Attempts to move past the current iteration of `self` and onto the next key-value entry, returning it or
+		/// `null` if there are no more elements in the referenced map.
+		///
+		pub fn next(self: *Iterable) ?Entry {
+			while (self.iterations < self.hashed_map.table.len) {
+				defer self.iterations += 1;
+
+				if (self.hashed_map.table[self.iterations]) |entry| {
+					return entry;
+				}
+			}

+			return null;
+		}
+	};
+
 	///
 	/// Table type.
 	///
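A hedged sketch of how the new Iterable might be used for unordered traversal; the map variable and its setup are assumed here, only the Iterable shape comes from the change above:

    var entries = Iterable{.hashed_map = &string_map};

    // next() skips over empty table slots and returns null once the table is
    // exhausted; iteration order follows the table layout, not insertion order.
    while (entries.next()) |entry| {
        _ = entry.value;
    }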
@@ -228,7 +252,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
 	pub fn rehash(self: *Self, allocator: io.Allocator, requested_range: usize) io.AllocationError!void {
 		const old_table = self.table;

-		self.table = try io.allocate_many(?Entry, math.max(requested_range, self.count), allocator);
+		self.table = try io.allocate_many(allocator, math.max(requested_range, self.count), ?Entry);

 		errdefer {
 			io.deallocate(allocator, self.table);
@@ -1,54 +1,254 @@
 const builtin = @import("builtin");

 const coral = @import("coral");

 const ext = @import("./ext.zig");

 const std = @import("std");

+///
+/// Recorded allocation info state.
+///
+const AllocationInfo = struct {
+    trace: AllocationTrace,
+    next_info: ?*AllocationInfo,
+    size: usize,
+};
+
+///
+/// Recorded stack trace of allocation call site.
+///
+/// *Note* this structure is reduced to zero bytes in release builds optimized for speed or size.
+///
+const AllocationTrace = std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
+    .Debug, .ReleaseSafe => true,
+    .ReleaseFast, .ReleaseSmall => false,
+});
+
 ///
 /// Heap allocation context.
 ///
 const Context = struct {
-    live_allocations: usize,
+    allocation_info_head: ?*AllocationInfo = null,

-    const Self = @This();
-
+    ///
+    /// Attempts to allocate a buffer of `size` length from `self`, with `return_address` as the location of the
+    /// allocation request origin.
+    ///
+    /// A reference to the allocated buffer is returned via a slice if the allocation was successful, otherwise `null`
+    /// is returned.
+    ///
+    /// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
+    /// leak.
+    ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
+    fn allocate(self: *Context, size: usize, return_address: usize) ?[]u8 {
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const allocation_info_size = @sizeOf(AllocationInfo);
+                const total_allocation_size = allocation_info_size + size;
+                const allocation = ext.SDL_malloc(total_allocation_size) orelse return null;
+                const allocation_info = @ptrCast(*AllocationInfo, @alignCast(@alignOf(AllocationInfo), allocation));
-    const empty_allocation = [0]u8{};
+
+                allocation_info.* = .{
+                    .size = size,
+                    .next_info = self.allocation_info_head,
+                    .trace = .{},
+                };

-    fn reallocate(self: *Self, options: coral.io.AllocationOptions) ?[]u8 {
-        if (options.size == 0) {
-            if (options.allocation) |allocation| {
-                if (allocation.ptr != &empty_allocation) {
-                    ext.SDL_free(allocation.ptr);
+                allocation_info.trace.addAddr(return_address, "");
+
+                self.allocation_info_head = allocation_info;
+
+                return @ptrCast([*]u8, allocation)[allocation_info_size .. total_allocation_size];
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                return @ptrCast([*]u8, ext.SDL_malloc(size) orelse return null)[0 .. size];
+            },
+        }
+    }
+
+    ///
+    /// Returns the assumed pointer to the [AllocationInfo] address of `allocation`.
+    ///
+    fn allocation_info_of(allocation: [*]u8) *AllocationInfo {
+        return @intToPtr(*AllocationInfo, @ptrToInt(allocation) - @sizeOf(AllocationInfo));
+    }
+
+    ///
+    /// Deallocates the allocation buffer referenced by `allocation`.
+    ///
+    /// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
+    /// checked behavior will occur.
+    ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
+    fn deallocate(self: *Context, allocation: []u8) void {
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const target_allocation_info = allocation_info_of(allocation.ptr);
+
+                if (target_allocation_info.size != allocation.len) {
+                    @panic("incorrect allocation length for deallocating");
+                }
-                    self.live_allocations -= 1;
+
+                if (self.allocation_info_head) |allocation_info_head| {
+                    if (target_allocation_info == allocation_info_head) {
+                        self.allocation_info_head = allocation_info_head.next_info;
-
-                return null;
-            }
+
+                        ext.SDL_free(target_allocation_info);
-            self.live_allocations += 1;
+
+                        return;
+                    }
-
-            return &empty_allocation;
+
+                    var previous_allocation_info = allocation_info_head;
+                    var current_allocation_info = allocation_info_head.next_info;
+
+                    while (current_allocation_info) |allocation_info| {
+                        if (allocation_info == target_allocation_info) {
+                            previous_allocation_info.next_info = allocation_info.next_info;
+
+                            ext.SDL_free(target_allocation_info);
+
+                            return;
+                        }
+
+                        previous_allocation_info = allocation_info;
+                        current_allocation_info = allocation_info.next_info;
+                    }
+                }
+
+                @panic("incorrect allocation address for deallocating");
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                ext.SDL_free(allocation.ptr);
+            },
+        }
+    }

-        if (options.allocation) |allocation| {
-            if (ext.SDL_realloc(allocation.ptr, options.size)) |reallocation| {
-                self.live_allocations += 1;
+    ///
+    /// Attempts to reallocate the buffer referenced by `allocation` to be `size` length from `self`.
+    ///
+    /// A reference to the reallocated buffer is returned via a slice if the reallocation was successful, otherwise
+    /// `null` is returned.
+    ///
+    /// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
+    /// leak.
+    ///
+    /// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
+    /// checked behavior will occur.
+    ///
+    /// *Note* the allocation referenced by `allocation` should be considered invalid once the function returns,
+    /// discarding it in favor of the return value.
+    ///
+    /// *Note* allocation checks are disabled in release builds optimized for speed or size.
+    ///
+    fn reallocate(self: *Context, allocation: []u8, size: usize) ?[]u8 {
+        switch (builtin.mode) {
+            .Debug, .ReleaseSafe => {
+                const target_allocation_info = allocation_info_of(allocation.ptr);
-
-                return @ptrCast([*]u8, reallocation)[0 .. options.size];
-            }
+
+                if (target_allocation_info.size != allocation.len) {
+                    @panic("incorrect allocation length for reallocating");
+                }
+
+                const allocation_info_size = @sizeOf(AllocationInfo);
+
+                if (self.allocation_info_head) |allocation_info_head| {
+                    if (target_allocation_info == allocation_info_head) {
+                        self.allocation_info_head = allocation_info_head.next_info;
+
+                        const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
+
+                        target_allocation_info.size = size;
+
+                        return @ptrCast([*]u8, allocation_address)[
+                            allocation_info_size .. (allocation_info_size + size)];
+                    }
+
+                    var previous_allocation_info = allocation_info_head;
+                    var current_allocation_info = allocation_info_head.next_info;
+
+                    while (current_allocation_info) |allocation_info| {
+                        if (allocation_info == target_allocation_info) {
+                            previous_allocation_info.next_info = allocation_info.next_info;
+
+                            const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
+
+                            target_allocation_info.size = size;
+
+                            return @ptrCast([*]u8, allocation_address)[
+                                allocation_info_size .. (allocation_info_size + size)];
+                        }
+
+                        previous_allocation_info = allocation_info;
+                        current_allocation_info = allocation_info.next_info;
+                    }
+                }
+
+                @panic("incorrect allocation address for reallocating");
+            },
+
+            .ReleaseFast, .ReleaseSmall => {
+                return @ptrCast([*]u8, ext.SDL_realloc(allocation.ptr, size) orelse return null)[0 .. size];
+            },
+        }
-
-        if (ext.SDL_malloc(options.size)) |allocation| {
-            self.live_allocations += 1;
-
-            return @ptrCast([*]u8, allocation)[0 .. options.size];
-        }
-
-        return null;
    }
 };

-var context = Context{
-    .live_allocations = 0,
-};
+///
+/// Heap context.
+///
+var context = Context{};

 ///
 /// Heap allocator.
 ///
-pub const allocator = coral.io.Allocator.bind(Context, &context, Context.reallocate);
+pub const allocator = coral.io.Allocator.bind(Context, &context, struct {
+    fn reallocate(self: *Context, options: coral.io.AllocationOptions) ?[]u8 {
+        if (options.size == 0) {
+            if (options.allocation) |allocation| {
+                self.deallocate(allocation);
+
+                return null;
+            }
+
+            return self.allocate(0, options.return_address);
+        }
+
+        if (options.allocation) |allocation| {
+            return self.reallocate(allocation, options.size);
+        }
+
+        return self.allocate(options.size, options.return_address);
+    }
+}.reallocate);
+
+///
+/// Checks for any allocations belonging to the process heap allocated through the [allocator] interface that are still
+/// alive and reports the stack traces of any detected allocations to stderr along with the allocation address and
+/// length.
+///
+/// *Note* this function becomes a no-op in release builds optimized for speed or size.
+///
+pub fn trace_leaks() void {
+    switch (builtin.mode) {
+        .Debug, .ReleaseSafe => {
+            var current_allocation_info = context.allocation_info_head;
+
+            while (current_allocation_info) |allocation_info| : (current_allocation_info = allocation_info.next_info) {
+                std.debug.print("{d} byte leak at 0x{x} detected:\n", .{
+                    allocation_info.size,
+                    @ptrToInt(allocation_info) + @sizeOf(AllocationInfo),
+                });
+
+                allocation_info.trace.dump();
+            }
+        },
+
+        .ReleaseFast, .ReleaseSmall => {},
+    }
+}
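This rewritten heap module is the core of the pull request: in Debug and ReleaseSafe builds every allocation is prefixed with an AllocationInfo header, linked into context.allocation_info_head, and stamped with a stack trace of the requesting call site; deallocate and reallocate unlink the header again, and trace_leaks() walks whatever is still linked. A hedged sketch of the intended usage from outside the module (the function name and sizes are illustrative):

    fn leak_demo() void {
        // Report anything still linked in the heap context when this scope exits.
        defer heap.trace_leaks();

        const bytes = coral.io.allocate_many(heap.allocator, 64, u8) catch return;

        // Omitting this deallocate would leave the AllocationInfo node linked, and
        // trace_leaks() would print "64 byte leak at 0x... detected:" followed by
        // the recorded call-site trace (Debug and ReleaseSafe builds only).
        coral.io.deallocate(heap.allocator, bytes);
    }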
@@ -53,7 +53,7 @@ fn clear_error_details(self: *Self) void {
 pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
 	var ast = try Ast.init(self.env.allocator);

-	errdefer ast.deinit();
+	defer ast.deinit();

 	{
 		var tokenizer = tokens.Tokenizer{.source = data};
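The one-word change above swaps errdefer for defer because the Ast only lives for the duration of compile(): errdefer only ran its cleanup when a later try failed, so the success path presumably left the Ast's allocations alive, which the new leak tracker would now report at shutdown. Illustrative comparison:

    var ast = try Ast.init(self.env.allocator);

    // errdefer ast.deinit(); // cleanup only on the error path; success leaked it
    defer ast.deinit();       // cleanup on every exit path, success included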
@@ -62,9 +62,12 @@ pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
 		if (init_error == error.BadSyntax) {
 			self.clear_error_details();

-			var message_buffer = self.message_data.as_buffer(self.env.allocator);
+			var writable_data = coral.list.Writable{
+				.allocator = self.env.allocator,
+				.list = .{.stack = &self.message_data},
+			};

-			coral.utf8.print_formatted(message_buffer.as_writer(), "@({line}): {name}", .{
+			coral.utf8.print_formatted(writable_data.as_writer(), "@({line}): {name}", .{
 				.line = tokenizer.lines_stepped,
 				.name = ast.error_message,
 			}) catch return error.OutOfMemory;
@@ -69,21 +69,6 @@ const Object = struct {

 		self.ref_count += 1;
 	}
-
-	pub fn release(self: *Object, env: *Self) bool {
-		coral.debug.assert(self.ref_count != 0);
-
-		self.ref_count -= 1;
-
-		if (self.ref_count == 0) {
-			coral.io.deallocate(env.allocator, self.state.userdata);
-			self.state.fields.deinit(env.allocator);
-
-			return false;
-		}
-
-		return true;
-	}
 };

 pub const ObjectInfo = struct {
@@ -185,22 +170,26 @@ pub fn check(self: *Self, condition: bool, failure_message: []const u8) !void {
 }

 pub fn deinit(self: *Self) void {
+	self.object_release(self.global_object);
+
+	{
+		var interned_iterable = InternTable.Iterable{.hashed_map = &self.interned};
+
+		while (interned_iterable.next()) |entry| {
+			self.object_release(entry.value);
+		}
+	}
+
 	self.interned.deinit(self.allocator);
 	self.values.deinit(self.allocator);
 	self.calls.deinit(self.allocator);
+	coral.debug.assert(self.heap.is_empty());
 	self.heap.deinit(self.allocator);
 }

 pub fn discard(self: *Self, val: types.Val) void {
 	switch (val) {
-		.object => |object| {
-			var data = self.heap.fetch(object);
-
-			if (data.release(self)) {
-				self.heap.remove(object);
-			} else {
-				self.heap.assign(object, data);
-			}
-		},
-
+		.object => |object| self.object_release(object),
 		else => {},
 	}
 }
@@ -214,17 +203,21 @@ pub fn execute_data(self: *Self, source: DataSource) types.RuntimeError!types.Va
 	}
 };

-	var chunk = try Chunk.init(self, source.name);
+	var compiled_chunk = init_compiled_chunk: {
+		var chunk = try Chunk.init(self, source.name);

-	errdefer chunk.deinit();
+		errdefer chunk.deinit();

-	chunk.compile(source.data) catch |compile_error| {
-		self.reporter.invoke(chunk.error_details());
+		chunk.compile(source.data) catch |compile_error| {
+			self.reporter.invoke(chunk.error_details());

-		return compile_error;
-	};
+			return compile_error;
+		};
+
+		break :init_compiled_chunk chunk;
+	};

-	const script = try self.new_object(coral.io.bytes_of(&chunk), .{
+	const script = try self.new_object(coral.io.bytes_of(&compiled_chunk), .{
 		.identity = typeid,
 		.deinitializer = Behaviors.deinitialize,
 	});
@@ -239,25 +232,29 @@ pub fn execute_file(self: *Self, fs: file.System, file_path: file.Path) ExecuteF

 	defer readable_file.close();

-	var file_source = coral.list.Stack(u8){};
+	var file_data = coral.list.Stack(u8){};
 	const file_size = (try fs.query_info(file_path)).size;

-	try file_source.grow(self.allocator, file_size);
+	try file_data.grow(self.allocator, file_size);

-	defer file_source.deinit(self.allocator);
+	defer file_data.deinit(self.allocator);

 	{
-		var file_buffer = file_source.as_buffer(self.allocator);
+		var writable_data = coral.list.Writable{
+			.allocator = self.allocator,
+			.list = .{.stack = &file_data},
+		};

 		var stream_buffer = [_]u8{0} ** 4096;

-		if ((try coral.io.stream(file_buffer.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
+		if ((try coral.io.stream(writable_data.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
 			return error.ReadFailure;
 		}
 	}

 	return try self.execute_data(.{
 		.name = try file_path.to_string(),
-		.data = file_source.values,
+		.data = file_data.values,
 	});
 }
@@ -346,12 +343,11 @@ pub fn native_cast(self: *Self, castable: types.Ref, id: *const anyopaque, compt
 	try self.check(castable == .object, "invalid type conversion: object");

 	const object = self.heap.fetch(castable.object);
-	const alignment = @alignOf(Type);
-	const is_expected_type = (object.state.info.identity == id) and (object.state.userdata.len == alignment);
+	const is_expected_type = (object.state.info.identity == id) and (object.state.userdata.len == @sizeOf(Type));

 	try self.check(is_expected_type, "invalid object cast: native type");

-	return @ptrCast(*Type, @alignCast(alignment, object.state.userdata));
+	return @ptrCast(*Type, @alignCast(@alignOf(Type), object.state.userdata));
 }

 pub fn new_array(self: *Self) coral.io.AllocationError!types.Val {
@@ -361,7 +357,7 @@ pub fn new_array(self: *Self) coral.io.AllocationError!types.Val {
 }

 pub fn new_object(self: *Self, userdata: []const u8, info: ObjectInfo) coral.io.AllocationError!types.Val {
-	const allocation = try coral.io.allocate_many(u8, userdata.len, self.allocator);
+	const allocation = try coral.io.allocate_many(self.allocator, userdata.len, u8);

 	errdefer coral.io.deallocate(self.allocator, allocation);
@@ -400,6 +396,28 @@ pub fn new_string(self: *Self, data: []const u8) coral.io.AllocationError!types.
 	});
 }

+pub fn object_release(self: *Self, object: types.Object) void {
+	var data = self.heap.fetch(object);
+
+	coral.debug.assert(data.ref_count != 0);
+
+	data.ref_count -= 1;
+
+	if (data.ref_count == 0) {
+		data.state.info.deinitializer(.{
+			.env = self,
+			.obj = .{.object = object},
+		});
+
+		// TODO: Free individual key-value pairs of fields
+		data.state.fields.deinit(self.allocator);
+		coral.io.deallocate(self.allocator, data.state.userdata);
+		self.heap.remove(object);
+	} else {
+		self.heap.assign(object, data);
+	}
+}
+
 pub fn set_global(self: *Self, global_name: []const u8, value: types.Ref) coral.io.AllocationError!void {
 	try self.globals.assign(self.allocator, global_name, value);
 }
@@ -56,6 +56,8 @@ const AppManifest = struct {
 };

 pub fn run_app(base_file_system: file.System) void {
+	defer heap.trace_leaks();
+
 	const Logger = struct {
 		const Self = @This();