Kym Object Allocation Fix #13

Merged
kayomn merged 3 commits from kym-object-alloc-fix into main 2023-06-03 22:07:19 +02:00
5 changed files with 171 additions and 37 deletions

View File

@@ -79,8 +79,8 @@ pub fn Stack(comptime Value: type) type {
 /// internal buffer by `growth_amount`, leaving `self` in the same state that it was in prior to starting the
 /// grow.
 ///
-/// Growing ahead of pushing operations is useful when the upper bound of pushes is well-understood, as it can
-/// reduce the number of allocations required per push.
+/// Growing ahead of multiple push operations is useful when the upper bound of pushes is well-understood, as it
+/// can reduce the number of allocations required per push.
 ///
 /// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
 /// strategy as the one originally used to allocate the current internal buffer.
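As a quick illustration of the reworded doc comment, a hedged sketch of growing ahead of a batch of pushes. Only `grow` and the allocator contract come from this diff; `push` is an assumed method name used for illustration.

```zig
// Sketch only: `grow` comes from the diff above; `push` is an assumed method
// name on the same Stack type, shown to illustrate reserving ahead of pushes.
fn fill(allocator: io.Allocator, values: *Stack(u32)) io.AllocationError!void {
    // One up-front allocation instead of up to 128 incremental ones.
    try values.grow(allocator, 128);

    var i: u32 = 0;
    while (i < 128) : (i += 1) {
        try values.push(allocator, i); // assumed push signature
    }
}
```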

View File

@@ -2,44 +2,167 @@ const debug = @import("./debug.zig");
 const io = @import("./io.zig");
-// TODO: Finish implementing.
-pub fn Map(comptime Index: type, comptime Element: type) type {
+const math = @import("./math.zig");
+const std = @import("std");
+
+///
+/// Addressable mapping of integers described by `index_int` to values of type `Value`.
kayomn marked this conversation as resolved (Outdated)

Overly wordy comment
+///
+/// Slab maps are similar to slot maps in that they have O(1) insertion and removal, however, use a flat table layout
+/// instead of parallel arrays. This reduces memory usage in some cases and can be useful for data that does not need to
kayomn marked this conversation as resolved (Outdated)

Comment could do with clarification.
+/// be quickly iterated over, as values ordering is not guaranteed.
+///
+/// *Note* `index_int` values may be as big or as small as desired per the use-case of the consumer, however, integers
+/// smaller than `usize` may result in the map reporting it is out of memory due to exhausting the addressable space
+/// provided by the integer.
+///
+pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type {
     return struct {
         free_index: Index = 0,
-        entries: []Entry = &.{},
+        count: Index = 0,
+        table: []Entry = &.{},
+
+        ///
+        /// Table entry which may either store an inserted value or an index to the next free entry in the table.
+        ///
         const Entry = union (enum) {
-            free_index: usize,
-            element: Element,
+            free_index: Index,
+            value: Value,
         };
+
+        ///
+        /// Used for indexing into the slab map.
+        ///
+        const Index = math.Int(index_int);
+
+        ///
+        /// Slab map type.
kayomn marked this conversation as resolved (Outdated)

Slab *map* type.
+        ///
         const Self = @This();
 
-        pub fn fetch(self: *Self, index: Index) *Element {
-            const entry = &self.entries[index];
+        ///
+        /// Overwrites the value referenced by `index` in `self`.
+        ///
+        pub fn assign(self: *Self, index: Index, value: Value) void {
+            const entry = &self.table[index];
 
-            debug.assert(entry.* == .element);
+            debug.assert(entry.* == .value);
 
-            return &entry.element;
+            entry.value = value;
         }
 
+        ///
+        /// Fetches the value referenced by `index` in `self`, returning it.
+        ///
+        pub fn fetch(self: *Self, index: Index) Value {
+            const entry = &self.table[index];
+
+            debug.assert(entry.* == .value);
+
+            return entry.value;
+        }
+
+        ///
+        /// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
+        ///
+        /// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+        /// strategy as the one originally used to allocate the current table.
+        ///
         pub fn deinit(self: *Self, allocator: io.Allocator) void {
-            io.deallocate(allocator, self.entries);
+            if (self.table.len == 0) {
+                return;
             }
 
-        pub fn insert(self: *Self, allocator: io.Allocator, value: Element) io.AllocationError!Index {
-            _ = self;
-            _ = allocator;
-            _ = value;
+            io.deallocate(allocator, self.table);
 
-            return 0;
+            self.table = &.{};
+            self.count = 0;
kayomn marked this conversation as resolved

`free_index` should be reset in here as well, otherwise re-uses of the data structure in future will have a corrupt initial state.
+            self.free_index = 0;
         }
 
+        ///
+        /// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
+        ///
+        /// The function returns [io.AllocatorError] if `allocator` could not commit the memory required to grow the
+        /// table by `growth_amount`, leaving `self` in the same state that it was in prior to starting the grow.
+        ///
+        /// Growing ahead of multiple insertion operations is useful when the upper bound of insertions is well-
+        /// understood, as it can reduce the number of allocations required per insertion.
+        ///
+        /// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+        /// strategy as the one originally used to allocate the current table.
+        ///
+        pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
+            const grown_capacity = self.table.len + growth_amount;
+            const entries = try io.allocate_many(Entry, grown_capacity, allocator);
+
+            errdefer io.deallocate(allocator, entries);
+
+            if (self.table.len != 0) {
+                for (0 .. self.table.len) |index| {
+                    entries[index] = self.table[index];
+                }
+
+                for (self.table.len .. entries.len) |index| {
+                    entries[index] = .{.free_index = 0};
+                }
+
+                io.deallocate(allocator, self.table);
+            }
+
+            self.table = entries;
+        }
+
+        ///
+        /// Attempts to insert `value` into `self` as a new entry using `allocator` as the allocation strategy,
+        /// returning an index value representing a reference to the inserted value that may be queried through `self`
+        /// after.
+        ///
+        /// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
+        /// internal buffer of `self` when necessary.
+        ///
+        /// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+        /// strategy as the one originally used to allocate the current table.
+        ///
+        pub fn insert(self: *Self, allocator: io.Allocator, value: Value) io.AllocationError!Index {
+            if (self.count == self.table.len) {
+                try self.grow(allocator, math.max(1, self.count));
+            }
+
+            if (self.free_index == self.count) {
+                const entry_index = self.count;
+                const entry = &self.table[entry_index];
+
+                entry.* = .{.value = value};
+                self.count += 1;
+                self.free_index += 1;
+
+                return entry_index;
+            }
+
+            const entry_index = self.free_index;
+            const entry = &self.table[self.free_index];
+
+            debug.assert(entry.* == .free_index);
+
+            self.free_index = entry.free_index;
+            entry.* = .{.value = value};
+
+            return entry_index;
+        }
+
+        ///
+        /// Removes the value referenced by `index` from `self`.
+        ///
         pub fn remove(self: *Self, index: Index) void {
-            const entry = &self.entries[index];
+            const entry = &self.table[index];
 
-            debug.assert(entry.* == .element);
+            debug.assert(entry.* == .value);
+
+            entry.* = .{.free_index = self.free_index};
+            self.free_index = index;
         }
     };
 }
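For reviewers trying the new API, a hedged usage sketch of the reworked slab map. The `coral` import path, the allocator value, and the element type are placeholders; the calls themselves mirror the functions added in this file.

```zig
const coral = @import("coral"); // assumed module name

fn slab_example(allocator: coral.io.Allocator) coral.io.AllocationError!void {
    // The index type is described by a `std.builtin.Type.Int`, as in the ObjectSlab change below.
    var values = coral.slab.Map(@typeInfo(u16).Int, f32){};
    defer values.deinit(allocator);

    try values.grow(allocator, 16); // optional: reserve ahead of known insertions

    const index = try values.insert(allocator, 3.14);

    // `fetch` returns a copy of the stored value; `assign` writes one back.
    values.assign(index, values.fetch(index) * 2.0);

    // `remove` turns the slot into a free-list entry for reuse by later inserts.
    values.remove(index);
}
```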

View File

@@ -154,6 +154,10 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
 /// `self`.
 ///
 pub fn deinit(self: *Self, allocator: io.Allocator) void {
+    if (self.table.len == 0) {
+        return;
+    }
+
     io.deallocate(allocator, self.table);
 
     self.table = &.{};
@@ -196,7 +200,6 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
         return null;
     }
 
-    {
     const hash_max = math.min(math.max_int(hash_info), self.table.len);
     var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
     var iterations = @as(usize, 0);
@@ -210,7 +213,6 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
         hashed_key = (hashed_key +% 1) % hash_max;
     }
-    }
 
     return null;
 }
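The early return added to `deinit` here (and in the slab map above) exists because these containers default their `table` field to an empty slice literal that was never handed out by the allocator. A minimal sketch of that pattern, using hypothetical names and `std.mem.Allocator` rather than the coral io types:

```zig
const std = @import("std");

// Hypothetical container mirroring the default-empty-table pattern in this PR.
const Buffer = struct {
    table: []u8 = &.{}, // zero-length default, not owned by any allocator

    fn deinit(self: *Buffer, allocator: std.mem.Allocator) void {
        // Guard: never hand a slice back to the allocator unless it was actually allocated.
        if (self.table.len == 0) {
            return;
        }

        allocator.free(self.table);
        self.table = &.{};
    }
};
```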

View File

@@ -143,7 +143,7 @@ pub const ObjectInfo = struct {
     }
 };
 
-const ObjectSlab = coral.slab.Map(u32, Object);
+const ObjectSlab = coral.slab.Map(@typeInfo(u32).Int, Object);
 
 pub const Reporter = coral.io.Functor(void, []const u8);
@@ -192,8 +192,12 @@ pub fn deinit(self: *Self) void {
 pub fn discard(self: *Self, val: types.Val) void {
     switch (val) {
         .object => |object| {
-            if (!self.heap.fetch(object).release(self)) {
+            var data = self.heap.fetch(object);
+
+            if (data.release(self)) {
                 self.heap.remove(object);
+            } else {
+                self.heap.assign(object, data);
             }
         },
@ -267,12 +271,19 @@ pub fn get_field(self: *Self, indexable: types.Ref, field: types.Ref) !types.Val
try self.check(indexable == .object, "invalid type conversion: object"); try self.check(indexable == .object, "invalid type conversion: object");
try self.check(field == .object, "invalid type conversion: object"); try self.check(field == .object, "invalid type conversion: object");
const value = self.heap.fetch(indexable.object).state.fields.lookup(self.heap.fetch(field.object)) orelse { const value = get_value: {
var field_data = self.heap.fetch(field.object);
break: get_value self.heap.fetch(indexable.object).state.fields.lookup(&field_data) orelse {
return .nil; return .nil;
}; };
};
if (value == .object) { if (value == .object) {
self.heap.fetch(value.object).acquire(); var value_data = self.heap.fetch(value.object);
value_data.acquire();
self.heap.assign(value.object, value_data);
} }
return value; return value;
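These call-site changes follow from the slab map's `fetch` now returning the value by copy instead of by pointer: any mutation of the fetched object data, such as the reference-count changes in `acquire`/`release`, has to be written back with `assign`. A hedged sketch of that copy, mutate, write-back pattern; `heap` and `retain` here are illustrative, not part of the diff.

```zig
// Illustrative only: shows the fetch/assign round-trip required once fetch
// returns a copy rather than a pointer into the slab.
fn retain(heap: anytype, object: u32) void {
    var data = heap.fetch(object); // copy the stored object data out
    data.acquire();                // mutate the local copy
    heap.assign(object, data);     // write the mutated copy back into its slot
}
```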

View File

@@ -96,8 +96,6 @@ pub const Tokenizer = struct {
 defer self.source = self.source[cursor ..];
 
-defer @import("std").debug.print("{s}\n", .{self.current_token.text()});
-
 while (cursor < self.source.len) {
     switch (self.source[cursor]) {
         '#' => {