Merge pull request 'Kym Object Allocation Fix' (#13) from kym-object-alloc-fix into main
continuous-integration/drone/push Build is passing
Reviewed-on: #13
commit 9eadd30aa5
@@ -79,8 +79,8 @@ pub fn Stack(comptime Value: type) type {
 		/// internal buffer by `growth_amount`, leaving `self` in the same state that it was in prior to starting the
 		/// grow.
 		///
-		/// Growing ahead of pushing operations is useful when the upper bound of pushes is well-understood, as it can
-		/// reduce the number of allocations required per push.
+		/// Growing ahead of multiple push operations is useful when the upper bound of pushes is well-understood, as it
+		/// can reduce the number of allocations required per push.
 		///
 		/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
 		/// strategy as the one originally used to allocate the current internal buffer.

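The reworded comment above documents the stack's `grow` routine. A minimal sketch of the grow-ahead-of-pushes pattern it describes follows; `coral.stack.Stack`, the empty `{}` initializer, `deinit`, and `push_one` are assumptions made purely for illustration, since only `grow(allocator, growth_amount)` appears in this hunk.

const coral = @import("coral");

// Sketch only: pre-grow a byte stack before a known number of pushes so that
// the pushes themselves never have to allocate. Method names other than `grow`
// are assumed, not taken from this diff.
fn collect_bytes(allocator: coral.io.Allocator, bytes: []const u8) coral.io.AllocationError!void {
	var buffer = coral.stack.Stack(u8){};

	defer buffer.deinit(allocator);

	// One allocation up front instead of (potentially) one per push.
	try buffer.grow(allocator, bytes.len);

	for (bytes) |byte| {
		try buffer.push_one(allocator, byte);
	}
}
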
@@ -2,44 +2,167 @@ const debug = @import("./debug.zig");
 
 const io = @import("./io.zig");
 
-// TODO: Finish implementing.
+const math = @import("./math.zig");
 
-pub fn Map(comptime Index: type, comptime Element: type) type {
+const std = @import("std");
+
+///
+/// Addressable mapping of integers described by `index_int` to values of type `Value`.
+///
+/// Slab maps are similar to slot maps in that they have O(1) insertion and removal, however, use a flat table layout
+/// instead of parallel arrays. This reduces memory usage in some cases and can be useful for data that does not need to
+/// be quickly iterated over, as value ordering is not guaranteed.
+///
+/// *Note* `index_int` values may be as big or as small as desired per the use-case of the consumer, however, integers
+/// smaller than `usize` may result in the map reporting it is out of memory due to exhausting the addressable space
+/// provided by the integer.
+///
+pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type {
 	return struct {
 		free_index: Index = 0,
-		entries: []Entry = &.{},
+		count: Index = 0,
+		table: []Entry = &.{},
 
+		///
+		/// Table entry which may either store an inserted value or an index to the next free entry in the table.
+		///
 		const Entry = union (enum) {
-			free_index: usize,
-			element: Element,
+			free_index: Index,
+			value: Value,
 		};
 
+		///
+		/// Used for indexing into the slab map.
+		///
+		const Index = math.Int(index_int);
+
+		///
+		/// Slab map type.
+		///
 		const Self = @This();
 
-		pub fn fetch(self: *Self, index: Index) *Element {
-			const entry = &self.entries[index];
+		///
+		/// Overwrites the value referenced by `index` in `self`.
+		///
+		pub fn assign(self: *Self, index: Index, value: Value) void {
+			const entry = &self.table[index];
 
-			debug.assert(entry.* == .element);
+			debug.assert(entry.* == .value);
 
-			return &entry.element;
+			entry.value = value;
 		}
 
+		///
+		/// Fetches the value referenced by `index` in `self`, returning it.
+		///
+		pub fn fetch(self: *Self, index: Index) Value {
+			const entry = &self.table[index];
+
+			debug.assert(entry.* == .value);
+
+			return entry.value;
+		}
+
+		///
+		/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
+		///
+		/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+		/// strategy as the one originally used to allocate the current table.
+		///
 		pub fn deinit(self: *Self, allocator: io.Allocator) void {
-			io.deallocate(allocator, self.entries);
+			if (self.table.len == 0) {
+				return;
+			}
+
+			io.deallocate(allocator, self.table);
+
+			self.table = &.{};
+			self.count = 0;
+			self.free_index = 0;
 		}
 
-		pub fn insert(self: *Self, allocator: io.Allocator, value: Element) io.AllocationError!Index {
-			_ = self;
-			_ = allocator;
-			_ = value;
+		///
+		/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
+		///
+		/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
+		/// table by `growth_amount`, leaving `self` in the same state that it was in prior to starting the grow.
+		///
+		/// Growing ahead of multiple insertion operations is useful when the upper bound of insertions is well-
+		/// understood, as it can reduce the number of allocations required per insertion.
+		///
+		/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+		/// strategy as the one originally used to allocate the current table.
+		///
+		pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
+			const grown_capacity = self.table.len + growth_amount;
+			const entries = try io.allocate_many(Entry, grown_capacity, allocator);
 
-			return 0;
+			errdefer io.deallocate(allocator, entries);
+
+			if (self.table.len != 0) {
+				for (0 .. self.table.len) |index| {
+					entries[index] = self.table[index];
+				}
+
+				for (self.table.len .. entries.len) |index| {
+					entries[index] = .{.free_index = 0};
+				}
+
+				io.deallocate(allocator, self.table);
+			}
+
+			self.table = entries;
 		}
 
+		///
+		/// Attempts to insert `value` into `self` as a new entry using `allocator` as the allocation strategy,
+		/// returning an index value representing a reference to the inserted value that may be queried through `self`
+		/// after.
+		///
+		/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
+		/// internal buffer of `self` when necessary.
+		///
+		/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
+		/// strategy as the one originally used to allocate the current table.
+		///
+		pub fn insert(self: *Self, allocator: io.Allocator, value: Value) io.AllocationError!Index {
+			if (self.count == self.table.len) {
+				try self.grow(allocator, math.max(1, self.count));
+			}
+
+			if (self.free_index == self.count) {
+				const entry_index = self.count;
+				const entry = &self.table[entry_index];
+
+				entry.* = .{.value = value};
+
+				self.count += 1;
+				self.free_index += 1;
+
+				return entry_index;
+			}
+
+			const entry_index = self.free_index;
+			const entry = &self.table[self.free_index];
+
+			debug.assert(entry.* == .free_index);
+
+			self.free_index = entry.free_index;
+			entry.* = .{.value = value};
+
+			return entry_index;
+		}
+
+		///
+		/// Removes the value referenced by `index` from `self`.
+		///
 		pub fn remove(self: *Self, index: Index) void {
-			const entry = &self.entries[index];
+			const entry = &self.table[index];
 
-			debug.assert(entry.* == .element);
+			debug.assert(entry.* == .value);
+
+			entry.* = .{.free_index = self.free_index};
+			self.free_index = index;
 		}
 	};
 }

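For reference, a minimal usage sketch of the slab map API added above. The method names (`insert`, `fetch`, `assign`, `remove`, `deinit`, `grow`) and the `Map(@typeInfo(u32).Int, ...)` instantiation come straight from this change; the `coral` import path, the `Player` payload, and the caller-supplied allocator are assumptions made only for illustration. Note that `fetch` returns the entry by value, so mutations must be written back with `assign`, and removed indices are recycled by later insertions through the free list.

const coral = @import("coral");
const std = @import("std");

const Player = struct {
	health: i32,
};

// Sketch only: exercises the slab map API added in this change. The caller
// supplies the allocator so nothing project-specific has to be assumed here.
fn demo(allocator: coral.io.Allocator) coral.io.AllocationError!void {
	var players = coral.slab.Map(@typeInfo(u32).Int, Player){};

	defer players.deinit(allocator);

	// Optional: grow ahead of a known number of insertions so the inserts
	// below do not each have to re-allocate the table.
	try players.grow(allocator, 2);

	const hero = try players.insert(allocator, .{.health = 100});
	const enemy = try players.insert(allocator, .{.health = 50});

	// `fetch` copies the entry out; changes go back in via `assign`.
	var hero_data = players.fetch(hero);

	hero_data.health -= 10;
	players.assign(hero, hero_data);

	// Removed slots land on the free list and are reused by later inserts.
	players.remove(enemy);

	const respawned = try players.insert(allocator, .{.health = 50});

	std.debug.assert(respawned == enemy);
}
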
@@ -154,6 +154,10 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
 		/// `self`.
 		///
 		pub fn deinit(self: *Self, allocator: io.Allocator) void {
+			if (self.table.len == 0) {
+				return;
+			}
+
 			io.deallocate(allocator, self.table);
 
 			self.table = &.{};

@@ -196,20 +200,18 @@ pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Ke
 				return null;
 			}
 
-			{
-				const hash_max = math.min(math.max_int(hash_info), self.table.len);
-				var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
-				var iterations = @as(usize, 0);
+			const hash_max = math.min(math.max_int(hash_info), self.table.len);
+			var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
+			var iterations = @as(usize, 0);
 
-				while (iterations < self.count) : (iterations += 1) {
-					const entry = &(self.table[hashed_key] orelse return null);
+			while (iterations < self.count) : (iterations += 1) {
+				const entry = &(self.table[hashed_key] orelse return null);
 
-					if (keyer.comparer(entry.key, key) == 0) {
-						return entry.value;
-					}
+				if (keyer.comparer(entry.key, key) == 0) {
+					return entry.value;
+				}
 
-					hashed_key = (hashed_key +% 1) % hash_max;
-				}
-			}
+				hashed_key = (hashed_key +% 1) % hash_max;
+			}
 
 			return null;

@@ -143,7 +143,7 @@ pub const ObjectInfo = struct {
 	}
 };
 
-const ObjectSlab = coral.slab.Map(u32, Object);
+const ObjectSlab = coral.slab.Map(@typeInfo(u32).Int, Object);
 
 pub const Reporter = coral.io.Functor(void, []const u8);
 

@@ -192,8 +192,12 @@ pub fn deinit(self: *Self) void {
 pub fn discard(self: *Self, val: types.Val) void {
 	switch (val) {
 		.object => |object| {
-			if (!self.heap.fetch(object).release(self)) {
+			var data = self.heap.fetch(object);
+
+			if (data.release(self)) {
 				self.heap.remove(object);
+			} else {
+				self.heap.assign(object, data);
 			}
 		},
 

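The reworked `discard` above follows the new slab-map contract: `fetch` now returns the entry by value, so the reference-count change made by `release` has to be written back with `assign` whenever the entry is not removed outright. Below is a minimal sketch of that fetch-copy, mutate, assign-back pattern; the `Counter` payload and `retain` helper are hypothetical stand-ins for the engine's object data.

const coral = @import("coral");

// Hypothetical payload; stands in for the object data held by the kym heap.
const Counter = struct {
	ref_count: u32 = 1,
};

const CounterSlab = coral.slab.Map(@typeInfo(u32).Int, Counter);

// Read-modify-write against a slab map: copy the entry out, mutate the copy,
// then assign it back so the mutation is not silently dropped.
fn retain(heap: *CounterSlab, index: u32) void {
	var data = heap.fetch(index);

	data.ref_count += 1;

	heap.assign(index, data);
}
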
@@ -267,12 +271,19 @@ pub fn get_field(self: *Self, indexable: types.Ref, field: types.Ref) !types.Val
 	try self.check(indexable == .object, "invalid type conversion: object");
 	try self.check(field == .object, "invalid type conversion: object");
 
-	const value = self.heap.fetch(indexable.object).state.fields.lookup(self.heap.fetch(field.object)) orelse {
-		return .nil;
+	const value = get_value: {
+		var field_data = self.heap.fetch(field.object);
+
+		break: get_value self.heap.fetch(indexable.object).state.fields.lookup(&field_data) orelse {
+			return .nil;
+		};
 	};
 
 	if (value == .object) {
-		self.heap.fetch(value.object).acquire();
+		var value_data = self.heap.fetch(value.object);
+
+		value_data.acquire();
+		self.heap.assign(value.object, value_data);
 	}
 
 	return value;

@@ -96,8 +96,6 @@ pub const Tokenizer = struct {
 
 		defer self.source = self.source[cursor ..];
 
-		defer @import("std").debug.print("{s}\n", .{self.current_token.text()});
-
 		while (cursor < self.source.len) {
 			switch (self.source[cursor]) {
 				'#' => {