Merge pull request 'Implement Bytecode Executor for Kym' (#19) from kym-bytecode-executor into main
continuous-integration/drone/push Build is passing
Reviewed-on: #19
Commit: 4ff733ca72
@@ -2,20 +2,12 @@
"version": "0.2.0",
"configurations": [
{
"name": "Runtime",
"name": "Runner",
"type": "gdb",
"request": "launch",
"target": "${workspaceRoot}/zig-out/bin/ona-runner",
"target": "${workspaceRoot}/zig-out/bin/runner",
"cwd": "${workspaceRoot}/debug/",
"valuesFormatting": "parseText"
},
{
"name": "Build Script",
"type": "python",
"request": "launch",
"program": "./build.py",
"console": "integratedTerminal",
"justMyCode": true
}
]
}
@@ -1,13 +1,13 @@
{
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,

"[zig]": {
"editor.formatOnSave": false,
"files.eol": "\n",
"editor.minimap.maxColumn": 120,
"editor.detectIndentation": false,
"editor.insertSpaces": false,
"editor.rulers": [120],
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"zig.formattingProvider": "off",
"zig.zls.enableAutofix": false,
"editor.formatOnSave": false,
"spellright.language": ["en-US-10-1."],
"spellright.documentTypes": ["markdown"],
}
}
@@ -2,24 +2,23 @@
"version": "2.0.0",
"tasks": [
{
"label": "build",
"type": "process",
"command": "zig",
"args": ["build"],
"label": "Build All",
"type": "shell",
"command": "zig build",
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": "$gcc",
"presentation": {
"echo": true,
"reveal": "silent",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"showReuseMessage": false,
"clear": true,
"revealProblems": "onProblem"
},
"group": {
"kind": "build",
"isDefault": true
},
}
}
]
}
@@ -1,6 +1,9 @@
const std = @import("std");

pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});

const coral_module = b.createModule(.{.source_file = .{.path = "./source/coral/coral.zig"}});

const ona_module = b.createModule(.{

@@ -14,37 +17,28 @@ pub fn build(b: *std.Build) void {
},
});

const cross_target = b.standardTargetOptions(.{});
const optimize_mode = std.builtin.Mode.Debug;

// Ona runner.
{
const ona_exe = b.addExecutable(.{
.name = "ona-runner",
.root_source_file = .{.path = "./source/runner.zig"},
.target = cross_target,
.optimize = optimize_mode,
b.installArtifact(create: {
const compile_step = b.addExecutable(.{
.name = "runner",
.root_source_file = .{ .path = "source/runner.zig" },
.target = target,
.optimize = optimize,
});

ona_exe.addModule("coral", coral_module);
ona_exe.addModule("ona", ona_module);
compile_step.addModule("ona", ona_module);
compile_step.linkLibC();
compile_step.linkSystemLibrary("SDL2");

// ona_exe.addIncludeDir("./ext");
ona_exe.linkSystemLibrary("SDL2");
ona_exe.linkLibC();
b.installArtifact(ona_exe);
}
break: create compile_step;
});

// Test step.
{
const exe_tests = b.addTest(.{
b.step("test", "Run unit tests").dependOn(create: {
const tests = b.addTest(.{
.root_source_file = .{.path = "source/test.zig"},
.target = cross_target,
.optimize = optimize_mode,
.target = target,
.optimize = optimize,
});

const test_step = b.step("test", "Run unit tests");

test_step.dependOn(&exe_tests.step);
}
break: create &tests.step;
});
}
@@ -1,6 +1,9 @@

title = "Afterglow"

return {
title = "Afterglow",
title = title,
width = 1280,
height = 800,
tick_rate = 60,
}
@@ -26,8 +26,6 @@ Ona is also the Catalan word for "wave".

* Provide utilities for handling rendering but otherwise leave the higher-level game logic and data structuring to the programmer.

* Provide a simple scripting interface for people who want to do something quick and a powerful plug-in API for engine-level extensions and speed-critical application logic.

## Technical Details

### Requirements
@@ -7,53 +7,30 @@ const list = @import("./list.zig");
const math = @import("./math.zig");

pub const Stacking = struct {
base_allocator: io.Allocator,
page_allocator: io.Allocator,
min_page_size: usize,
allocations: list.Stack(usize) = .{},
pages: list.Stack(Page) = .{},
allocations: list.Stack(usize),
pages: list.Stack(Page),

const AllocationsList = list.Stack(usize);

const Page = struct {
buffer: []u8,
buffer: []io.Byte,
used: usize,

const Self = @This();

fn available(self: Self) usize {
fn available(self: Page) usize {
return self.buffer.len - self.used;
}
};

pub fn allocate(self: *Stacking, allocation_size: usize) io.AllocationError![]u8 {
const alignment = @as(usize, 4);
const aligned_allocation_size = (allocation_size + alignment - 1) & ~(alignment - 1);

if (self.pages.values.len == 0) {
const page = try self.allocate_page(math.max(self.min_page_size, aligned_allocation_size));

page.used = allocation_size;

return page.buffer[0 .. allocation_size];
}

var page = self.current_page() orelse unreachable;

if (page.available() <= aligned_allocation_size) {
page = try self.allocate_page(math.max(self.min_page_size, aligned_allocation_size));
}

debug.assert(page.available() >= allocation_size);

defer page.used += aligned_allocation_size;

return page.buffer[page.used .. (page.used + allocation_size)];
}
const PageList = list.Stack(Page);

fn allocate_page(self: *Stacking, page_size: usize) io.AllocationError!*Page {
var buffer = try io.allocate_many(self.base_allocator, page_size, u8);
var buffer = try self.page_allocator.reallocate(null, page_size);

errdefer io.deallocate(self.base_allocator, buffer);
errdefer self.page_allocator.deallocate(buffer);

try self.pages.push_one(self.base_allocator, .{
try self.pages.push_one(.{
.buffer = buffer,
.used = 0,
});

@@ -62,34 +39,10 @@ pub const Stacking = struct {
}

pub fn as_allocator(self: *Stacking) io.Allocator {
return io.Allocator.bind(Stacking, self, struct {
fn reallocate(stacking: *Stacking, options: io.AllocationOptions) ?[]u8 {
const allocation = options.allocation orelse {
return stacking.allocate(options.size) catch null;
};

if (allocation.len == 0) {
return null;
}

const reallocation = stacking.allocate(allocation.len) catch {
return null;
};

io.copy(reallocation, allocation);

return reallocation;
}
}.reallocate);
}

pub fn clear_allocations(self: *Stacking) void {
for (self.pages.values) |page| {
io.deallocate(self.base_allocator, page.buffer);
}

self.pages.deinit(self.base_allocator);
self.allocations.deinit(self.base_allocator);
return io.Allocator.bind(Stacking, self, .{
.deallocate = deallocate,
.reallocate = reallocate,
});
}

fn current_page(self: Stacking) ?*Page {

@@ -99,4 +52,60 @@ pub const Stacking = struct {

return &self.pages.values[self.pages.values.len - 1];
}

pub fn free(self: *Stacking) void {
for (self.pages.values) |page| {
self.page_allocator.deallocate(page.buffer);
}

self.pages.free();
self.allocations.free();
}

pub fn deallocate(_: *Stacking, _: []io.Byte) void {
// TODO: Decide how to implement.
}

pub fn reallocate(self: *Stacking, return_address: usize, existing_allocation: ?[]io.Byte, size: usize) io.AllocationError![]io.Byte {
// TODO: Safety-check existing allocation is from allocator or null.
_ = return_address;

const alignment = @as(usize, 4);
const aligned_size = (size + alignment - 1) & ~(alignment - 1);

if (self.pages.values.len == 0) {
const page = try self.allocate_page(math.max(self.min_page_size, aligned_size));

page.used = size;

return page.buffer[0 .. size];
}

var page = self.current_page() orelse unreachable;

if (page.available() <= aligned_size) {
page = try self.allocate_page(math.max(self.min_page_size, aligned_size));
}

debug.assert(page.available() >= size);

defer page.used += aligned_size;

const reallocation = page.buffer[page.used .. (page.used + size)];

if (existing_allocation) |allocation| {
io.copy(reallocation, allocation);
}

return reallocation;
}

pub fn make(allocator: io.Allocator, min_page_size: usize) Stacking {
return Stacking{
.allocations = AllocationsList.make(allocator),
.pages = PageList.make(allocator),
.page_allocator = allocator,
.min_page_size = min_page_size,
};
}
};
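The reworked arena above drops the functor-based allocation path in favour of explicit deallocate/reallocate actions plus a make constructor. A minimal usage sketch, assuming a working backing io.Allocator is supplied by the caller; the helper name here is illustrative and not part of this change:

const coral = @import("coral");

// Illustrative only: `backing` is any io.Allocator the caller already has.
fn with_scratch_arena(backing: coral.io.Allocator) coral.io.AllocationError!void {
    var arena = coral.arena.Stacking.make(backing, 4096);
    defer arena.free();

    // In the new API, reallocate(return_address, null, size) performs a fresh allocation.
    const scratch = try arena.reallocate(@returnAddress(), null, 128);
    coral.io.zero(scratch);

    // The arena can also be handed around behind the generic interface.
    _ = arena.as_allocator();
}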
@@ -1,44 +1,14 @@
///
/// Arena-based memory allocation strategies.
///

pub const arena = @import("./arena.zig");

///
/// Debug build-only utilities and sanity-checkers.
///
pub const debug = @import("./debug.zig");

///
/// Platform-agnostic data input and output operations.
///
pub const io = @import("./io.zig");

///
/// Data structures and utilities for sequential, "list-like" collections.
///
pub const list = @import("./list.zig");

///
/// Types and functions designed for mathematics in interactive media applications.
///
pub const map = @import("./map.zig");

pub const math = @import("./math.zig");

///
/// Data structures and utilities for fragmented, "heap-like" collections.
///
pub const slab = @import("./slab.zig");

///
/// Data structures and utilities for the highly-specialized "slotmap" collection.
///
pub const slots = @import("./slots.zig");

///
/// Data structures and utilities for associative, "table-like" collections.
///
pub const table = @import("./table.zig");

///
/// Converters, parsers, and validators for sequences of bytes treated as UTF8 unicode strings.
///
pub const utf8 = @import("./utf8.zig");
@@ -1,8 +1,4 @@
///
/// Active code comment to assert that `condition` should always be true.
///
/// Safety-checked behavior is invoked where `condition` evaluates to false.
///

pub fn assert(condition: bool) void {
if (!condition) {
unreachable;
@@ -6,18 +6,106 @@ pub const AllocationError = error {
OutOfMemory,
};

pub const AllocationOptions = struct {
return_address: usize,
allocation: ?[]u8 = null,
size: usize,
pub const Allocator = struct {
context: *anyopaque,

actions: *const struct {
deallocate: *const fn (context: *anyopaque, allocation: []Byte) void,
reallocate: *const fn (context: *anyopaque, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte,
},

pub fn Actions(comptime State: type) type {
return struct {
deallocate: fn (state: *State, allocation: []Byte) void,
reallocate: fn (state: *State, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte,
};
}

pub fn bind(comptime State: type, state: *State, comptime actions: Actions(State)) Allocator {
const is_zero_aligned = @alignOf(State) == 0;

const ErasedActions = struct {
fn deallocate(context: *anyopaque, allocation: []Byte) void {
if (is_zero_aligned) {
return actions.deallocator(@ptrCast(context), allocation);
}

return actions.deallocate(@ptrCast(@alignCast(context)), allocation);
}

fn reallocate(context: *anyopaque, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte {
if (is_zero_aligned) {
return actions.reallocator(@ptrCast(context), return_address, existing_allocation, size);
}

return actions.reallocate(@ptrCast(@alignCast(context)), return_address, existing_allocation, size);
}
};

pub const Allocator = Generator(?[]u8, AllocationOptions);
return .{
.context = if (is_zero_aligned) state else @ptrCast(state),

.actions = &.{
.deallocate = ErasedActions.deallocate,
.reallocate = ErasedActions.reallocate,
}
};
}

pub fn deallocate(self: Allocator, allocation: anytype) void {
switch (@typeInfo(@TypeOf(allocation))) {
.Pointer => |pointer| {
self.actions.deallocate(self.context, switch (pointer.size) {
.One => @as([*]Byte, @ptrCast(allocation))[0 .. @sizeOf(pointer.child)],
.Slice => @as([*]Byte, @ptrCast(allocation.ptr))[0 .. (@sizeOf(pointer.child) * allocation.len)],
.Many, .C => @compileError("length of allocation must be known to deallocate"),
});
},

else => @compileError("cannot deallocate " ++ allocation),
}
}

pub fn reallocate(self: Allocator, allocation: ?[]Byte, allocation_size: usize) AllocationError![]Byte {
return self.actions.reallocate(self.context, @returnAddress(), allocation, allocation_size);
}
};

pub const Byte = u8;

pub const FixedBuffer = struct {
bytes: []Byte,

pub fn as_writer(self: *FixedBuffer) Writer {
return Writer.bind(FixedBuffer, self, struct {
fn write(writable_memory: *FixedBuffer, data: []const Byte) ?usize {
return writable_memory.write(data);
}
}.write);
}

pub fn put(self: *FixedBuffer, byte: Byte) bool {
if (self.bytes.len == 0) {
return false;
}

self.bytes[0] = byte;
self.bytes = self.bytes[1 ..];

return true;
}

pub fn write(self: *FixedBuffer, bytes: []const Byte) usize {
const writable = math.min(self.bytes.len, bytes.len);

copy(self.bytes, bytes);

self.bytes = self.bytes[writable ..];

return writable;
}
};

///
/// Function pointer coupled with an immutable state context for providing dynamic dispatch over a given `Input` and
/// `Output`.
///
pub fn Functor(comptime Output: type, comptime Input: type) type {
return struct {
context: *const anyopaque,

@@ -26,21 +114,20 @@ pub fn Functor(comptime Output: type, comptime Input: type) type {
const Self = @This();

pub fn bind(comptime State: type, state: *const State, comptime invoker: fn (capture: *const State, input: Input) Output) Self {
const alignment = @alignOf(State);
const is_zero_aligned = alignment == 0;
const is_zero_aligned = @alignOf(State) == 0;

return .{
.context = if (is_zero_aligned) state else @ptrCast(*const anyopaque, state),
.context = if (is_zero_aligned) state else @ptrCast(state),

.invoker = struct {
fn invoke_opaque(context: *const anyopaque, input: Input) Output {
fn invoke(context: *const anyopaque, input: Input) Output {
if (is_zero_aligned) {
return invoker(@ptrCast(*const State, context), input);
return invoker(@ptrCast(context), input);
}

return invoker(@ptrCast(*const State, @alignCast(alignment, context)), input);
return invoker(@ptrCast(@alignCast(context)), input);
}
}.invoke_opaque,
}.invoke,
};
}

@@ -50,10 +137,6 @@ pub fn Functor(comptime Output: type, comptime Input: type) type {
};
}

///
/// Function pointer coupled with a mutable state context for providing dynamic dispatch over a given `Input` and
/// `Output`.
///
pub fn Generator(comptime Output: type, comptime Input: type) type {
return struct {
context: *anyopaque,

@@ -62,21 +145,20 @@ pub fn Generator(comptime Output: type, comptime Input: type) type {
const Self = @This();

pub fn bind(comptime State: type, state: *State, comptime invoker: fn (capture: *State, input: Input) Output) Self {
const alignment = @alignOf(State);
const is_zero_aligned = alignment == 0;
const is_zero_aligned = @alignOf(State) == 0;

return .{
.context = if (is_zero_aligned) state else @ptrCast(*anyopaque, state),
.context = if (is_zero_aligned) state else @ptrCast(state),

.invoker = struct {
fn invoke_opaque(context: *anyopaque, input: Input) Output {
fn invoke(context: *anyopaque, input: Input) Output {
if (is_zero_aligned) {
return invoker(@ptrCast(*State, context), input);
return invoker(@ptrCast(context), input);
}

return invoker(@ptrCast(*State, @alignCast(alignment, context)), input);
return invoker(@ptrCast(@alignCast(context)), input);
}
}.invoke_opaque,
}.invoke,
};
}

@@ -86,13 +168,6 @@ pub fn Generator(comptime Output: type, comptime Input: type) type {
};
}

pub const Reader = Generator(?usize, []u8);

pub const StreamError = error {
ReadFailure,
WriteFailure,
};

pub fn Tag(comptime Element: type) type {
return switch (@typeInfo(Element)) {
.Enum => |info| info.tag_type,

@@ -101,136 +176,80 @@ pub fn Tag(comptime Element: type) type {
};
}

pub const FixedBuffer = struct {
slice: []u8,
pub const Writer = Generator(?usize, []const Byte);

pub fn as_writer(self: *FixedBuffer) Writer {
return Writer.bind(FixedBuffer, self, struct {
fn write(writable_memory: *FixedBuffer, data: []const u8) ?usize {
return writable_memory.write(data);
}
}.write);
}
pub fn allocate_copy(allocator: Allocator, source: []const Byte) AllocationError![]Byte {
const allocation = try allocator.actions.reallocate(allocator.context, @returnAddress(), null, source.len);

pub fn put(self: *FixedBuffer, byte: u8) bool {
if (self.slice.len == 0) {
return false;
}
copy(allocation, source);

self.slice[0] = byte;
self.slice = self.slice[1 ..];

return true;
}

pub fn write(self: *FixedBuffer, bytes: []const u8) usize {
const writable = math.min(self.slice.len, bytes.len);

copy(self.slice, bytes);

self.slice = self.slice[writable ..];

return writable;
}
};

pub const Writer = Generator(?usize, []const u8);

pub fn allocate_many(allocator: Allocator, amount: usize, comptime Type: type) AllocationError![]Type {
if (@sizeOf(Type) == 0) {
@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
}

return @ptrCast([*]Type, @alignCast(@alignOf(Type), allocator.invoke(.{
.size = @sizeOf(Type) * amount,
.return_address = @returnAddress(),
}) orelse return error.OutOfMemory))[0 .. amount];
return allocation;
}

pub fn allocate_one(allocator: Allocator, value: anytype) AllocationError!*@TypeOf(value) {
const Type = @TypeOf(value);
const typeSize = @sizeOf(Type);

if (@sizeOf(Type) == 0) {
@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
if (typeSize == 0) {
@compileError("Cannot allocate memory for 0-byte sized type " ++ @typeName(Type));
}

const allocation = @ptrCast(*Type, @alignCast(@alignOf(Type), allocator.invoke(.{
.size = @sizeOf(Type),
.return_address = @returnAddress(),
}) orelse return error.OutOfMemory));
const allocation = @as(*Type, @ptrCast(@alignCast(try allocator.actions.reallocate(
allocator.context,
@returnAddress(),
null,
typeSize))));

allocation.* = value;

return allocation;
}

pub fn bytes_of(value: anytype) []const u8 {
pub fn bytes_of(value: anytype) []const Byte {
const pointer_info = @typeInfo(@TypeOf(value)).Pointer;

debug.assert(pointer_info.size == .One);

return @ptrCast([*]const u8, value)[0 .. @sizeOf(pointer_info.child)];
return switch (pointer_info.size) {
.One => @as([*]const Byte, @ptrCast(value))[0 .. @sizeOf(pointer_info.child)],
.Slice => @as([*]const Byte, @ptrCast(value.ptr))[0 .. @sizeOf(pointer_info.child) * value.len],
else => @compileError("`value` must be single-element pointer or slice type"),
};
}

pub fn compare(this: []const u8, that: []const u8) isize {
const range = math.min(this.len, that.len);
pub fn copy(target: []Byte, source: []const Byte) void {
var index: usize = 0;

while (index < range) : (index += 1) {
const difference = @intCast(isize, this[index]) - @intCast(isize, that[index]);

if (difference != 0) {
return difference;
while (index < source.len) : (index += 1) {
target[index] = source[index];
}
}

return @intCast(isize, this.len) - @intCast(isize, that.len);
pub fn ends_with(target: []const Byte, match: []const Byte) bool {
if (target.len < match.len) {
return false;
}

pub fn copy(target: []u8, source: []const u8) void {
var index: usize = 0;

while (index < source.len) : (index += 1) target[index] = source[index];
}

pub fn deallocate(allocator: Allocator, allocation: anytype) void {
switch (@typeInfo(@TypeOf(allocation))) {
.Pointer => |pointer| {
_ = allocator.invoke(.{
.allocation = switch (pointer.size) {
.One => @ptrCast([*]u8, allocation)[0 .. @sizeOf(pointer.child)],
.Slice => @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(pointer.child) * allocation.len)],
.Many, .C => @compileError("length of allocation must be known to deallocate"),
},

.return_address = @returnAddress(),
.size = 0,
});
},

else => @compileError("cannot deallocate " ++ allocation),
}
}

pub fn ends_with(target: []const u8, match: []const u8) bool {
if (target.len < match.len) return false;

{
var index = @as(usize, 0);

while (index < match.len) : (index += 1) {
if (target[target.len - (1 + index)] != match[match.len - (1 + index)]) return false;
if (target[target.len - (1 + index)] != match[match.len - (1 + index)]) {
return false;
}
}
}

return true;
}

pub fn equals(this: []const u8, that: []const u8) bool {
if (this.len != that.len) return false;
pub fn equals(target: []const Byte, match: []const Byte) bool {
if (target.len != match.len) {
return false;
}

{
var index: usize = 0;

while (index < this.len) : (index += 1) if (this[index] != that[index]) return false;
for (0 .. target.len) |index| {
if (target[index] != match[index]) {
return false;
}
}

return true;

@@ -238,68 +257,28 @@ pub fn equals(this: []const u8, that: []const u8) bool {

var null_context = @as(usize, 0);

pub const null_allocator = Allocator.bind(&null_context, struct {
fn reallocate(context: *usize, options: AllocationOptions) ?[]u8 {
debug.assert(context.* == 0);
debug.assert(options.allocation == null);

return null;
}
});

pub const null_writer = Writer.bind(&null_context, struct {
fn write(context: *usize, buffer: []const u8) usize {
pub const null_writer = Writer.bind(usize, &null_context, struct {
fn write(context: *usize, buffer: []const u8) ?usize {
debug.assert(context.* == 0);

return buffer.len;
}
}.write);

pub fn reallocate(allocator: Allocator, allocation: anytype, amount: usize) AllocationError![]@typeInfo(@TypeOf(allocation)).Pointer.child {
const pointer_info = @typeInfo(@TypeOf(allocation)).Pointer;
const Element = pointer_info.child;
pub fn slice_sentineled(comptime sen: anytype, ptr: [*:sen]const @TypeOf(sen)) [:sen]const @TypeOf(sen) {
var len = @as(usize, 0);

return @ptrCast([*]Element, @alignCast(@alignOf(Element), (allocator.invoke(switch (pointer_info.size) {
.Slice => .{
.allocation = @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(Element) * allocation.len)],
.size = @sizeOf(Element) * amount,
},

.Many, .C, .One => @compileError("allocation must be a slice to reallocate"),
}) orelse return error.OutOfMemory).ptr))[0 .. amount];
while (ptr[len] != sen) {
len += 1;
}

pub fn sentinel_index(comptime element: type, comptime sentinel: element, sequence: [*:sentinel]const element) usize {
var index: usize = 0;

while (sequence[index] != sentinel) : (index += 1) {}

return index;
}

pub fn stream(output: Writer, input: Reader, buffer: []u8) StreamError!u64 {
var total_written: u64 = 0;
var read = input.invoke(buffer) orelse return error.ReadFailure;

while (read != 0) {
total_written += output.invoke(buffer[0..read]) orelse return error.WriteFailure;
read = input.invoke(buffer) orelse return error.ReadFailure;
}

return total_written;
}

pub fn swap(comptime Element: type, this: *Element, that: *Element) void {
const temp = this.*;

this.* = that.*;
that.* = temp;
return ptr[0 .. len:sen];
}

pub fn tag_of(comptime value: anytype) Tag(@TypeOf(value)) {
return @as(Tag(@TypeOf(value)), value);
}

pub fn zero(target: []u8) void {
pub fn zero(target: []Byte) void {
for (target) |*t| t.* = 0;
}
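The new io.Allocator above is effectively a hand-rolled vtable: a type-erased context pointer plus a comptime-built actions table wired up through Allocator.bind. A short sketch of how a custom allocator state might plug into it; the Dummy type and its always-failing behaviour are purely illustrative:

const io = @import("coral").io;

const Dummy = struct {
    fn deallocate(_: *Dummy, _: []io.Byte) void {}

    fn reallocate(_: *Dummy, _: usize, _: ?[]io.Byte, _: usize) io.AllocationError![]io.Byte {
        // A real implementation would hand out memory here.
        return error.OutOfMemory;
    }
};

fn as_allocator(state: *Dummy) io.Allocator {
    // bind() erases the state pointer and wraps the comptime-known actions.
    return io.Allocator.bind(Dummy, state, .{
        .deallocate = Dummy.deallocate,
        .reallocate = Dummy.reallocate,
    });
}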
@@ -1,99 +1,54 @@
const debug = @import("./debug.zig");

const io = @import("./io.zig");

const math = @import("./math.zig");

///
/// Returns a dynamically sized stack capable of holding `Value`.
///
pub const ByteStack = Stack(io.Byte);

pub fn Stack(comptime Value: type) type {
return struct {
capacity: usize = 0,
values: []Value = &.{},
allocator: io.Allocator,
capacity: usize,
values: []Value,

///
/// Stack type.
///
const Self = @This();

///
/// Clears all elements from `self` while preserving the current internal buffer.
///
/// To clean up memory allocations made by the stack and deinitialize it, see [deinit] instead.
///
pub fn clear(self: *Self) void {
self.values = self.values[0 .. 0];
}

///
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
///
/// To clear all items from the stack while preserving the current internal buffer, see [clear] instead.
///
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current internal buffer.
///
pub fn deinit(self: *Self, allocator: io.Allocator) void {
pub fn free(self: *Self) void {
if (self.capacity == 0) {
return;
}

io.deallocate(allocator, self.values.ptr[0 .. self.capacity]);
self.allocator.deallocate(self.values.ptr[0 .. self.capacity]);

self.values = &.{};
self.capacity = 0;
}

///
/// Attempts to remove `amount` number of `Value`s from the stack, returning `bool` if it was successful,
/// otherwise `false` if the stack contains fewer elements than `amount`.
///
pub fn drop(self: *Self, amount: usize) bool {
if (amount > self.values.len) {
return false;
}

self.values = self.values[0 .. self.values.len - amount];

return true;
}

///
/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
///
/// The function returns [io.AllocatorError] if `allocator` could not commit the memory required to grow the
/// internal buffer by `growth_amount`, leaving `self` in the same state that it was in prior to starting the
/// grow.
///
/// Growing ahead of multiple push operations is useful when the upper bound of pushes is well-understood, as it
/// can reduce the number of allocations required per push.
///
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current internal buffer.
///
pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
pub fn grow(self: *Self, growth_amount: usize) io.AllocationError!void {
const grown_capacity = self.capacity + growth_amount;
const values = (try io.allocate_many(allocator, grown_capacity, Value))[0 .. self.values.len];
const buffer = try self.allocator.reallocate(null, @sizeOf(Value) * grown_capacity);

errdefer io.deallocate(allocator, values);
errdefer self.allocator.deallocate(buffer);

if (self.capacity != 0) {
for (0 .. self.values.len) |index| {
values[index] = self.values[index];
io.copy(buffer, io.bytes_of(self.values));
self.allocator.deallocate(self.values.ptr[0 .. self.capacity]);
}

io.deallocate(allocator, self.values.ptr[0 .. self.capacity]);
}

self.values = values;
self.values = @as([*]Value, @ptrCast(@alignCast(buffer)))[0 .. self.values.len];
self.capacity = grown_capacity;
}

///
/// Attempts to remove the last element of `self` that was inserted, if one exists, returning it or `null` if
/// `self` is empty.
///
pub fn make(allocator: io.Allocator) Self {
return .{
.allocator = allocator,
.capacity = 0,
.values = &.{},
};
}

pub fn pop(self: *Self) ?Value {
if (self.values.len == 0) {
return null;

@@ -106,71 +61,9 @@ pub fn Stack(comptime Value: type) type {
return self.values[last_index];
}

///
/// Attempts to push every `Value` in `values` to `self` using `allocator` to grow the internal buffer as
/// necessary.
///
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
/// internal buffer of `self` when necessary.
///
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current internal buffer.
///
pub fn push_all(self: *Self, allocator: io.Allocator, values: []const Value) io.AllocationError!void {
const new_length = self.values.len + values.len;

if (new_length > self.capacity) {
try self.grow(allocator, values.len + values.len);
}

const offset_index = self.values.len;

self.values = self.values.ptr[0 .. new_length];

for (0 .. values.len) |index| {
self.values[offset_index + index] = values[index];
}
}

///
/// Attempts to push the `Value` in `value` to `self` by `amount` number of times using `allocator` to grow
/// the internal buffer as necessary.
///
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
/// internal buffer of `self` when necessary.
///
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current internal buffer.
///
pub fn push_many(self: *Self, allocator: io.Allocator, value: Value, amount: usize) io.AllocationError!void {
const new_length = self.values.len + amount;

if (new_length >= self.capacity) {
try self.grow(allocator, amount + amount);
}

const offset_index = self.values.len;

self.values = self.values.ptr[0 .. new_length];

for (0 .. amount) |index| {
self.values[offset_index + index] = value;
}
}

///
/// Attempts to push the `Value` in `value` to `self` using `allocator` to grow the internal buffer as
/// necessary.
///
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
/// internal buffer of `self` when necessary.
///
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current internal buffer.
///
pub fn push_one(self: *Self, allocator: io.Allocator, value: Value) io.AllocationError!void {
pub fn push_one(self: *Self, value: Value) io.AllocationError!void {
if (self.values.len == self.capacity) {
try self.grow(allocator, math.max(1, self.capacity));
try self.grow(math.max(1, self.capacity));
}

const offset_index = self.values.len;

@@ -181,49 +74,3 @@ pub fn Stack(comptime Value: type) type {
}
};
}

///
/// Bridge context between a list type implement as part of the list module and an allocator, allowing the list resource
/// referenced by the [Writable] instance to be written to directly or virtually via the [io.Writer] interface.
///
/// *Note* if the given list contains an existing allocation, the provided [io.Allocator] instance must reference the
/// same allocation strategy as the one originally used to allocate the list type memory.
///
pub const Writable = struct {
allocator: io.Allocator,

list: union (enum) {
stack: *ByteStack,
},

///
/// Stack of bytes.
///
const ByteStack = Stack(u8);

///
/// Returns a [io.Writer] instance that binds a reference of `self` to the [write] operation.
///
pub fn as_writer(self: *Writable) io.Writer {
return io.Writer.bind(Writable, self, struct {
fn write(writable: *Writable, bytes: []const u8) ?usize {
writable.write(bytes) catch return null;

return bytes.len;
}
}.write);
}

///
/// Attempts to call the appropriate multi-element writing function for the current list referenced by `self`,
/// passing `bytes` along.
///
/// The function returns [io.AllocationError] if `allocator` could not commit the memory by the list implementation
/// referenced by `self`. See the specific implementation details of the respective list type for more information.
///
pub fn write(self: *Writable, bytes: []const u8) io.AllocationError!void {
return switch (self.list) {
.stack => |stack| stack.push_all(self.allocator, bytes),
};
}
};
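With the stack now owning its io.Allocator, construction goes through make and the push/free operations no longer take an allocator argument. A usage sketch under that assumption; the element type and pushed value are arbitrary:

const coral = @import("coral");

fn collect(allocator: coral.io.Allocator) coral.io.AllocationError!void {
    var stack = coral.list.Stack(u32).make(allocator);
    defer stack.free();

    // Growth happens internally through the stored allocator.
    try stack.push_one(42);
    _ = stack.pop();
}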
@@ -0,0 +1,303 @@
const debug = @import("./debug.zig");

const io = @import("./io.zig");

const list = @import("./list.zig");

const math = @import("./math.zig");

pub fn Slab(comptime Value: type) type {
return struct {
next_index: usize,
entries: EntryList,

const EntryList = list.Stack(union (enum) {
value: Value,
next_index: usize,
});

const Self = @This();

pub fn lookup(self: Self, key: usize) ?Value {
if (key == 0 or key > self.entries.values.len) {
return null;
}

return switch (self.entries.values[key - 1]) {
.value => |value| value,
.next_index => null,
};
}

pub fn free(self: *Self) void {
self.entries.free();

self.next_index = 0;
}

pub fn insert(self: *Self, value: Value) io.AllocationError!usize {
if (self.next_index < self.entries.values.len) {
const index = self.next_index;
const entry = &self.entries.values[index];

debug.assert(entry.* == .next_index);

self.next_index = entry.next_index;
entry.* = .{.value = value};

return index + 1;
} else {
try self.entries.push_one(.{.value = value});

self.next_index += 1;

return self.next_index;
}
}

pub fn make(allocator: io.Allocator) Self {
return .{
.next_index = 0,
.entries = EntryList.make(allocator),
};
}

pub fn remove(self: *Self, key: usize) ?Value {
if (key == 0 or key > self.entries.values.len) {
return null;
}

const index = key - 1;
const entry = &self.entries.values[index];

return switch (entry.*) {
.next_index => null,

.value => get_value: {
const value = entry.value;

entry.* = .{.next_index = self.next_index};
self.next_index = index;

break: get_value value;
},
};
}
};
}

pub fn Table(comptime Key: type, comptime Value: type, comptime traits: TableTraits(Key)) type {
const load_max = 0.75;

return struct {
allocator: io.Allocator,
count: usize,
entries: []?Entry,

pub const Entry = struct {
key: Key,
value: Value,

fn write_into(self: Entry, entry_table: []?Entry) bool {
const hash_max = math.min(math.max_int(@typeInfo(usize).Int), entry_table.len);
var hashed_key = math.wrap(traits.hash(self.key), math.min_int(@typeInfo(usize).Int), hash_max);
var iterations = @as(usize, 0);

while (true) : (iterations += 1) {
debug.assert(iterations < entry_table.len);

const table_entry = &(entry_table[hashed_key] orelse {
entry_table[hashed_key] = .{
.key = self.key,
.value = self.value,
};

return true;
});

if (traits.match(table_entry.key, self.key)) {
return false;
}

hashed_key = (hashed_key +% 1) % hash_max;
}
}
};

pub const Iterable = struct {
table: *Self,
iterations: usize = 0,

pub fn next(self: *Iterable) ?Entry {
while (self.iterations < self.table.entries.len) {
defer self.iterations += 1;

if (self.table.entries[self.iterations]) |entry| {
return entry;
}
}

return null;
}
};

const Self = @This();

pub fn replace(self: *Self, key: Key, value: Value) io.AllocationError!?Entry {
try self.rehash(load_max);

debug.assert(self.entries.len > self.count);

{
const hash_max = math.min(math.max_int(@typeInfo(usize).Int), self.entries.len);
var hashed_key = math.wrap(traits.hash(key), math.min_int(@typeInfo(usize).Int), hash_max);

while (true) {
const entry = &(self.entries[hashed_key] orelse {
self.entries[hashed_key] = .{
.key = key,
.value = value,
};

self.count += 1;

return null;
});

if (traits.match(entry.key, key)) {
const original_entry = entry.*;

entry.* = .{
.key = key,
.value = value,
};

return original_entry;
}

hashed_key = (hashed_key +% 1) % hash_max;
}
}
}

pub fn calculate_load_factor(self: Self) f32 {
return if (self.entries.len == 0) 1 else @as(f32, @floatFromInt(self.count)) / @as(f32, @floatFromInt(self.entries.len));
}

pub fn clear(self: *Self) void {
for (self.entries) |*entry| {
entry.* = null;
}

self.count = 0;
}

pub fn free(self: *Self) void {
if (self.entries.len == 0) {
return;
}

self.allocator.deallocate(self.entries);

self.entries = &.{};
self.count = 0;
}

pub fn insert(self: *Self, key: Key, value: Value) io.AllocationError!bool {
try self.rehash(load_max);

debug.assert(self.entries.len > self.count);

defer self.count += 1;

const entry = Entry{
.key = key,
.value = value,
};

return entry.write_into(self.entries);
}

pub fn lookup(self: Self, key: Key) ?Value {
if (self.count == 0) {
return null;
}

const hash_max = math.min(math.max_int(@typeInfo(usize).Int), self.entries.len);
var hashed_key = math.wrap(traits.hash(key), math.min_int(@typeInfo(usize).Int), hash_max);
var iterations = @as(usize, 0);

while (iterations < self.count) : (iterations += 1) {
const entry = &(self.entries[hashed_key] orelse return null);

if (traits.match(entry.key, key)) {
return entry.value;
}

hashed_key = (hashed_key +% 1) % hash_max;
}

return null;
}

pub fn make(allocator: io.Allocator) Self {
return .{
.allocator = allocator,
.count = 0,
.entries = &.{},
};
}

pub fn rehash(self: *Self, max_load: f32) io.AllocationError!void {
if (self.calculate_load_factor() <= max_load) {
return;
}

const min_count = math.max(1, self.count);
const table_size = min_count * 2;
const allocation = @as([*]?Entry, @ptrCast(@alignCast(try self.allocator.reallocate(null, @sizeOf(?Entry) * table_size))))[0 .. table_size];

errdefer self.allocator.deallocate(allocation);

self.entries = replace_table: {
for (allocation) |*entry| {
entry.* = null;
}

if (self.entries.len != 0) {
for (self.entries) |maybe_entry| {
if (maybe_entry) |entry| {
debug.assert(entry.write_into(allocation));
}
}

self.allocator.deallocate(self.entries);
}

break: replace_table allocation;
};
}
};
}

pub fn TableTraits(comptime Key: type) type {
return struct {
hash: fn (key: Key) usize,
match: fn (key: Key, key: Key) bool,
};
}

fn hash_string(key: []const io.Byte) usize {
var hash_code = @as(usize, 5381);

for (key) |byte| {
hash_code = ((hash_code << 5) +% hash_code) +% byte;
}

return hash_code;
}

pub const string_table_traits = TableTraits([]const io.Byte){
.hash = hash_string,
.match = io.equals,
};
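The new file above introduces both a free-list Slab and an open-addressing Table. A sketch of the slab's handle-based API; the relative import paths are assumptions, since the file name is not visible in this view:

const io = @import("./io.zig");
const slab = @import("./slab.zig"); // assumed path for the new file

fn demo(allocator: io.Allocator) io.AllocationError!void {
    var values = slab.Slab(u32).make(allocator);
    defer values.free();

    const key = try values.insert(42); // keys are 1-based handles
    _ = values.lookup(key); // returns ?u32
    _ = values.remove(key); // recycles the slot for the next insert
}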
@@ -1,147 +1,21 @@
const std = @import("std");

///
/// Errors that may occur during checked integer arithmetic operations.
///
pub const CheckedArithmeticError = error {
IntOverflow,
};

///
/// Returns the float type described by `float`.
///
pub fn Float(comptime float: std.builtin.Type.Float) type {
return @Type(.{.Float = float});
}

///
/// Returns the integer type described by `int`.
///
pub fn Int(comptime int: std.builtin.Type.Int) type {
return @Type(.{.Int = int});
}

///
/// Two-dimensional vector type.
///
pub const Vector2 = extern struct {
x: f32,
y: f32,

///
/// A [Vector2] with a value of `0` assigned to all of the components.
///
pub const zero = Vector2{.x = 0, .y = 0};
};

///
/// Attempts to perform a checked addition between `a` and `b`, returning the result or [CheckedArithmeticError] if the
/// operation tried to invoke safety-checked behavior.
///
/// `checked_add` can be seen as an alternative to the language-native addition operator (+) that exposes the safety-
/// checked behavior in the form of an error type that may be caught or tried on.
///
pub fn checked_add(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a + b) {
const result = @addWithOverflow(a, b);

if (result.@"1" != 0) {
return error.IntOverflow;
}

return result.@"0";
}

///
/// Attempts to perform a checked integer cast to the type expressed by `int` on `value`, returning the result or
/// [CheckedArithmeticError] if the operation tried to invoke safety-checked behavior.
///
/// `checked_cast` can be seen as an alternative to the language-native `@intCast` builtin that exposes the safety-
/// checked behavior in the form of an error type that may be caught or tried on.
///
pub fn checked_cast(comptime int: std.builtin.Type.Int, value: anytype) CheckedArithmeticError!Int(int) {
if ((value < min_int(int)) or (value > max_int(int))) {
return error.IntOverflow;
}

return @intCast(Int(int), value);
}

///
/// Attempts to perform a checked multiplication between `a` and `b`, returning the result or [CheckedArithmeticError]
/// if the operation tried to invoke safety-checked behavior.
///
/// `checked_mul` can be seen as an alternative to the language-native multiplication operator (*) that exposes the
/// safety-checked behavior in the form of an error type that may be caught or tried on.
///
pub fn checked_mul(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a * b) {
const result = @mulWithOverflow(a, b);

if (result.@"1" != 0) {
return error.IntOverflow;
}

return result.@"0";
}

///
/// Attempts to perform a checked subtraction between `a` and `b`, returning the result or [CheckedArithmeticError] if
/// the operation tried to invoke safety-checked behavior.
///
/// `checked_sub` can be seen as an alternative to the language-native subtraction operator (-) that exposes the safety-
/// checked behavior in the form of an error type that may be caught or tried on.
///
pub fn checked_sub(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a - b) {
const result = @subWithOverflow(a, b);

if (result.@"1" != 0) {
return error.IntOverflow;
}

return result.@"0";
}

///
/// Returns `value` clamped between the inclusive bounds of `lower` and `upper`.
///
pub fn clamp(value: anytype, lower: anytype, upper: anytype) @TypeOf(value, lower, upper) {
return max(lower, min(upper, value));
}

///
/// Returns `true` if `value` is clamped within the inclusive bounds of `lower` and `upper`.
///
pub fn is_clamped(value: anytype, lower: anytype, upper: anytype) bool {
return (value >= lower) and (value <= upper);
}

///
/// Returns the maximum value between `a` and `b`.
///
pub fn max(a: anytype, b: anytype) @TypeOf(a, b) {
return @max(a, b);
}

///
/// Returns the maximum value that the integer described by `int` may express.
///
pub fn max_int(comptime int: std.builtin.Type.Int) comptime_int {
const bit_count = int.bits;

if (bit_count == 0) return 0;

return (1 << (bit_count - @boolToInt(int.signedness == .signed))) - 1;
return (1 << (bit_count - @intFromBool(int.signedness == .signed))) - 1;
}

///
/// Returns the minimum value between `a` and `b`.
///
pub fn min(a: anytype, b: anytype) @TypeOf(a, b) {
return @min(a, b);
}

///
/// Returns the minimum value that the integer described by `int` may express.
///
pub fn min_int(comptime int: std.builtin.Type.Int) comptime_int {
if (int.signedness == .unsigned) return 0;

@@ -152,9 +26,6 @@ pub fn min_int(comptime int: std.builtin.Type.Int) comptime_int {
return -(1 << (bit_count - 1));
}

///
/// Returns `value` wrapped around the inclusive bounds of `lower` and `upper`.
///
pub fn wrap(value: anytype, lower: anytype, upper: anytype) @TypeOf(value, lower, upper) {
const range = upper - lower;
@@ -1,177 +0,0 @@
const debug = @import("./debug.zig");

const io = @import("./io.zig");

const math = @import("./math.zig");

const std = @import("std");

///
/// Addressable mapping of integers described by `index_int` to values of type `Value`.
///
/// Slab maps are similar to slot maps in that they have O(1) insertion and removal, however, use a flat table layout
/// instead of parallel arrays. This reduces memory usage in some cases and can be useful for data that does not need to
/// be quickly iterated over, as values ordering is not guaranteed.
///
/// *Note* `index_int` values may be as big or as small as desired per the use-case of the consumer, however, integers
/// smaller than `usize` may result in the map reporting it is out of memory due to exhausting the addressable space
/// provided by the integer.
///
pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type {
return struct {
free_index: Index = 0,
count: Index = 0,
table: []Entry = &.{},

///
/// Table entry which may either store an inserted value or an index to the next free entry in the table.
///
const Entry = union (enum) {
free_index: Index,
value: Value,
};

///
/// Used for indexing into the slab map.
///
const Index = math.Int(index_int);

///
/// Slab map type.
///
const Self = @This();

///
/// Overwrites the value referenced by `index` in `self`.
///
pub fn assign(self: *Self, index: Index, value: Value) void {
const entry = &self.table[index];

debug.assert(entry.* == .value);

entry.value = value;
}

///
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
///
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current table.
///
pub fn deinit(self: *Self, allocator: io.Allocator) void {
if (self.table.len == 0) {
return;
}

io.deallocate(allocator, self.table);

self.table = &.{};
self.count = 0;
self.free_index = 0;
}

///
/// Fetches the value referenced by `index` in `self`, returning it.
///
pub fn fetch(self: *Self, index: Index) Value {
const entry = &self.table[index];

debug.assert(entry.* == .value);

return entry.value;
}

///
/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
///
/// The function returns [io.AllocatorError] if `allocator` could not commit the memory required to grow the
/// table by `growth_amount`, leaving `self` in the same state that it was in prior to starting the grow.
///
/// Growing ahead of multiple insertion operations is useful when the upper bound of insertions is well-
/// understood, as it can reduce the number of allocations required per insertion.
///
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current table.
///
pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
const grown_capacity = self.table.len + growth_amount;
const entries = try io.allocate_many(allocator, grown_capacity, Entry);

errdefer io.deallocate(allocator, entries);

if (self.table.len != 0) {
for (0 .. self.table.len) |index| {
entries[index] = self.table[index];
}

for (self.table.len .. entries.len) |index| {
entries[index] = .{.free_index = 0};
}

io.deallocate(allocator, self.table);
}

self.table = entries;
}

///
/// Attempts to insert `value` into `self` as a new entry using `allocator` as the allocation strategy,
/// returning an index value representing a reference to the inserted value that may be queried through `self`
/// after.
///
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
/// internal buffer of `self` when necessary.
///
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
/// strategy as the one originally used to allocate the current table.
///
pub fn insert(self: *Self, allocator: io.Allocator, value: Value) io.AllocationError!Index {
if (self.count == self.table.len) {
try self.grow(allocator, math.max(1, self.count));
}

if (self.free_index == self.count) {
const entry_index = self.count;
const entry = &self.table[entry_index];

entry.* = .{.value = value};

self.count += 1;
self.free_index += 1;

return entry_index;
}

const entry_index = self.free_index;
const entry = &self.table[self.free_index];

debug.assert(entry.* == .free_index);

self.count += 1;
self.free_index = entry.free_index;
entry.* = .{.value = value};

return entry_index;
}

///
/// Returns `true` if `self` contains no values, otherwise `false`.
///
pub fn is_empty(self: Self) bool {
return self.count == 0;
}

///
/// Removes the value referenced by `index` from `self`.
///
pub fn remove(self: *Self, index: Index) void {
const entry = &self.table[index];

debug.assert(entry.* == .value);

self.count -= 1;
entry.* = .{.free_index = self.free_index};
self.free_index = index;
}
};
}
@ -1,236 +0,0 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
///
|
||||
/// Returns a dense mapping of slots that may store `Element`s indexable by a [Slot], where `key` defines how many
|
||||
/// bits the [Slot] is made from.
|
||||
///
|
||||
pub fn Map(comptime key: Key, comptime Element: type) type {
|
||||
const KeySlot = Slot(key);
|
||||
const Index = math.Unsigned(key.index_bits);
|
||||
|
||||
return struct {
|
||||
capacity: usize,
|
||||
values: []Element,
|
||||
slots: [*]KeySlot,
|
||||
erase: [*]Index,
|
||||
next_free: Index,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Clears all elements from the slots in `self`.
|
||||
///
|
||||
/// *Note* that clearing the slots is not the same as deinitializing them, as it does not deallocate any memory
|
||||
/// that has already been allocated to the slots structure.
|
||||
///
|
||||
pub fn clear(self: *Self) void {
|
||||
self.next_free = 0;
|
||||
self.values = self.values[0 .. 0];
|
||||
|
||||
{
|
||||
var index = @as(usize, 0);
|
||||
|
||||
while (index < self.capacity) : (index += 1) {
|
||||
const slot = &self.slots[index];
|
||||
|
||||
slot.salt = math.max(slot.salt +% 1, 1);
|
||||
slot.index = index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Frees all memory allocated by `allocator` to self.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn deinit(self: *Self, allocator: io.Allocator) void {
|
||||
io.deallocate(allocator, self.values.ptr);
|
||||
io.deallocate(allocator, self.slots);
|
||||
io.deallocate(allocator, self.erase);
|
||||
|
||||
self.values = &.{};
|
||||
self.slots = null;
|
||||
self.erase = null;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to fetch the element referenced by `slot` from `self`, returning it or `null` if `slot`
|
||||
/// does not reference a valid element.
|
||||
///
|
||||
pub fn fetch(self: Self, slot: KeySlot) ?*Element {
|
||||
if (slot.index >= self.values.len) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const redirect = &self.slots[slot.index];
|
||||
|
||||
if (slot.salt != redirect.salt) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return &self.values[redirect.index];
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to transactionally grow `self` by `growth_amount` using `allocator`, returning a
|
||||
/// [io.AllocationError] if it failed.
|
||||
///
|
||||
/// Should growing fail, `self` is left in an unmodified state.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
|
||||
const grown_capacity = self.capacity + growth_amount;
|
||||
const values = try io.allocate_many(Element, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, values);
|
||||
|
||||
const slots = try io.allocate_many(KeySlot, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
const erase = try io.allocate_many(Index, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
self.values = values;
|
||||
self.slots = slots.ptr;
|
||||
self.erase = erase.ptr;
|
||||
self.capacity = grown_capacity;
|
||||
|
||||
// Add new values to the freelist
|
||||
{
|
||||
var index = @intCast(Index, self.values.len);
|
||||
|
||||
while (index < self.capacity) : (index += 1) {
|
||||
const slot = &self.slots.?[index];
|
||||
|
||||
slot.salt = 1;
|
||||
slot.index = index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to return an initialized slot map with an initial capacity of `initial_capacity` and `allocator` as
|
||||
/// the memory allocation strategy.
|
||||
///
|
||||
/// Upon failure, a [io.AllocationError] is returned instead.
|
||||
///
|
||||
pub fn init(allocator: io.Allocator, initial_capacity: usize) io.AllocationError!Self {
|
||||
const values = try io.allocate_many(Element, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, values);
|
||||
|
||||
const slots = try io.allocate_many(KeySlot, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
const erase = try io.allocate_many(Index, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, erase);
|
||||
|
||||
return Self{
|
||||
.capacity = initial_capacity,
|
||||
.values = values[0 .. 0],
|
||||
.slots = slots.ptr,
|
||||
.erase = erase.ptr,
|
||||
.next_free = 0,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to insert `value` into `self`, growing the internal buffer with `allocator` if it is full and
|
||||
/// returning a `Slot` of `key` referencing the inserted element or a [io.AllocationError] if it failed.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn insert(self: *Self, allocator: io.Allocator, value: Element) io.AllocationError!KeySlot {
|
||||
if (self.values.len == self.capacity) {
|
||||
try self.grow(allocator, math.max(usize, 1, self.capacity));
|
||||
}
|
||||
|
||||
const index_of_redirect = self.next_free;
|
||||
const redirect = &self.slots.?[index_of_redirect];
|
||||
|
||||
// redirect.index points to the next free slot.
|
||||
self.next_free = redirect.index;
|
||||
redirect.index = @intCast(Index, self.values.len);
|
||||
self.values = self.values.ptr[0 .. self.values.len + 1];
|
||||
self.values[redirect.index] = value;
|
||||
self.erase.?[redirect.index] = index_of_redirect;
|
||||
|
||||
return KeySlot{
|
||||
.index = index_of_redirect,
|
||||
.salt = redirect.salt,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to remove the element referenced by `slot` from `self`, returning `true` if it was successful or
|
||||
/// `false` if `slot` does not reference a valid slot.
|
||||
///
|
||||
pub fn remove(self: *Self, slot: KeySlot) bool {
|
||||
const redirect = &self.slots.?[slot.index];
|
||||
|
||||
if (slot.salt != redirect.salt) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const free_index = redirect.index;
|
||||
|
||||
self.values = self.values[0 .. (self.values.len - 1)];
|
||||
|
||||
if (self.values.len > 0) {
|
||||
const free_value = &self.values[free_index];
|
||||
const free_erase = &self.erase.?[free_index];
|
||||
const last_value = &self.values[self.values.len];
|
||||
const last_erase = &self.erase.?[self.values.len];
|
||||
|
||||
free_value.* = last_value.*;
|
||||
free_erase.* = last_erase.*;
|
||||
self.slots.?[free_erase.*].index = free_index;
|
||||
}
|
||||
|
||||
redirect.salt = math.max(Index, redirect.salt +% 1, 1);
|
||||
redirect.index = self.next_free;
|
||||
self.next_free = slot.index;
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Describes the memory layout of an element-slot mapping.
|
||||
///
|
||||
pub const Key = struct {
|
||||
index_bits: usize,
|
||||
salt_bits: usize,
|
||||
};
|
||||
|
||||
///
|
||||
/// References a slot in a slot mapping.
|
||||
///
|
||||
pub fn Slot(comptime key: Key) type {
|
||||
return extern struct {
|
||||
index: math.Unsigned(key.index_bits),
|
||||
salt: math.Unsigned(key.salt_bits),
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// [Key] that uses the same number of bits as a [usize].
|
||||
///
|
||||
pub const addressable_key = Key{
|
||||
.index_bits = (@bitSizeOf(usize) / 2),
|
||||
.salt_bits = (@bitSizeOf(usize) / 2),
|
||||
};
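// A minimal usage sketch of the slot-map API above. It assumes an `io.Allocator` named `allocator` is
// supplied by the caller and uses an illustrative `Entity` element type; everything else (`Map`,
// `addressable_key`, `insert`, `fetch`, `remove`) comes from the declarations in this file.
fn slot_map_round_trip(allocator: io.Allocator) io.AllocationError!void {
	const Entity = struct {
		health: u32,
	};

	var entities = try Map(addressable_key, Entity).init(allocator, 4);

	defer entities.deinit(allocator);

	// `insert` hands back a salted slot that stays valid until the element is removed.
	const slot = try entities.insert(allocator, .{.health = 100});

	if (entities.fetch(slot)) |entity| {
		entity.health -= 25;
	}

	// Once removed, the stale slot no longer resolves because `remove` bumps the redirect salt.
	debug.assert(entities.remove(slot));
	debug.assert(entities.fetch(slot) == null);
}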
|
|
@ -1,310 +0,0 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
///
|
||||
/// Hash type used by tables and their associated structures.
|
||||
///
|
||||
pub const Hash = u64;
|
||||
|
||||
///
|
||||
/// Returns a table type of `Key`-`Value` pairs implementing a hash-only approach to key-value storage.
|
||||
///
|
||||
/// Entries are hashed using the `keyer` and collisions are resolved by looking for another empty space nearby. This
|
||||
/// repeats until the load factor exceeds the implementation-defined load maximum, at which point the table will rehash
|
||||
/// itself to acquire more capacity.
|
||||
///
|
||||
pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Key)) type {
|
||||
const hash_info = @typeInfo(Hash).Int;
|
||||
const load_max = 0.75;
|
||||
const growth_factor = 0.6;
|
||||
|
||||
return struct {
|
||||
count: usize = 0,
|
||||
table: []?Entry = &.{},
|
||||
|
||||
///
|
||||
/// Key-value pair bundling.
|
||||
///
|
||||
pub const Entry = struct {
|
||||
key: Key,
|
||||
value: Value,
|
||||
|
||||
///
|
||||
/// Attempts to write `self` into `entry_table`, returning `true` if no identical entry already existed,
|
||||
/// otherwise `false`.
|
||||
///
|
||||
/// Note that this never resizes the memory pointed to by `entry_table`, meaning that
|
||||
/// completely filled entry tables cannot perform the write at all and will invoke safety-checked behavior.
|
||||
///
|
||||
fn write_into(self: Entry, entry_table: []?Entry) bool {
|
||||
const hash_max = math.min(math.max_int(hash_info), entry_table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(self.key), math.min_int(hash_info), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (true) : (iterations += 1) {
|
||||
debug.assert(iterations < entry_table.len);
|
||||
|
||||
const table_entry = &(entry_table[hashed_key] orelse {
|
||||
entry_table[hashed_key] = .{
|
||||
.key = self.key,
|
||||
.value = self.value,
|
||||
};
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
if (keyer.comparer(table_entry.key, self.key) == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Iterable wrapper for [Hashed] instances to make unordered traversal of key-value entries relatively trivial.
|
||||
///
|
||||
pub const Iterable = struct {
|
||||
hashed_map: *Self,
|
||||
iterations: usize = 0,
|
||||
|
||||
///
|
||||
/// Attempts to move past the current iteration of `self` and onto the next key-value entry, returning it or
|
||||
/// `null` if there are no more elements in the referenced map.
|
||||
///
|
||||
pub fn next(self: *Iterable) ?Entry {
|
||||
while (self.iterations < self.hashed_map.table.len) {
|
||||
defer self.iterations += 1;
|
||||
|
||||
if (self.hashed_map.table[self.iterations]) |entry| {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Table type.
|
||||
///
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Attempts to write the `key`-`value` pair into `self`, using `allocator` as the memory allocation strategy,
|
||||
/// overwriting any entry stored with a matching `key` and returning the previous entry if one existed.
|
||||
///
|
||||
/// The function returns [AllocationError] instead if `allocator` cannot commit the memory required to grow the
|
||||
/// entry table of `self` when necessary.
|
||||
///
|
||||
/// *Note* `allocator` must reference the same allocation strategy as the one originally used to initialize
|
||||
/// `self`.
|
||||
///
|
||||
pub fn assign(self: *Self, allocator: io.Allocator, key: Key, value: Value) io.AllocationError!?Entry {
|
||||
if (self.calculate_load_factor() >= load_max) {
|
||||
const growth_size = @intToFloat(f64, math.max(1, self.table.len)) * growth_factor;
|
||||
|
||||
if (growth_size > math.max_int(@typeInfo(usize).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
try self.rehash(allocator, @floatToInt(usize, growth_size));
|
||||
}
|
||||
|
||||
debug.assert(self.table.len > self.count);
|
||||
|
||||
{
|
||||
const hash_max = math.min(math.max_int(hash_info), self.table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
|
||||
|
||||
while (true) {
|
||||
const entry = &(self.table[hashed_key] orelse {
|
||||
self.table[hashed_key] = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return null;
|
||||
});
|
||||
|
||||
if (keyer.comparer(entry.key, key) == 0) {
|
||||
const original_entry = entry.*;
|
||||
|
||||
entry.* = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return original_entry;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the calculated load factor of `self` at the moment.
|
||||
///
|
||||
pub fn calculate_load_factor(self: Self) f32 {
|
||||
return if (self.table.len == 0) 1 else @intToFloat(f32, self.count) / @intToFloat(f32, self.table.len);
|
||||
}
|
||||
|
||||
///
|
||||
/// Clears all entries from `self`, resetting the count to `0`.
|
||||
///
|
||||
/// To clean up memory allocations made by the stack and deinitialize it, see [deinit] instead.
|
||||
///
|
||||
pub fn clear(self: *Self) void {
|
||||
for (self.table) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
|
||||
///
|
||||
/// To clear all items from the table while preserving the current capacity, see [clear] instead.
|
||||
///
|
||||
/// *Note* `allocator` must reference the same allocation strategy as the one originally used to initialize
|
||||
/// `self`.
|
||||
///
|
||||
pub fn deinit(self: *Self, allocator: io.Allocator) void {
|
||||
if (self.table.len == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
io.deallocate(allocator, self.table);
|
||||
|
||||
self.table = &.{};
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to write the `key`-`value` pair into `self`, using `allocator` as the memory allocation strategy,
|
||||
/// if no value already exists with a matching `key`, returning `true` if it was inserted, otherwise `false`.
|
||||
///
|
||||
/// The function returns [AllocationError] instead if `allocator` cannot commit the memory required to grow the
|
||||
/// entry table of `self` when necessary.
|
||||
///
|
||||
/// *Note* `allocator` must reference the same allocation strategy as the one originally used to initialize
|
||||
/// `self`.
|
||||
///
|
||||
pub fn insert(self: *Self, allocator: io.Allocator, key: Key, value: Value) io.AllocationError!bool {
|
||||
if (self.calculate_load_factor() >= load_max) {
|
||||
const growth_amount = @intToFloat(f64, self.table.len) * growth_factor;
|
||||
const min_size = 1;
|
||||
|
||||
try self.rehash(allocator, self.table.len + math.max(min_size, @floatToInt(usize, growth_amount)));
|
||||
}
|
||||
|
||||
debug.assert(self.table.len > self.count);
|
||||
|
||||
defer self.count += 1;
|
||||
|
||||
return (Entry{
|
||||
.key = key,
|
||||
.value = value,
|
||||
}).write_into(self.table);
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to find an entry in `self` matching `key`, returning its value or `null` if no matching entry was found.
|
||||
///
|
||||
pub fn lookup(self: Self, key: Key) ?Value {
|
||||
if (self.count == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hash_max = math.min(math.max_int(hash_info), self.table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (iterations < self.count) : (iterations += 1) {
|
||||
const entry = &(self.table[hashed_key] orelse return null);
|
||||
|
||||
if (keyer.comparer(entry.key, key) == 0) {
|
||||
return entry.value;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to reallocate and regenerate the table capacity in `self` using `allocator` to be equal to or
|
||||
/// greater than `requested_range`, returning [io.AllocationError] if `allocator` cannot commit the memory
|
||||
/// required for the table capacity size.
|
||||
///
|
||||
/// *Note* `allocator` must reference the same allocation strategy as the one originally used to initialize
|
||||
/// `self`.
|
||||
///
|
||||
pub fn rehash(self: *Self, allocator: io.Allocator, requested_range: usize) io.AllocationError!void {
|
||||
const old_table = self.table;
|
||||
|
||||
self.table = try io.allocate_many(allocator, math.max(requested_range, self.count), ?Entry);
|
||||
|
||||
errdefer {
|
||||
io.deallocate(allocator, self.table);
|
||||
|
||||
self.table = old_table;
|
||||
}
|
||||
|
||||
for (self.table) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
if (old_table.len != 0) {
|
||||
for (old_table) |maybe_entry| {
|
||||
if (maybe_entry) |entry| {
|
||||
debug.assert(entry.write_into(self.table));
|
||||
}
|
||||
}
|
||||
|
||||
io.deallocate(allocator, old_table);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns a function group for defining table keying operations performable on `Key`.
|
||||
///
|
||||
pub fn Keyer(comptime Key: type) type {
|
||||
return struct {
|
||||
hasher: fn (key: Key) Hash,
|
||||
comparer: fn (key_a: Key, key_b: Key) isize,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// A standard [Keyer] for `[]const u8` types that provides general-purpose string keying.
|
||||
///
|
||||
pub const string_keyer = Keyer([]const u8){
|
||||
.hasher = hash_string,
|
||||
.comparer = io.compare,
|
||||
};
|
||||
|
||||
///
|
||||
/// Returns a general-purpose, non-cryptographically safe hash value for `string`.
|
||||
///
|
||||
pub fn hash_string(string: []const u8) Hash {
|
||||
var hash_code = @as(Hash, 5381);
|
||||
|
||||
for (string) |byte| {
|
||||
hash_code = ((hash_code << 5) + hash_code) + byte;
|
||||
}
|
||||
|
||||
return hash_code;
|
||||
}
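// A minimal usage sketch of the hash table above, keyed by strings via `string_keyer`. The
// `allocator` parameter is assumed to be supplied by the caller; the register names are illustrative.
fn hashed_table_example(allocator: io.Allocator) io.AllocationError!void {
	var registers = Hashed([]const u8, u8, string_keyer){};

	defer registers.deinit(allocator);

	// `insert` only writes when no entry with an equal key exists yet...
	debug.assert(try registers.insert(allocator, "acc", 0));
	debug.assert(!(try registers.insert(allocator, "acc", 1)));

	// ...while `assign` overwrites and returns the previous entry when one existed.
	const previous = try registers.assign(allocator, "acc", 2);

	debug.assert(previous != null and previous.?.value == 0);
	debug.assert(registers.lookup("acc").? == 2);
	debug.assert(registers.lookup("missing") == null);
}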
|
|
@ -4,46 +4,18 @@ const io = @import("./io.zig");
|
|||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
const std = @import("std");
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const DecimalFormat = struct {
|
||||
delimiter: []const u8 = "",
|
||||
positive_prefix: enum {none, plus, space} = .none,
|
||||
delimiter: []const io.Byte,
|
||||
positive_prefix: enum {none, plus, space},
|
||||
|
||||
const default = DecimalFormat{
|
||||
.delimiter = "",
|
||||
.positive_prefix = .none,
|
||||
};
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const HexadecimalFormat = struct {
|
||||
delimiter: []const u8 = "",
|
||||
positive_prefix: enum {none, plus, space} = .none,
|
||||
casing: enum {lower, upper} = .lower,
|
||||
};
|
||||
|
||||
///
|
||||
/// Errors that may occur during any kind of utf8-encoded parsing.
|
||||
///
|
||||
pub const ParseError = error {
|
||||
BadSyntax,
|
||||
};
|
||||
|
||||
///
|
||||
/// Errors that may occur during any kind of utf8-encoded printing.
|
||||
///
|
||||
pub const PrintError = error {
|
||||
PrintFailed,
|
||||
PrintIncomplete,
|
||||
};
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFormat) !Decimal {
|
||||
pub fn parse(self: DecimalFormat, utf8: []const io.Byte, comptime Decimal: type) ?Decimal {
|
||||
if (utf8.len == 0) {
|
||||
return error.BadSyntax;
|
||||
return null;
|
||||
}
|
||||
|
||||
switch (@typeInfo(Decimal)) {
|
||||
|
@ -55,7 +27,7 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
|
||||
var result = @as(Decimal, 0);
|
||||
|
||||
for (@boolToInt(has_sign) .. utf8.len) |index| {
|
||||
for (@intFromBool(has_sign) .. utf8.len) |index| {
|
||||
const radix = 10;
|
||||
const code = utf8[index];
|
||||
|
||||
|
@ -67,8 +39,8 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
},
|
||||
|
||||
else => {
|
||||
if (format.delimiter.len == 0 or !io.equals(format.delimiter, utf8[index ..])) {
|
||||
return error.BadSyntax;
|
||||
if (self.delimiter.len == 0 or !io.equals(self.delimiter, utf8[index ..])) {
|
||||
return null;
|
||||
}
|
||||
},
|
||||
}
|
||||
|
@ -81,7 +53,7 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
|
||||
.unsigned => {
|
||||
if (has_sign and utf8[0] == '-') {
|
||||
return error.OutOfMemory;
|
||||
return null;
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@ -90,11 +62,6 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
},
|
||||
|
||||
.Float => {
|
||||
// ""
|
||||
if (utf8.len == 0) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
var has_sign = switch (utf8[0]) {
|
||||
'-', '+', ' ' => true,
|
||||
else => false,
|
||||
|
@ -102,35 +69,41 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
|
||||
// "-"
|
||||
if (has_sign and utf8.len == 1) {
|
||||
return error.BadSyntax;
|
||||
return null;
|
||||
}
|
||||
|
||||
const sign_offset = @boolToInt(has_sign);
|
||||
const sign_offset = @intFromBool(has_sign);
|
||||
var has_decimal = utf8[sign_offset] == '.';
|
||||
|
||||
// "-."
|
||||
if (has_decimal and (utf8.len == 2)) {
|
||||
return error.BadSyntax;
|
||||
return null;
|
||||
}
|
||||
|
||||
var result = @as(Decimal, 0);
|
||||
var factor = @as(Decimal, if (has_sign and utf8[0] == '-') -1 else 1);
|
||||
|
||||
for (utf8[0 .. (sign_offset + @boolToInt(has_decimal))]) |code| switch (code) {
|
||||
for (utf8[sign_offset + @intFromBool(has_decimal) .. utf8.len]) |code| {
|
||||
switch (code) {
|
||||
'.' => {
|
||||
if (has_decimal) return error.BadSyntax;
|
||||
if (has_decimal) {
|
||||
return null;
|
||||
}
|
||||
|
||||
has_decimal = true;
|
||||
},
|
||||
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
|
||||
if (has_decimal) factor /= 10.0;
|
||||
if (has_decimal) {
|
||||
factor /= 10.0;
|
||||
}
|
||||
|
||||
result = ((result * 10.0) + @intToFloat(Decimal, code - '0'));
|
||||
result = ((result * 10.0) + @as(Decimal, @floatFromInt(code - '0')));
|
||||
},
|
||||
|
||||
else => return error.BadSyntax,
|
||||
};
|
||||
else => return null,
|
||||
}
|
||||
}
|
||||
|
||||
return result * factor;
|
||||
},
|
||||
|
@ -139,20 +112,80 @@ pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFo
|
|||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to print `utf8` to `writer`.
|
||||
///
|
||||
/// The function returns [PrintError] if the write failed to complete partially or entirely.
|
||||
///
|
||||
pub fn print(writer: io.Writer, utf8: []const u8) PrintError!void {
|
||||
pub fn print(self: DecimalFormat, writer: io.Writer, value: anytype) PrintError!void {
|
||||
if (value == 0) {
|
||||
return print_string(writer, switch (self.positive_prefix) {
|
||||
.none => "0",
|
||||
.plus => "+0",
|
||||
.space => " 0",
|
||||
});
|
||||
}
|
||||
|
||||
const ValueType = @TypeOf(value);
|
||||
|
||||
switch (@typeInfo(ValueType)) {
|
||||
.Int => |int| {
|
||||
const radix = 10;
|
||||
var buffer = [_]u8{0} ** (1 + math.max(int.bits, 1));
|
||||
var buffer_start = buffer.len - 1;
|
||||
|
||||
{
|
||||
var decomposable_value = value;
|
||||
|
||||
while (decomposable_value != 0) : (buffer_start -= 1) {
|
||||
buffer[buffer_start] = @intCast((decomposable_value % radix) + '0');
|
||||
decomposable_value = (decomposable_value / radix);
|
||||
}
|
||||
}
|
||||
|
||||
if (int.signedness == .unsigned and value < 0) {
|
||||
buffer[buffer_start] = '-';
|
||||
} else {
|
||||
switch (self.positive_prefix) {
|
||||
.none => buffer_start += 1,
|
||||
.plus => buffer[buffer_start] = '+',
|
||||
.space => buffer[buffer_start] = ' ',
|
||||
}
|
||||
}
|
||||
|
||||
try print_string(writer, buffer[buffer_start ..]);
|
||||
},
|
||||
|
||||
else => @compileError(unformattableMessage(ValueType)),
|
||||
}
|
||||
}
|
||||
};
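// A small sketch of the reworked DecimalFormat API above: parsing is now non-throwing and yields null
// on malformed input. The literal values below are illustrative and assume `std` stays imported here.
test "decimal format parsing" {
	const plain = DecimalFormat{
		.delimiter = "",
		.positive_prefix = .none,
	};

	std.debug.assert(plain.parse("1024", u32).? == 1024);

	// Unsigned targets reject negative input instead of raising an error, and empty input never parses.
	std.debug.assert(plain.parse("-25", u16) == null);
	std.debug.assert(plain.parse("", u32) == null);
}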
|
||||
|
||||
pub const HexadecimalFormat = struct {
|
||||
delimiter: []const u8 = "",
|
||||
positive_prefix: enum {none, plus, space} = .none,
|
||||
casing: enum {lower, upper} = .lower,
|
||||
|
||||
const default = HexadecimalFormat{
|
||||
.delimiter = "",
|
||||
.positive_prefix = .none,
|
||||
.casing = .lower,
|
||||
};
|
||||
|
||||
pub fn print(self: HexadecimalFormat, writer: io.Writer, value: anytype) PrintError!void {
|
||||
// TODO: Implement.
|
||||
_ = self;
|
||||
_ = writer;
|
||||
_ = value;
|
||||
}
|
||||
};
|
||||
|
||||
pub const PrintError = error {
|
||||
PrintFailed,
|
||||
PrintIncomplete,
|
||||
};
|
||||
|
||||
pub fn print_string(writer: io.Writer, utf8: []const io.Byte) PrintError!void {
|
||||
if ((writer.invoke(utf8) orelse return error.PrintFailed) != utf8.len) {
|
||||
return error.PrintIncomplete;
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments: anytype) PrintError!void {
|
||||
switch (@typeInfo(@TypeOf(arguments))) {
|
||||
.Struct => |arguments_struct| {
|
||||
|
@ -170,7 +203,7 @@ pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments
|
|||
|
||||
switch (format[tail]) {
|
||||
'{' => {
|
||||
try print(writer, format[head .. (tail - 1)]);
|
||||
try print_string(writer, format[head .. (tail - 1)]);
|
||||
|
||||
tail += 1;
|
||||
head = tail;
|
||||
|
@ -181,7 +214,7 @@ pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments
|
|||
@compileError("all format specifiers must be named when using a named struct");
|
||||
}
|
||||
|
||||
try print(writer, arguments[arg_index]);
|
||||
try print_string(writer, arguments[arg_index]);
|
||||
|
||||
arg_index += 1;
|
||||
tail += 1;
|
||||
|
@ -193,7 +226,7 @@ pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments
|
|||
@compileError("format specifiers cannot be named when using a tuple struct");
|
||||
}
|
||||
|
||||
try print(writer, format[head .. (tail - 1)]);
|
||||
try print_string(writer, format[head .. (tail - 1)]);
|
||||
|
||||
head = tail;
|
||||
tail += 1;
|
||||
|
@ -224,69 +257,17 @@ pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments
|
|||
}
|
||||
}
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn print_decimal(writer: io.Writer, value: anytype, format: DecimalFormat) PrintError!void {
|
||||
if (value == 0) {
|
||||
return print(writer, switch (format.positive_prefix) {
|
||||
.none => "0",
|
||||
.plus => "+0",
|
||||
.space => " 0",
|
||||
});
|
||||
}
|
||||
|
||||
switch (@typeInfo(@TypeOf(value))) {
|
||||
.Int => |int| {
|
||||
const radix = 10;
|
||||
var buffer = [_]u8{0} ** (1 + math.max(int.bits, 1));
|
||||
var buffer_start = buffer.len - 1;
|
||||
|
||||
{
|
||||
var decomposable_value = value;
|
||||
|
||||
while (decomposable_value != 0) : (buffer_start -= 1) {
|
||||
buffer[buffer_start] = @intCast(u8, (decomposable_value % radix) + '0');
|
||||
decomposable_value = (decomposable_value / radix);
|
||||
}
|
||||
}
|
||||
|
||||
if (int.signedness == .unsigned and value < 0) {
|
||||
buffer[buffer_start] = '-';
|
||||
} else {
|
||||
switch (format.positive_prefix) {
|
||||
.none => buffer_start += 1,
|
||||
.plus => buffer[buffer_start] = '+',
|
||||
.space => buffer[buffer_start] = ' ',
|
||||
}
|
||||
}
|
||||
|
||||
try print(writer, buffer[buffer_start ..]);
|
||||
},
|
||||
|
||||
else => @compileError("`arguments` must be a struct type"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print_hexadecimal(writer: io.Writer, value: anytype, format: HexadecimalFormat) PrintError!void {
|
||||
// TODO: Implement.
|
||||
_ = writer;
|
||||
_ = value;
|
||||
_ = format;
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
noinline fn print_value(writer: io.Writer, value: anytype) PrintError!void {
|
||||
const Value = @TypeOf(value);
|
||||
|
||||
return switch (@typeInfo(Value)) {
|
||||
.Int => print_decimal(writer, value, .{}),
|
||||
.Float => print_decimal(writer, value, .{}),
|
||||
.Int => DecimalFormat.default.print(writer, value),
|
||||
.Float => DecimalFormat.default.print(writer, value),
|
||||
|
||||
.Pointer => |pointer| switch (pointer.size) {
|
||||
.One, .Many, .C => print_hexadecimal(writer, @ptrToInt(value), .{}),
|
||||
.Slice => if (pointer.child == u8) print(writer, value) else @compileError(unformattableMessage(Value)),
|
||||
.Many, .C => HexadecimalFormat.default.print(writer, @intFromPtr(value)),
|
||||
.One => if (pointer.child == []const u8) print_string(writer, *value) else HexadecimalFormat.default.print(writer, @intFromPtr(value)),
|
||||
.Slice => if (pointer.child == u8) print_string(writer, value) else @compileError(unformattableMessage(Value)),
|
||||
},
|
||||
|
||||
else => @compileError(unformattableMessage(Value)),
|
||||
|
@ -294,5 +275,5 @@ noinline fn print_value(writer: io.Writer, value: anytype) PrintError!void {
|
|||
}
|
||||
|
||||
fn unformattableMessage(comptime Value: type) []const u8 {
|
||||
return "`" ++ @typeName(Value) ++ "` are not formattable";
|
||||
return "type `" ++ @typeName(Value) ++ "` is not formattable with this formatter";
|
||||
}
|
||||
|
|
|
@ -0,0 +1,128 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
const ext = @import("./ext.zig");
|
||||
|
||||
const file = @import("./file.zig");
|
||||
|
||||
const kym = @import("./kym.zig");
|
||||
|
||||
pub const Manifest = struct {
|
||||
title: [255:0]coral.io.Byte = [_:0]coral.io.Byte{0} ** 255,
|
||||
width: u16 = 640,
|
||||
height: u16 = 480,
|
||||
tick_rate: f32 = 60.0,
|
||||
|
||||
pub fn load(self: *Manifest, env: *kym.RuntimeEnv, file_access: file.Access) kym.RuntimeError!void {
|
||||
const manifest = try env.execute_file(file_access, file.Path.from(&.{"app.ona"}));
|
||||
|
||||
defer env.discard(manifest);
|
||||
|
||||
const title = try env.get_field(manifest, "title");
|
||||
|
||||
defer env.discard(title);
|
||||
|
||||
const title_string = try env.get_string(title);
|
||||
|
||||
const width = @as(u16, get: {
|
||||
const ref = try env.get_field(manifest, "width");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @intFromFloat(env.get_float(ref) catch @as(f64, @floatFromInt(self.width)));
|
||||
});
|
||||
|
||||
const height = @as(u16, get: {
|
||||
const ref = try env.get_field(manifest, "height");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @intFromFloat(env.get_float(ref) catch @as(f64, @floatFromInt(self.height)));
|
||||
});
|
||||
|
||||
const tick_rate = @as(f32, get: {
|
||||
const ref = try env.get_field(manifest, "tick_rate");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @floatCast(env.get_float(ref) catch self.tick_rate);
|
||||
});
|
||||
|
||||
{
|
||||
const limited_title_len = coral.math.min(title_string.len, self.title.len);
|
||||
|
||||
coral.io.copy(&self.title, title_string[0 .. limited_title_len]);
|
||||
coral.io.zero(self.title[limited_title_len .. self.title.len]);
|
||||
}
|
||||
|
||||
self.tick_rate = tick_rate;
|
||||
self.width = width;
|
||||
self.height = height;
|
||||
}
|
||||
};
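// A sketch of how the manifest might be pulled in by the runner. The RuntimeEnv construction is not
// part of this file, so `env` is taken as a parameter here and the wrapper name is illustrative.
fn load_manifest(env: *kym.RuntimeEnv) kym.RuntimeError!Manifest {
	var manifest = Manifest{};

	// `app.ona` is resolved relative to the sandboxed working directory.
	try manifest.load(env, .{.sandboxed_path = &file.Path.cwd});

	return manifest;
}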
|
||||
|
||||
pub const LogSeverity = enum {
|
||||
info,
|
||||
warn,
|
||||
fail,
|
||||
};
|
||||
|
||||
pub const WritableLog = struct {
|
||||
severity: LogSeverity,
|
||||
write_buffer: coral.list.ByteStack,
|
||||
|
||||
pub fn as_writer(self: *WritableLog) coral.io.Writer {
|
||||
return coral.io.Writer.bind(WritableLog, self, struct {
|
||||
fn write(writable_log: *WritableLog, bytes: []const coral.io.Byte) ?usize {
|
||||
writable_log.write(bytes) catch return null;
|
||||
|
||||
return bytes.len;
|
||||
}
|
||||
}.write);
|
||||
}
|
||||
|
||||
pub fn free(self: *WritableLog) void {
|
||||
self.write_buffer.free();
|
||||
}
|
||||
|
||||
pub fn make(log_severity: LogSeverity, allocator: coral.io.Allocator) WritableLog {
|
||||
return .{
|
||||
.severity = log_severity,
|
||||
.write_buffer = coral.list.ByteStack.make(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(self: *WritableLog, bytes: []const coral.io.Byte) coral.io.AllocationError!void {
|
||||
const format_string = "%.*s";
|
||||
var line_written = @as(usize, 0);
|
||||
|
||||
for (bytes) |byte| {
|
||||
if (byte == '\n') {
|
||||
ext.SDL_LogError(
|
||||
ext.SDL_LOG_CATEGORY_APPLICATION,
|
||||
format_string,
|
||||
self.write_buffer.values.len,
|
||||
self.write_buffer.values.ptr);
|
||||
|
||||
self.write_buffer.clear();
|
||||
|
||||
line_written = 0;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
try self.write_buffer.push_one(byte);
|
||||
|
||||
line_written += 1;
|
||||
}
|
||||
|
||||
if (self.write_buffer.values.len == 0) {
|
||||
ext.SDL_LogError(
|
||||
ext.SDL_LOG_CATEGORY_APPLICATION,
|
||||
format_string,
|
||||
self.write_buffer.values.len,
|
||||
self.write_buffer.values.ptr);
|
||||
|
||||
self.write_buffer.clear();
|
||||
}
|
||||
}
|
||||
};
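// A sketch of pushing text through the log type above; the allocator is assumed to come from the
// caller (e.g. the heap allocator used elsewhere in this change) and the message is illustrative.
fn report_startup(allocator: coral.io.Allocator) coral.io.AllocationError!void {
	var info_log = WritableLog.make(.info, allocator);

	defer info_log.free();

	// Every '\n' flushes the buffered line out through SDL's logging facility.
	try info_log.write("runner started\n");
}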
|
|
@ -1,23 +0,0 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
pub const Item = struct {
|
||||
transform: Transform,
|
||||
|
||||
options: union (enum) {
|
||||
sprite: struct {
|
||||
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
pub const Transform = extern struct {
|
||||
x: coral.math.Vector2,
|
||||
y: coral.math.Vector2,
|
||||
origin: coral.math.Vector2,
|
||||
|
||||
pub const identity = Transform{
|
||||
.x = .{1, 0},
|
||||
.y = .{0, 1},
|
||||
.origin = .{0, 0},
|
||||
};
|
||||
};
|
|
@ -1,4 +1,3 @@
|
|||
|
||||
pub usingnamespace @cImport({
|
||||
@cInclude("SDL2/SDL.h");
|
||||
});
|
||||
|
|
|
@ -2,53 +2,53 @@ const coral = @import("coral");
|
|||
|
||||
const ext = @import("./ext.zig");
|
||||
|
||||
pub const Contents = struct {
|
||||
allocator: coral.io.Allocator,
|
||||
data: []u8,
|
||||
pub const Access = union (enum) {
|
||||
sandboxed_path: *const Path,
|
||||
|
||||
pub const InitError = coral.io.AllocationError || Readable.ReadError;
|
||||
pub fn open_readable(self: Access, readable_path: Path) ?*Readable {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const readable_path_string = sandboxed_path.joined(readable_path).to_string() orelse return null;
|
||||
|
||||
pub fn deinit(self: *Contents) void {
|
||||
coral.io.deallocate(self.allocator, self.data);
|
||||
return @ptrCast(ext.SDL_RWFromFile(readable_path_string.ptr, "rb"));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator, readable_file: *Readable) InitError!Contents {
|
||||
const file_offset = try readable_file.skip(0);
|
||||
const file_size = try readable_file.seek_end();
|
||||
pub fn query(self: Access, path: Path) ?Info {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const path_string = sandboxed_path.joined(path).to_string() orelse return null;
|
||||
const rw_ops = ext.SDL_RWFromFile(path_string, "rb") orelse return null;
|
||||
const file_size = ext.SDL_RWseek(rw_ops, 0, ext.RW_SEEK_END);
|
||||
|
||||
_ = try readable_file.seek(file_offset);
|
||||
|
||||
const allocation = try coral.io.allocate_many(u8, file_size, allocator);
|
||||
|
||||
errdefer coral.io.deallocate(allocator, allocation);
|
||||
|
||||
if (try readable_file.read(allocation) != allocation.len) {
|
||||
// Read less than was allocated for.
|
||||
return error.FileUnavailable;
|
||||
if (ext.SDL_RWclose(rw_ops) != 0 or file_size < 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return Contents{
|
||||
.allocator = allocator,
|
||||
.data = allocation,
|
||||
return Info{
|
||||
.size = @intCast(file_size),
|
||||
};
|
||||
},
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Info = struct {
|
||||
size: u64,
|
||||
};
|
||||
|
||||
pub const Path = extern struct {
|
||||
data: [4096]u8 = [_]u8{0} ** 4096,
|
||||
data: [4096]coral.io.Byte = [_]coral.io.Byte{0} ** 4096,
|
||||
|
||||
pub const cwd = Path.from(&.{"./"});
|
||||
|
||||
pub const ValidationError = error {
|
||||
PathTooLong,
|
||||
};
|
||||
|
||||
pub fn from(components: []const []const u8) Path {
|
||||
// TODO: Implement proper parsing / removal of duplicate path delimiters.
|
||||
var path = Path{};
|
||||
|
||||
{
|
||||
var writable_slice = coral.io.FixedBuffer{.slice = &path.data};
|
||||
var writable_slice = coral.io.FixedBuffer{.bytes = &path.data};
|
||||
|
||||
for (components) |component| {
|
||||
if (writable_slice.write(component) != component.len) {
|
||||
|
@ -64,7 +64,7 @@ pub const Path = extern struct {
|
|||
var path = Path{};
|
||||
|
||||
{
|
||||
var writable = coral.io.FixedBuffer{.slice = &path.data};
|
||||
var writable = coral.io.FixedBuffer{.bytes = &path.data};
|
||||
var written = @as(usize, 0);
|
||||
|
||||
for (&self.data) |byte| {
|
||||
|
@ -91,32 +91,20 @@ pub const Path = extern struct {
|
|||
return path;
|
||||
}
|
||||
|
||||
pub fn to_string(self: Path) ValidationError![:0]const u8 {
|
||||
const sentineled_data = get_sentineled_data: {
|
||||
pub fn to_string(self: Path) ?[:0]const coral.io.Byte {
|
||||
const last_index = self.data.len - 1;
|
||||
|
||||
if (self.data[last_index] != 0) {
|
||||
return error.PathTooLong;
|
||||
return null;
|
||||
}
|
||||
|
||||
break: get_sentineled_data self.data[0 .. last_index:0];
|
||||
};
|
||||
|
||||
return sentineled_data[0 .. coral.io.sentinel_index(u8, 0, sentineled_data):0];
|
||||
return coral.io.slice_sentineled(@as(coral.io.Byte, 0), @as([*:0]const coral.io.Byte, @ptrCast(&self.data)));
|
||||
}
|
||||
};
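// A minimal sketch of composing sandboxed paths with the Path API above; the component strings are
// illustrative only.
test "path joining" {
	const base = Path.from(&.{"data/"});
	const script_path = base.joined(Path.from(&.{"scripts/boot.ona"}));

	// to_string only returns null once a joined path no longer fits the fixed 4096-byte buffer.
	coral.debug.assert(script_path.to_string() != null);
}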
|
||||
|
||||
pub const ReadError = error {
|
||||
FileUnavailable,
|
||||
};
|
||||
|
||||
pub const Readable = opaque {
|
||||
pub fn as_reader(self: *Readable) coral.io.Reader {
|
||||
return coral.io.Reader.bind(Readable, self, struct {
|
||||
fn read(readable: *Readable, buffer: []u8) ?usize {
|
||||
return readable.read(buffer) catch null;
|
||||
}
|
||||
}.read);
|
||||
return coral.io.Reader.bind(Readable, self, read_into);
|
||||
}
|
||||
|
||||
pub fn close(self: *Readable) void {
|
||||
|
@ -125,97 +113,81 @@ pub const Readable = opaque {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn read(self: *Readable, buffer: []u8) ReadError!usize {
|
||||
pub fn read_into(self: *Readable, buffer: []coral.io.Byte) ?usize {
|
||||
ext.SDL_ClearError();
|
||||
|
||||
const bytes_read = ext.SDL_RWread(rw_ops_cast(self), buffer.ptr, @sizeOf(u8), buffer.len);
|
||||
const bytes_read = ext.SDL_RWread(rw_ops_cast(self), buffer.ptr, @sizeOf(coral.io.Byte), buffer.len);
|
||||
const error_message = ext.SDL_GetError();
|
||||
|
||||
if (bytes_read == 0 and error_message != null and error_message.* != 0) {
|
||||
return error.FileUnavailable;
|
||||
return null;
|
||||
}
|
||||
|
||||
return bytes_read;
|
||||
}
|
||||
|
||||
pub fn seek(self: *Readable, cursor: u64) ReadError!u64 {
|
||||
pub fn seek_head(self: *Readable, cursor: u64) ?u64 {
|
||||
// TODO: Fix safety of int cast.
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), @intCast(i64, cursor), ext.RW_SEEK_SET);
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), @intCast(cursor), ext.RW_SEEK_SET);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
return null;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
|
||||
pub fn seek_end(self: *Readable) ReadError!usize {
|
||||
pub fn seek_tail(self: *Readable) ?usize {
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), 0, ext.RW_SEEK_END);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
|
||||
pub fn skip(self: *Readable, offset: i64) ReadError!u64 {
|
||||
pub fn skip(self: *Readable, offset: i64) ?u64 {
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), offset, ext.RW_SEEK_CUR);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
};
|
||||
|
||||
pub const System = union (enum) {
|
||||
sandboxed_path: *const Path,
|
||||
pub fn allocate_and_load(allocator: coral.io.Allocator, access: Access, path: Path) coral.io.AllocationError!?[]coral.io.Byte {
|
||||
const allocation = try allocator.reallocate(null, query_file_size: {
|
||||
const info = access.query(path) orelse return null;
|
||||
|
||||
pub const FileInfo = struct {
|
||||
size: u64,
|
||||
};
|
||||
|
||||
pub const OpenError = Path.ValidationError || error {
|
||||
FileNotFound,
|
||||
};
|
||||
|
||||
pub const QueryError = OpenError || ReadError;
|
||||
|
||||
pub fn open_readable(self: System, path: Path) OpenError!*Readable {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
return @ptrCast(*Readable, ext.SDL_RWFromFile(try sandboxed_path.joined(path).to_string(), "rb") orelse {
|
||||
return error.FileNotFound;
|
||||
break: query_file_size info.size;
|
||||
});
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn query_info(self: System, path: Path) QueryError!FileInfo {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const file = ext.SDL_RWFromFile(try sandboxed_path.joined(path).to_string(), "rb") orelse {
|
||||
return error.FileNotFound;
|
||||
const readable = access.open_readable(path) orelse {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
defer coral.debug.assert(ext.SDL_RWclose(file) == 0);
|
||||
defer _ = readable.close();
|
||||
|
||||
const file_size = ext.SDL_RWseek(file, 0, ext.RW_SEEK_END);
|
||||
const bytes_read = readable.read_into(allocation) orelse {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
if (file_size < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return FileInfo{
|
||||
.size = @intCast(u64, file_size),
|
||||
return null;
|
||||
};
|
||||
|
||||
if (bytes_read != allocation.len) {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
return allocation;
|
||||
}
|
||||
}
|
||||
};
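// A sketch of reading a whole file through the Access API above. Failure is reduced to a null result,
// mirroring allocate_and_load, and the file name is illustrative.
fn load_script(allocator: coral.io.Allocator) coral.io.AllocationError!?[]coral.io.Byte {
	const access = Access{.sandboxed_path = &Path.cwd};

	// Null covers a missing file, a failed read, or a short read against the queried size.
	return allocate_and_load(allocator, access, Path.from(&.{"app.ona"}));
}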
|
||||
|
||||
fn rw_ops_cast(ptr: *anyopaque) *ext.SDL_RWops {
|
||||
return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), ptr));
|
||||
return @ptrCast(@alignCast(ptr));
|
||||
}
|
||||
|
|
|
@ -6,121 +6,84 @@ const ext = @import("./ext.zig");
|
|||
|
||||
const std = @import("std");
|
||||
|
||||
///
|
||||
/// Recorded allocation info state.
|
||||
///
|
||||
const AllocationInfo = struct {
|
||||
trace: AllocationTrace,
|
||||
next_info: ?*AllocationInfo,
|
||||
size: usize,
|
||||
};
|
||||
|
||||
///
|
||||
/// Recorded stack trace of allocation call site.
|
||||
///
|
||||
/// *Note* this structure is reduced to zero bytes in release builds optimized for speed or size.
|
||||
///
|
||||
const AllocationTrace = std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
|
||||
const AllocationNode = struct {
|
||||
trace: std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => true,
|
||||
.ReleaseFast, .ReleaseSmall => false,
|
||||
});
|
||||
}),
|
||||
|
||||
///
|
||||
/// Heap allocation context.
|
||||
///
|
||||
const Context = struct {
|
||||
allocation_info_head: ?*AllocationInfo = null,
|
||||
next: ?*AllocationNode,
|
||||
size: usize,
|
||||
|
||||
///
|
||||
/// Attempts to allocate a buffer of `size` length from `self`, with `return_address` as the location of the
|
||||
/// allocation request origin.
|
||||
///
|
||||
/// A reference to the allocated buffer is returned via a slice if the allocation was successful, otherwise `null`
|
||||
/// is returned.
|
||||
///
|
||||
/// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
|
||||
/// leak.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn allocate(self: *Context, size: usize, return_address: usize) ?[]u8 {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const allocation_info_size = @sizeOf(AllocationInfo);
|
||||
const total_allocation_size = allocation_info_size + size;
|
||||
const allocation = ext.SDL_malloc(total_allocation_size) orelse return null;
|
||||
const allocation_info = @ptrCast(*AllocationInfo, @alignCast(@alignOf(AllocationInfo), allocation));
|
||||
fn alloc(size: usize, return_address: usize) *AllocationNode {
|
||||
const node = @as(*AllocationNode, @ptrCast(@alignCast(ext.SDL_malloc(@sizeOf(AllocationNode) + size))));
|
||||
|
||||
allocation_info.* = .{
|
||||
node.* = .{
|
||||
.size = size,
|
||||
.next_info = self.allocation_info_head,
|
||||
.next = null,
|
||||
.trace = .{},
|
||||
};
|
||||
|
||||
allocation_info.trace.addAddr(return_address, "");
|
||||
node.trace.addAddr(return_address, "");
|
||||
|
||||
self.allocation_info_head = allocation_info;
|
||||
|
||||
return @ptrCast([*]u8, allocation)[allocation_info_size .. total_allocation_size];
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
return @ptrCast([*]u8, ext.SDL_malloc(size) orelse return null)[0 .. size];
|
||||
},
|
||||
}
|
||||
return node;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the assumed pointer to the [AllocationInfo] address of `allocation`.
|
||||
///
|
||||
fn allocation_info_of(allocation: [*]u8) *AllocationInfo {
|
||||
return @intToPtr(*AllocationInfo, @ptrToInt(allocation) - @sizeOf(AllocationInfo));
|
||||
fn dealloc(self: *AllocationNode) void {
|
||||
ext.SDL_free(self);
|
||||
}
|
||||
|
||||
///
|
||||
/// Deallocates the allocation buffer referenced by `allocation`.
|
||||
///
|
||||
/// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
|
||||
/// checked behavior will occur.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn realloc(self: *AllocationNode, size: usize, return_address: usize) *AllocationNode {
|
||||
const node = @as(*AllocationNode, @ptrCast(@alignCast(ext.SDL_realloc(self, @sizeOf(AllocationNode) + size))));
|
||||
|
||||
node.* = .{
|
||||
.size = size,
|
||||
.next = null,
|
||||
.trace = .{},
|
||||
};
|
||||
|
||||
node.trace.addAddr(return_address, "");
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
fn owns_userdata(self: *AllocationNode, other_userdata: []const coral.io.Byte) bool {
|
||||
const self_userdata = self.userdata();
|
||||
|
||||
return self_userdata.ptr == other_userdata.ptr and self_userdata.len == other_userdata.len;
|
||||
}
|
||||
|
||||
fn userdata(self: *AllocationNode) []coral.io.Byte {
|
||||
return @as([*]coral.io.Byte, @ptrFromInt(@intFromPtr(self) + @sizeOf(AllocationNode)))[0 .. self.size];
|
||||
}
|
||||
};
|
||||
|
||||
const Context = struct {
|
||||
head: ?*AllocationNode = null,
|
||||
|
||||
fn deallocate(self: *Context, allocation: []u8) void {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const target_allocation_info = allocation_info_of(allocation.ptr);
|
||||
const panic_message = "incorrect allocation address for deallocating";
|
||||
var current_node = self.head orelse @panic(panic_message);
|
||||
|
||||
if (target_allocation_info.size != allocation.len) {
|
||||
@panic("incorrect allocation length for deallocating");
|
||||
if (current_node.owns_userdata(allocation)) {
|
||||
self.head = current_node.next;
|
||||
|
||||
return current_node.dealloc();
|
||||
}
|
||||
|
||||
if (self.allocation_info_head) |allocation_info_head| {
|
||||
if (target_allocation_info == allocation_info_head) {
|
||||
self.allocation_info_head = allocation_info_head.next_info;
|
||||
while (true) {
|
||||
const next_node = current_node.next orelse @panic(panic_message);
|
||||
|
||||
ext.SDL_free(target_allocation_info);
|
||||
if (next_node.owns_userdata(allocation)) {
|
||||
current_node.next = next_node.next;
|
||||
|
||||
return;
|
||||
return next_node.dealloc();
|
||||
}
|
||||
|
||||
var previous_allocation_info = allocation_info_head;
|
||||
var current_allocation_info = allocation_info_head.next_info;
|
||||
|
||||
while (current_allocation_info) |allocation_info| {
|
||||
if (allocation_info == target_allocation_info) {
|
||||
previous_allocation_info.next_info = allocation_info.next_info;
|
||||
|
||||
ext.SDL_free(target_allocation_info);
|
||||
|
||||
return;
|
||||
current_node = next_node;
|
||||
}
|
||||
|
||||
previous_allocation_info = allocation_info;
|
||||
current_allocation_info = allocation_info.next_info;
|
||||
}
|
||||
}
|
||||
|
||||
@panic("incorrect allocation address for deallocating");
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
|
@ -129,111 +92,67 @@ const Context = struct {
|
|||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to reallocate the buffer referenced by `allocation` to be `size` length from `self`.
|
||||
///
|
||||
/// A reference to the reallocated buffer is returned via a slice if the reallocation was successful, otherwise
|
||||
/// `null` is returned.
|
||||
///
|
||||
/// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
|
||||
/// leak.
|
||||
///
|
||||
/// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
|
||||
/// checked behavior will occur.
|
||||
///
|
||||
/// *Note* the allocation referenced by `allocation` should be considered invalid once the function returns,
|
||||
/// discarding it in favor of the return value.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn reallocate(self: *Context, allocation: []u8, size: usize) ?[]u8 {
|
||||
fn reallocate(self: *Context, return_address: usize, existing_allocation: ?[]u8, size: usize) coral.io.AllocationError![]u8 {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const target_allocation_info = allocation_info_of(allocation.ptr);
|
||||
if (existing_allocation) |allocation| {
|
||||
const panic_message = "incorrect allocation address for reallocating";
|
||||
var current_node = self.head orelse @panic(panic_message);
|
||||
|
||||
if (target_allocation_info.size != allocation.len) {
|
||||
@panic("incorrect allocation length for reallocating");
|
||||
if (current_node.owns_userdata(allocation)) {
|
||||
const node = current_node.realloc(size, return_address);
|
||||
|
||||
self.head = node;
|
||||
|
||||
return node.userdata();
|
||||
}
|
||||
|
||||
const allocation_info_size = @sizeOf(AllocationInfo);
|
||||
while (true) {
|
||||
const next_node = current_node.next orelse @panic(panic_message);
|
||||
|
||||
if (self.allocation_info_head) |allocation_info_head| {
|
||||
if (target_allocation_info == allocation_info_head) {
|
||||
self.allocation_info_head = allocation_info_head.next_info;
|
||||
if (next_node.owns_userdata(allocation)) {
|
||||
const node = next_node.realloc(size, return_address);
|
||||
|
||||
const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
|
||||
current_node.next = node;
|
||||
|
||||
target_allocation_info.size = size;
|
||||
|
||||
return @ptrCast([*]u8, allocation_address)[
|
||||
allocation_info_size .. (allocation_info_size + size)];
|
||||
return node.userdata();
|
||||
}
|
||||
|
||||
var previous_allocation_info = allocation_info_head;
|
||||
var current_allocation_info = allocation_info_head.next_info;
|
||||
current_node = next_node;
|
||||
}
|
||||
} else {
|
||||
const node = AllocationNode.alloc(size, return_address);
|
||||
|
||||
while (current_allocation_info) |allocation_info| {
|
||||
if (allocation_info == target_allocation_info) {
|
||||
previous_allocation_info.next_info = allocation_info.next_info;
|
||||
|
||||
const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
|
||||
|
||||
target_allocation_info.size = size;
|
||||
|
||||
return @ptrCast([*]u8, allocation_address)[
|
||||
allocation_info_size .. (allocation_info_size + size)];
|
||||
if (self.head) |head| {
|
||||
node.next = head;
|
||||
}
|
||||
|
||||
previous_allocation_info = allocation_info;
|
||||
current_allocation_info = allocation_info.next_info;
|
||||
}
|
||||
}
|
||||
self.head = node;
|
||||
|
||||
@panic("incorrect allocation address for reallocating");
|
||||
return node.userdata();
|
||||
}
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
return @ptrCast([*]u8, ext.SDL_realloc(allocation.ptr, size) orelse return null)[0 .. size];
|
||||
if (existing_allocation) |allocation| {
|
||||
return @as([*]u8, ext.SDL_realloc(allocation.ptr, size) orelse {
|
||||
return error.OutOfMemory;
|
||||
})[0 .. size];
|
||||
}
|
||||
|
||||
return @as([*]u8, ext.SDL_malloc(size) orelse return error.OutOfMemory)[0 .. size];
|
||||
},
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Heap context.
|
||||
///
|
||||
var context = Context{};
|
||||
|
||||
///
|
||||
/// Heap allocator.
|
||||
///
|
||||
pub const allocator = coral.io.Allocator.bind(Context, &context, struct {
|
||||
fn reallocate(self: *Context, options: coral.io.AllocationOptions) ?[]u8 {
|
||||
if (options.size == 0) {
|
||||
if (options.allocation) |allocation| {
|
||||
self.deallocate(allocation);
|
||||
pub const allocator = coral.io.Allocator.bind(Context, &context, .{
|
||||
.reallocate = Context.reallocate,
|
||||
.deallocate = Context.deallocate,
|
||||
});
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
return self.allocate(0, options.return_address);
|
||||
}
|
||||
|
||||
if (options.allocation) |allocation| {
|
||||
return self.reallocate(allocation, options.size);
|
||||
}
|
||||
|
||||
return self.allocate(options.size, options.return_address);
|
||||
}
|
||||
}.reallocate);
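// A sketch of the exported allocator interface after this change: a null existing allocation makes
// reallocate behave like a plain allocation, and deallocate releases the tracked node again. The
// buffer size is illustrative.
fn scratch_allocation_example() coral.io.AllocationError!void {
	const buffer = try allocator.reallocate(null, 64);

	defer allocator.deallocate(buffer);

	// Anything still held when trace_leaks runs is reported with its stack trace in safety-checked builds.
	coral.io.zero(buffer);
}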
|
||||
|
||||
///
|
||||
/// Checks for any allocations belonging to the process heap allocated through the [allocator] interface that are still
|
||||
/// alive and reports the stack traces of any detected allocations to stderr along with the allocation address and
|
||||
/// length.
|
||||
///
|
||||
/// *Note* this function becomes a no-op in release builds optimized for speed or size.
|
||||
///
|
||||
pub fn trace_leaks() void {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
|
@ -242,7 +161,7 @@ pub fn trace_leaks() void {
|
|||
while (current_allocation_info) |allocation_info| : (current_allocation_info = allocation_info.next_info) {
|
||||
std.debug.print("{d} byte leak at 0x{x} detected:\n", .{
|
||||
allocation_info.size,
|
||||
@ptrToInt(allocation_info) + @sizeOf(AllocationInfo),
|
||||
@as(usize, allocation_info) + @sizeOf(AllocationNode),
|
||||
});
|
||||
|
||||
allocation_info.trace.dump();
|
||||
|
|
|
@ -1,5 +1,649 @@
|
|||
pub const Environment = @import("./kym/Environment.zig");
|
||||
const Ast = @import("./kym/Ast.zig");
|
||||
|
||||
const State = @import("./kym/State.zig");
|
||||
|
||||
const Table = @import("./kym/Table.zig");
|
||||
|
||||
const coral = @import("coral");
|
||||
|
||||
const types = @import("./kym/types.zig");
|
||||
const file = @import("./file.zig");
|
||||
|
||||
const tokens = @import("./kym/tokens.zig");
|
||||
|
||||
pub const CallContext = struct {
|
||||
env: *RuntimeEnv,
|
||||
caller: *const RuntimeRef,
|
||||
callable: *const RuntimeRef,
|
||||
userdata: []u8,
|
||||
args: []const *const RuntimeRef = &.{},
|
||||
|
||||
pub fn arg_at(self: CallContext, index: u8) RuntimeError!*const RuntimeRef {
|
||||
if (!coral.math.is_clamped(index, 0, self.args.len - 1)) {
|
||||
return self.env.check_fail("argument out of bounds");
|
||||
}
|
||||
|
||||
return self.args[@as(usize, index)];
|
||||
}
|
||||
};
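// A sketch of reading script arguments through the CallContext above. How native callables get bound
// to a RuntimeEnv is not visible in this hunk, so the surrounding signature is an assumption, and
// get_string is assumed to accept the same reference type that arg_at hands back.
fn greeting_of(context: CallContext) RuntimeError![]const coral.io.Byte {
	// arg_at bounds-checks the index and raises a script-level failure when it is out of range.
	const name_ref = try context.arg_at(0);

	return context.env.get_string(name_ref);
}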
|
||||
|
||||
const Compiler = struct {
|
||||
state: *State,
|
||||
opcodes: OpcodeList,
|
||||
|
||||
locals: struct {
|
||||
buffer: [255][]const coral.io.Byte = [_][]const coral.io.Byte{""} ** 255,
|
||||
count: u8 = 0,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
fn declare(self: *Self, identifier: []const u8) CompileError!void {
|
||||
if (self.count == self.buffer.len) {
|
||||
return error.TooManyLocals;
|
||||
}
|
||||
|
||||
self.buffer[self.count] = identifier;
|
||||
self.count += 1;
|
||||
}
|
||||
|
||||
fn resolve(self: *Self, local_identifier: []const coral.io.Byte) ?u8 {
|
||||
var index = @as(u8, self.count);
|
||||
|
||||
while (index != 0) {
|
||||
index -= 1;
|
||||
|
||||
if (coral.io.equals(local_identifier, self.buffer[index])) {
|
||||
return index;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
},
|
||||
|
||||
const CompileError = coral.io.AllocationError || error {
|
||||
UndefinedLocal,
|
||||
TooManyLocals,
|
||||
};
|
||||
|
||||
const LocalsList = coral.list.Stack([]const u8);
|
||||
|
||||
const OpcodeList = coral.list.Stack(Opcode);
|
||||
|
||||
fn compile_ast(self: *Compiler, ast: Ast) CompileError!void {
|
||||
for (ast.list_statements()) |statement| {
|
||||
switch (statement) {
|
||||
.return_expression => |return_expression| {
|
||||
try self.compile_expression(return_expression);
|
||||
},
|
||||
|
||||
.return_nothing => {
|
||||
try self.opcodes.push_one(.push_nil);
|
||||
},
|
||||
|
||||
.set_local => |local| {
|
||||
try self.compile_expression(local.expression);
|
||||
|
||||
if (self.locals.resolve(local.identifier)) |index| {
|
||||
try self.opcodes.push_one(.{.set_local = index});
|
||||
} else {
|
||||
try self.locals.declare(local.identifier);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn compile_expression(self: *Compiler, expression: Ast.Expression) CompileError!void {
|
||||
const is_zero = struct {
|
||||
fn is_zero(utf8: []const u8) bool {
|
||||
return coral.io.equals(utf8, "0") or coral.io.equals(utf8, "0.0");
|
||||
}
|
||||
}.is_zero;
|
||||
|
||||
const number_format = coral.utf8.DecimalFormat{
|
||||
.delimiter = "_",
|
||||
.positive_prefix = .none,
|
||||
};
|
||||
|
||||
switch (expression) {
|
||||
.nil_literal => try self.opcodes.push_one(.push_nil),
|
||||
.true_literal => try self.opcodes.push_one(.push_true),
|
||||
.false_literal => try self.opcodes.push_one(.push_false),
|
||||
|
||||
.number_literal => |literal| {
|
||||
const parsed_number = number_format.parse(literal, State.Float);
|
||||
|
||||
coral.debug.assert(parsed_number != null);
|
||||
|
||||
try self.opcodes.push_one(if (is_zero(literal)) .push_zero else .{.push_number = parsed_number.?});
|
||||
},
|
||||
|
||||
.string_literal => |literal| {
|
||||
try self.opcodes.push_one(.{
|
||||
.push_object = try self.state.acquire_interned(literal, &string_info),
|
||||
});
|
||||
},
|
||||
|
||||
.table_literal => |fields| {
|
||||
if (fields.values.len > coral.math.max_int(@typeInfo(u32).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
for (fields.values) |field| {
|
||||
try self.compile_expression(field.expression);
|
||||
|
||||
try self.opcodes.push_one(.{
|
||||
.push_object = try self.state.acquire_interned(field.identifier, &string_info),
|
||||
});
|
||||
}
|
||||
|
||||
try self.opcodes.push_one(.{.push_table = @intCast(fields.values.len)});
|
||||
},
|
||||
|
||||
.binary_operation => |operation| {
|
||||
try self.compile_expression(operation.lhs_expression.*);
|
||||
try self.compile_expression(operation.rhs_expression.*);
|
||||
|
||||
try self.opcodes.push_one(switch (operation.operator) {
|
||||
.addition => .add,
|
||||
.subtraction => .sub,
|
||||
.multiplication => .mul,
|
||||
.divsion => .div,
|
||||
.greater_equals_comparison => .cge,
.greater_than_comparison => .cgt,
.equals_comparison => .eql,
|
||||
.less_than_comparison => .clt,
|
||||
.less_equals_comparison => .cle,
|
||||
});
|
||||
},
|
||||
|
||||
.unary_operation => |operation| {
|
||||
try self.compile_expression(operation.expression.*);
|
||||
|
||||
try self.opcodes.push_one(switch (operation.operator) {
|
||||
.boolean_negation => .not,
|
||||
.numeric_negation => .neg,
|
||||
});
|
||||
},
|
||||
|
||||
.grouped_expression => |grouped_expression| {
|
||||
try self.compile_expression(grouped_expression.*);
|
||||
},
|
||||
|
||||
.get_local => |local| {
|
||||
try self.opcodes.push_one(.{
|
||||
.get_local = self.locals.resolve(local) orelse return error.UndefinedLocal,
|
||||
});
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn free(self: *Compiler) void {
|
||||
for (self.opcodes.values) |opcode| {
|
||||
if (opcode == .push_object) {
|
||||
self.state.release(opcode.push_object);
|
||||
}
|
||||
}
|
||||
|
||||
self.opcodes.free();
|
||||
}
|
||||
|
||||
fn list_opcodes(self: Compiler) []const Opcode {
|
||||
return self.opcodes.values;
|
||||
}
|
||||
|
||||
fn make(allocator: coral.io.Allocator, state: *State) Compiler {
|
||||
return .{
|
||||
.locals = .{},
|
||||
.opcodes = OpcodeList.make(allocator),
|
||||
.state = state,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
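The `locals` helper in `Compiler` above resolves an identifier by scanning its fixed 255-slot buffer from the newest declaration backwards, so the most recent declaration of a name wins. A standalone sketch of that scheme with a small shadowing test (plain std Zig, not the kym `Compiler`):

```zig
const std = @import("std");

const Locals = struct {
    buffer: [255][]const u8 = [_][]const u8{""} ** 255,
    count: u8 = 0,

    const DeclareError = error{TooManyLocals};

    fn declare(self: *Locals, identifier: []const u8) DeclareError!void {
        if (self.count == self.buffer.len) return error.TooManyLocals;

        self.buffer[self.count] = identifier;
        self.count += 1;
    }

    // Scans newest-first so shadowing declarations resolve before older ones.
    fn resolve(self: *Locals, identifier: []const u8) ?u8 {
        var index = self.count;

        while (index != 0) {
            index -= 1;

            if (std.mem.eql(u8, identifier, self.buffer[index])) return index;
        }

        return null;
    }
};

test "latest declaration shadows earlier ones" {
    var locals = Locals{};

    try locals.declare("x");
    try locals.declare("y");
    try locals.declare("x");

    try std.testing.expectEqual(@as(?u8, 2), locals.resolve("x"));
    try std.testing.expectEqual(@as(?u8, null), locals.resolve("z"));
}
```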
pub const IndexContext = struct {
|
||||
env: *RuntimeEnv,
|
||||
indexable: *const RuntimeRef,
|
||||
index: *const RuntimeRef,
|
||||
userdata: []u8,
|
||||
};
|
||||
|
||||
pub const ObjectInfo = struct {
|
||||
call: *const fn (context: CallContext) RuntimeError!*RuntimeRef = default_call,
|
||||
clean: *const fn (userdata: []u8) void = default_clean,
|
||||
get: *const fn (context: IndexContext) RuntimeError!*RuntimeRef = default_get,
|
||||
set: *const fn (context: IndexContext, value: *const RuntimeRef) RuntimeError!void = default_set,
|
||||
|
||||
fn cast(object_info: *const anyopaque) *const ObjectInfo {
|
||||
return @ptrCast(@alignCast(object_info));
|
||||
}
|
||||
|
||||
fn default_call(context: CallContext) RuntimeError!*RuntimeRef {
|
||||
return context.env.raise(error.BadOperation, "attempt to call non-callable");
|
||||
}
|
||||
|
||||
fn default_clean(_: []u8) void {
|
||||
// Nothing to clean up by default.
|
||||
}
|
||||
|
||||
fn default_get(context: IndexContext) RuntimeError!*RuntimeRef {
|
||||
return context.env.raise(error.BadOperation, "attempt to get non-indexable");
|
||||
}
|
||||
|
||||
fn default_set(context: IndexContext, _: *const RuntimeRef) RuntimeError!void {
|
||||
return context.env.raise(error.BadOperation, "attempt to set non-indexable");
|
||||
}
|
||||
};
|
||||
|
||||
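`ObjectInfo` above is effectively a small vtable of function pointers with overridable defaults: an object kind customises behaviour by replacing individual entries, as `table_info` and `string_info` do further down this file. A standalone sketch of that pattern (hypothetical `Info`/`string_get` names, not the kym types):

```zig
const std = @import("std");

const Error = error{BadOperation};

// Default-initialized vtable; users override only the hooks they need.
const Info = struct {
    get: *const fn (userdata: []const u8, index: usize) Error!u8 = default_get,

    fn default_get(_: []const u8, _: usize) Error!u8 {
        return error.BadOperation;
    }
};

fn string_get(userdata: []const u8, index: usize) Error!u8 {
    if (index >= userdata.len) return error.BadOperation;

    return userdata[index];
}

const plain_info = Info{};
const string_info = Info{ .get = string_get };

test "only overridden hooks change behaviour" {
    try std.testing.expectError(error.BadOperation, plain_info.get("abc", 1));
    try std.testing.expectEqual(@as(u8, 'b'), try string_info.get("abc", 1));
}
```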
pub const Opcode = union (enum) {
push_nil,
push_true,
push_false,
push_zero,
push_number: State.Float,
push_table: u32,
push_object: *State.Object,

set_local: u8,
get_local: u8,

not,
neg,

add,
sub,
mul,
div,

eql,
cgt,
clt,
cge,
cle,
};
|
||||
|
||||
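To make the stack discipline behind these opcodes concrete: for a statement like `return 1 + 2 * x` a compiler in this style pushes operands before the operator that consumes them, so the emitted order is push 1, push 2, get local, mul, add. The sketch below runs that sequence through a simplified, self-contained evaluator; the `Op`/`eval` names and the reduced opcode set are inventions for illustration, not the kym `execute_chunk` loop.

```zig
const std = @import("std");

// Reduced mirror of the opcode shape above, just enough for one expression.
const Op = union(enum) {
    push_number: f64,
    get_local: u8,
    add,
    mul,
};

fn eval(ops: []const Op, locals: []const f64) f64 {
    var stack: [32]f64 = undefined;
    var top: usize = 0;

    for (ops) |op| {
        switch (op) {
            .push_number => |number| {
                stack[top] = number;
                top += 1;
            },

            .get_local => |index| {
                stack[top] = locals[index];
                top += 1;
            },

            .add => {
                top -= 1;
                stack[top - 1] += stack[top];
            },

            .mul => {
                top -= 1;
                stack[top - 1] *= stack[top];
            },
        }
    }

    // The final result is whatever remains on top of the stack.
    return stack[top - 1];
}

test "return 1 + 2 * x with x = 4" {
    const ops = [_]Op{
        .{ .push_number = 1 },
        .{ .push_number = 2 },
        .{ .get_local = 0 },
        .mul,
        .add,
    };

    try std.testing.expectEqual(@as(f64, 9), eval(&ops, &[_]f64{4}));
}
```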
pub const RuntimeEnv = struct {
|
||||
allocator: coral.io.Allocator,
|
||||
err_writer: coral.io.Writer,
|
||||
bound_refs: VariantSlab,
|
||||
state: State,
|
||||
|
||||
pub const Options = struct {
|
||||
out_writer: coral.io.Writer = coral.io.null_writer,
|
||||
err_writer: coral.io.Writer = coral.io.null_writer,
|
||||
};
|
||||
|
||||
pub const ScriptSource = struct {
|
||||
name: []const coral.io.Byte,
|
||||
data: []const coral.io.Byte,
|
||||
};
|
||||
|
||||
const VariantSlab = coral.map.Slab(State.Variant);
|
||||
|
||||
pub fn discard(self: *RuntimeEnv, ref: *RuntimeRef) void {
|
||||
coral.debug.assert(self.bound_refs.remove(@intFromPtr(ref)) != null);
|
||||
}
|
||||
|
||||
pub fn execute_chunk(self: *RuntimeEnv, name: []const coral.io.Byte, opcodes: []const Opcode) RuntimeError!*RuntimeRef {
|
||||
_ = name;
|
||||
|
||||
for (opcodes) |opcode| {
|
||||
switch (opcode) {
|
||||
.push_nil => try self.state.push_value(.nil),
|
||||
.push_true => try self.state.push_value(.true),
|
||||
.push_false => try self.state.push_value(.false),
|
||||
.push_zero => try self.state.push_value(.{.number = 0}),
|
||||
.push_number => |number| try self.state.push_value(.{.number = number}),
|
||||
|
||||
.push_table => |size| {
|
||||
var table = Table.make(self.allocator, &self.state);
|
||||
|
||||
errdefer table.free();
|
||||
|
||||
{
|
||||
var popped = @as(usize, 0);
|
||||
|
||||
while (popped < size) : (popped += 1) {
|
||||
try table.set_field(
|
||||
try to_object(self, try self.state.pop_value()),
|
||||
try self.state.pop_value());
|
||||
}
|
||||
}
|
||||
|
||||
const table_object = try self.state.acquire_new(coral.io.bytes_of(&table), &table_info);
|
||||
|
||||
errdefer self.state.release(table_object);
|
||||
|
||||
try self.state.push_value(.{.object = table_object});
|
||||
},
|
||||
|
||||
.push_object => |object| {
|
||||
const acquired_object = self.state.acquire_instance(object);
|
||||
|
||||
errdefer self.state.release(acquired_object);
|
||||
|
||||
try self.state.push_value(.{.object = acquired_object});
|
||||
},
|
||||
|
||||
.set_local => |local| {
|
||||
if (!self.state.set_value(local, try self.state.pop_value())) {
|
||||
return self.raise(error.BadOperation, "invalid local set");
|
||||
}
|
||||
},
|
||||
|
||||
.get_local => |local| {
|
||||
try self.state.push_value(self.state.get_value(local));
|
||||
},
|
||||
|
||||
.not => {
|
||||
try self.state.push_value(switch (try self.state.pop_value()) {
|
||||
.nil => return self.raise(error.BadOperation, "cannot convert nil to true or false"),
|
||||
.false => .true,
|
||||
.true => .false,
|
||||
.number => return self.raise(error.BadOperation, "cannot convert a number to true or false"),
|
||||
.object => return self.raise(error.BadOperation, "cannot convert an object to true or false"),
|
||||
});
|
||||
},
|
||||
|
||||
.neg => {
|
||||
try self.state.push_value(.{.number = -(try to_number(self, try self.state.pop_value()))});
|
||||
},
|
||||
|
||||
.add => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(.{.number = lhs_number + rhs_number});
|
||||
},
|
||||
|
||||
.sub => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(.{.number = lhs_number - rhs_number});
|
||||
},
|
||||
|
||||
.mul => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(.{.number = lhs_number * rhs_number});
|
||||
},
|
||||
|
||||
.div => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(.{.number = lhs_number / rhs_number});
|
||||
},
|
||||
|
||||
.eql => {
|
||||
const lhs = try self.state.pop_value();
|
||||
const rhs = try self.state.pop_value();
|
||||
|
||||
try self.state.push_value(if (lhs.equals(rhs)) .true else .false);
|
||||
},
|
||||
|
||||
.cgt => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(if (lhs_number > rhs_number) .true else .false);
|
||||
},
|
||||
|
||||
.clt => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(if (lhs_number < rhs_number) .true else .false);
|
||||
},
|
||||
|
||||
.cge => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(if (lhs_number >= rhs_number) .true else .false);
|
||||
},
|
||||
|
||||
.cle => {
|
||||
const lhs_number = try to_number(self, try self.state.pop_value());
|
||||
const rhs_number = try to_number(self, try self.state.pop_value());
|
||||
|
||||
try self.state.push_value(if (lhs_number <= rhs_number) .true else .false);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
const return_value = try self.state.pop_value();
|
||||
|
||||
errdefer if (return_value == .object) {
|
||||
self.state.release(return_value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try self.bound_refs.insert(return_value));
|
||||
}
|
||||
|
||||
pub fn execute_file(self: *RuntimeEnv, file_access: file.Access, file_path: file.Path) RuntimeError!*RuntimeRef {
|
||||
const error_message = "failed to load file";
|
||||
|
||||
const file_data = (try file.allocate_and_load(self.allocator, file_access, file_path)) orelse {
|
||||
return self.raise(error.SystemFailure, error_message);
|
||||
};
|
||||
|
||||
defer self.allocator.deallocate(file_data);
|
||||
|
||||
return self.execute_script(.{
|
||||
.name = file_path.to_string() orelse return self.raise(error.SystemFailure, error_message),
|
||||
.data = file_data,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn execute_script(self: *RuntimeEnv, source: ScriptSource) RuntimeError!*RuntimeRef {
|
||||
var ast = Ast.make(self.allocator);
|
||||
|
||||
defer ast.free();
|
||||
|
||||
{
|
||||
var tokenizer = tokens.Tokenizer{.source = source.data};
|
||||
|
||||
ast.parse(&tokenizer) catch |parse_error| switch (parse_error) {
|
||||
error.BadSyntax => return self.raise(error.BadSyntax, ast.error_message),
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
};
|
||||
}
|
||||
|
||||
var compiler = Compiler.make(self.allocator, &self.state);
|
||||
|
||||
defer compiler.free();
|
||||
|
||||
compiler.compile_ast(ast) catch |compile_error| return switch (compile_error) {
|
||||
error.OutOfMemory => error.OutOfMemory,
|
||||
error.UndefinedLocal => self.raise(error.BadOperation, "use of undefined local"),
|
||||
error.TooManyLocals => self.raise(error.OutOfMemory, "functions cannot contain more than 255 locals"),
|
||||
};
|
||||
|
||||
return self.execute_chunk(source.name, compiler.list_opcodes());
|
||||
}
|
||||
|
||||
pub fn free(self: *RuntimeEnv) void {
|
||||
self.bound_refs.free();
|
||||
self.state.free();
|
||||
}
|
||||
|
||||
pub fn get_field(self: *RuntimeEnv, indexable: *const RuntimeRef, field: []const u8) RuntimeError!*RuntimeRef {
|
||||
const interned_field = try self.intern(field);
|
||||
|
||||
defer self.discard(interned_field);
|
||||
|
||||
const indexable_object = try to_object(self, try indexable.fetch(self));
|
||||
|
||||
return ObjectInfo.cast(indexable_object.userinfo).get(.{
|
||||
.env = self,
|
||||
.indexable = indexable,
|
||||
.index = interned_field,
|
||||
.userdata = indexable_object.userdata,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn get_float(self: *RuntimeEnv, ref: *const RuntimeRef) RuntimeError!State.Float {
|
||||
return to_number(self, try ref.fetch(self));
|
||||
}
|
||||
|
||||
pub fn get_string(self: *RuntimeEnv, ref: *const RuntimeRef) RuntimeError![]const u8 {
|
||||
const object = try to_object(self, try ref.fetch(self));
|
||||
|
||||
if (ObjectInfo.cast(object.userinfo) != &string_info) {
|
||||
return self.raise(error.BadOperation, "object is not a string");
|
||||
}
|
||||
|
||||
return object.userdata;
|
||||
}
|
||||
|
||||
pub fn intern(self: *RuntimeEnv, data: []const u8) RuntimeError!*RuntimeRef {
|
||||
const data_object = try self.state.acquire_interned(data, &string_info);
|
||||
|
||||
errdefer self.state.release(data_object);
|
||||
|
||||
return @ptrFromInt(try self.bound_refs.insert(.{.object = data_object}));
|
||||
}
|
||||
|
||||
pub fn make(allocator: coral.io.Allocator, options: Options) RuntimeError!RuntimeEnv {
|
||||
var env = RuntimeEnv{
|
||||
.allocator = allocator,
|
||||
.bound_refs = VariantSlab.make(allocator),
|
||||
.state = State.make(allocator),
|
||||
.err_writer = options.err_writer,
|
||||
};
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
pub fn new_object(self: *RuntimeEnv, userdata: []const u8, info: *const ObjectInfo) RuntimeError!*RuntimeRef {
|
||||
const data_object = try self.state.acquire_new(userdata, info);
|
||||
|
||||
defer self.state.release(data_object);
|
||||
|
||||
return @ptrFromInt(try self.bound_refs.insert(.{.object = data_object}));
|
||||
}
|
||||
|
||||
pub fn raise(self: *RuntimeEnv, runtime_error: RuntimeError, error_message: []const u8) RuntimeError {
|
||||
// TODO: Print stack trace from state.
|
||||
coral.utf8.print_formatted(self.err_writer, "{name}@{line}: {message}", .{
|
||||
.name = "???",
|
||||
.line = @as(u64, 0),
|
||||
.message = error_message,
|
||||
}) catch return error.SystemFailure;
|
||||
|
||||
return runtime_error;
|
||||
}
|
||||
};
|
||||
|
||||
pub const RuntimeError = coral.io.AllocationError || State.PopError || error {
|
||||
BadSyntax,
|
||||
BadOperation,
|
||||
SystemFailure,
|
||||
};
|
||||
|
||||
pub const RuntimeRef = opaque {
|
||||
fn fetch(self: *const RuntimeRef, env: *RuntimeEnv) RuntimeError!State.Variant {
|
||||
return env.bound_refs.lookup(@intFromPtr(self)) orelse env.raise(error.BadOperation, "stale ref");
|
||||
}
|
||||
};
|
||||
|
||||
fn table_clean(userdata: []u8) void {
|
||||
@as(*Table, @ptrCast(@alignCast(userdata.ptr))).free();
|
||||
}
|
||||
|
||||
fn table_get(context: IndexContext) RuntimeError!*RuntimeRef {
|
||||
const table = @as(*Table, @ptrCast(@alignCast(context.userdata.ptr)));
|
||||
|
||||
switch (try context.index.fetch(context.env)) {
|
||||
.nil => return context.env.raise(error.BadOperation, "cannot index a table with nil"),
|
||||
.true => return context.env.raise(error.BadOperation, "cannot index a table with true"),
|
||||
.false => return context.env.raise(error.BadOperation, "cannot index a table with false"),
|
||||
|
||||
.object => |index_object| {
|
||||
const value = table.get_field(index_object);
|
||||
|
||||
errdefer if (value == .object) {
|
||||
context.env.state.release(value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try context.env.bound_refs.insert(value));
|
||||
},
|
||||
|
||||
.number => |index_number| {
|
||||
const value = table.get_index(@intFromFloat(index_number));
|
||||
|
||||
errdefer if (value == .object) {
|
||||
context.env.state.release(value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try context.env.bound_refs.insert(value));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
const table_info = ObjectInfo{
|
||||
.clean = table_clean,
|
||||
.get = table_get,
|
||||
.set = table_set,
|
||||
};
|
||||
|
||||
fn table_set(context: IndexContext, value: *const RuntimeRef) RuntimeError!void {
|
||||
const table = @as(*Table, @ptrCast(@alignCast(context.userdata.ptr)));
|
||||
|
||||
switch (try context.index.fetch(context.env)) {
|
||||
.nil => return context.env.raise(error.BadOperation, "cannot index a table with nil"),
|
||||
.true => return context.env.raise(error.BadOperation, "cannot index a table with true"),
|
||||
.false => return context.env.raise(error.BadOperation, "cannot index a table with false"),
|
||||
|
||||
.object => |index_object| {
|
||||
const fetched_value = try value.fetch(context.env);
|
||||
|
||||
if (fetched_value == .object) {
|
||||
try table.set_field(index_object, .{
|
||||
.object = context.env.state.acquire_instance(fetched_value.object),
|
||||
});
|
||||
} else {
|
||||
try table.set_field(index_object, fetched_value);
|
||||
}
|
||||
},
|
||||
|
||||
.number => |index_number| {
|
||||
const fetched_value = try value.fetch(context.env);
|
||||
|
||||
if (fetched_value == .object) {
|
||||
try table.set_index(@intFromFloat(index_number), .{
|
||||
.object = context.env.state.acquire_instance(fetched_value.object),
|
||||
});
|
||||
} else {
|
||||
try table.set_index(@intFromFloat(index_number), fetched_value);
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn to_number(env: *RuntimeEnv, variant: State.Variant) RuntimeError!State.Float {
|
||||
return switch (variant) {
|
||||
.nil => env.raise(error.BadOperation, "cannot convert nil to number"),
|
||||
.true => env.raise(error.BadOperation, "cannot convert true to number"),
|
||||
.false => env.raise(error.BadOperation, "cannot convert false to number"),
|
||||
.number => |number| number,
|
||||
.object => env.raise(error.BadOperation, "cannot convert object to number"),
|
||||
};
|
||||
}
|
||||
|
||||
fn to_object(env: *RuntimeEnv, variant: State.Variant) RuntimeError!*State.Object {
|
||||
return switch (variant) {
|
||||
.nil => env.raise(error.BadOperation, "cannot convert nil to object"),
|
||||
.true => env.raise(error.BadOperation, "cannot convert true to object"),
|
||||
.false => env.raise(error.BadOperation, "cannot convert false to object"),
|
||||
.number => env.raise(error.BadOperation, "cannot convert number to object"),
|
||||
.object => |object| object,
|
||||
};
|
||||
}
|
||||
|
||||
const string_info = ObjectInfo{
|
||||
|
||||
};
|
||||
|
|
|
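Taken together, the RuntimeEnv API introduced in this file is driven roughly as below. This is only a sketch against the signatures visible in this diff (`make`, `execute_script`, `get_float`, `discard`, `free`); the import paths, the allocator wiring, and the script text are assumptions inferred from the parser rather than documented syntax.

```zig
const coral = @import("coral");
const kym = @import("./kym.zig"); // assumed module path

pub fn run_demo(allocator: coral.io.Allocator) kym.RuntimeError!void {
    var env = try kym.RuntimeEnv.make(allocator, .{});

    defer env.free();

    // A chunk is a list of statements ending in at most one return.
    const result = try env.execute_script(.{
        .name = "demo",
        .data = "answer = 6 * 7\nreturn answer",
    });

    defer env.discard(result);

    // Refs are opaque; values come back out through typed getters.
    _ = try env.get_float(result);
}
```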
@ -2,13 +2,32 @@ const coral = @import("coral");
|
|||
|
||||
const tokens = @import("./tokens.zig");
|
||||
|
||||
const types = @import("./types.zig");
|
||||
|
||||
allocator: coral.io.Allocator,
|
||||
arena: coral.arena.Stacking,
|
||||
statements: StatementList,
|
||||
statements: Statement.List,
|
||||
error_message: []const u8,
|
||||
|
||||
pub const Expression = union (enum) {
|
||||
nil_literal,
|
||||
true_literal,
|
||||
false_literal,
|
||||
number_literal: []const u8,
|
||||
string_literal: []const u8,
|
||||
table_literal: NamedList,
|
||||
grouped_expression: *Expression,
|
||||
get_local: []const u8,
|
||||
|
||||
binary_operation: struct {
|
||||
operator: BinaryOperator,
|
||||
lhs_expression: *Expression,
|
||||
rhs_expression: *Expression,
|
||||
},
|
||||
|
||||
unary_operation: struct {
|
||||
operator: UnaryOperator,
|
||||
expression: *Expression,
|
||||
},
|
||||
|
||||
pub const BinaryOperator = enum {
|
||||
addition,
|
||||
subtraction,
|
||||
|
@ -35,53 +54,43 @@ pub const BinaryOperator = enum {
|
|||
}
|
||||
};
|
||||
|
||||
pub const Expression = union (enum) {
|
||||
nil_literal,
|
||||
true_literal,
|
||||
false_literal,
|
||||
integer_literal: types.Integer,
|
||||
float_literal: types.Float,
|
||||
string_literal: []const u8,
|
||||
array_literal: coral.list.Stack(Expression),
|
||||
|
||||
table_literal: coral.list.Stack(struct {
|
||||
pub const NamedList = coral.list.Stack(struct {
|
||||
identifier: []const u8,
|
||||
expression: Expression,
|
||||
}),
|
||||
});
|
||||
|
||||
grouped_expression: *Expression,
|
||||
|
||||
binary_operation: struct {
|
||||
operator: BinaryOperator,
|
||||
lhs_expression: *Expression,
|
||||
rhs_expression: *Expression,
|
||||
},
|
||||
|
||||
unary_operation: struct {
|
||||
operator: UnaryOperator,
|
||||
expression: *Expression,
|
||||
},
|
||||
pub const List = coral.list.Stack(Expression);
|
||||
};
|
||||
|
||||
const ExpressionParser = fn (self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!Expression;
|
||||
const ExpressionParser = fn (self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression;
|
||||
|
||||
pub const ParseError = error {
|
||||
OutOfMemory,
|
||||
BadSyntax,
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub const Statement = union (enum) {
|
||||
return_expression: Expression,
|
||||
return_nothing,
|
||||
};
|
||||
|
||||
const StatementList = coral.list.Stack(Statement);
|
||||
set_local: struct {
|
||||
identifier: []const coral.io.Byte,
|
||||
expression: Expression,
|
||||
},
|
||||
|
||||
const List = coral.list.Stack(Statement);
|
||||
};
|
||||
|
||||
const UnaryOperator = enum {
|
||||
boolean_negation,
|
||||
numeric_negation,
|
||||
};
|
||||
|
||||
fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime operators: []const BinaryOperator) ExpressionParser {
|
||||
fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime operators: []const Expression.BinaryOperator) ExpressionParser {
|
||||
return struct {
|
||||
fn parse(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!Expression {
|
||||
fn parse(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression {
|
||||
var expression = try parse_next(self, tokenizer);
|
||||
|
||||
{
|
||||
|
@ -111,34 +120,30 @@ fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime opera
|
|||
}.parse;
|
||||
}
|
||||
|
||||
fn check_syntax(self: *Self, condition: bool, error_message: []const u8) types.ParseError!void {
|
||||
fn check_syntax(self: *Self, condition: bool, message: []const u8) ParseError!void {
|
||||
if (condition) {
|
||||
return;
|
||||
}
|
||||
|
||||
return self.fail_syntax(error_message);
|
||||
return self.fail_syntax(message);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
self.arena.clear_allocations();
|
||||
self.statements.deinit(self.allocator);
|
||||
}
|
||||
|
||||
fn fail_syntax(self: *Self, error_message: []const u8) types.ParseError {
|
||||
self.error_message = error_message;
|
||||
fn fail_syntax(self: *Self, message: []const u8) ParseError {
|
||||
self.error_message = message;
|
||||
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator) coral.io.AllocationError!Self {
|
||||
return Self{
|
||||
.arena = .{
|
||||
.base_allocator = allocator,
|
||||
.min_page_size = 4096,
|
||||
},
|
||||
pub fn free(self: *Self) void {
|
||||
self.arena.free();
|
||||
self.statements.free();
|
||||
}
|
||||
|
||||
pub fn make(allocator: coral.io.Allocator) Self {
|
||||
return Self{
|
||||
.arena = coral.arena.Stacking.make(allocator, 4096),
|
||||
.allocator = allocator,
|
||||
.statements = .{},
|
||||
.statements = Statement.List.make(allocator),
|
||||
.error_message = "",
|
||||
};
|
||||
}
|
||||
|
@ -147,19 +152,17 @@ pub fn list_statements(self: Self) []const Statement {
|
|||
return self.statements.values;
|
||||
}
|
||||
|
||||
pub fn parse(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!void {
|
||||
self.reset();
|
||||
pub fn parse(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!void {
|
||||
self.free();
|
||||
|
||||
errdefer self.reset();
|
||||
|
||||
var has_not_returned_yet = true;
|
||||
var has_returned = false;
|
||||
|
||||
while (tokenizer.step(.{.include_newlines = false})) {
|
||||
switch (tokenizer.current_token) {
|
||||
.keyword_return => {
|
||||
try self.check_syntax(has_not_returned_yet, "cannot return more than once per function scope");
|
||||
try self.check_syntax(!has_returned, "multiple returns in function scope but expected only one");
|
||||
|
||||
try self.statements.push_one(self.allocator, get_statement: {
|
||||
try self.statements.push_one(get_statement: {
|
||||
if (tokenizer.step(.{.include_newlines = true})) {
|
||||
if (tokenizer.current_token != .newline) {
|
||||
break: get_statement .{.return_expression = try self.parse_expression(tokenizer)};
|
||||
|
@ -175,7 +178,34 @@ pub fn parse(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!void {
|
|||
break: get_statement .return_nothing;
|
||||
});
|
||||
|
||||
has_not_returned_yet = false;
|
||||
has_returned = true;
|
||||
},
|
||||
|
||||
.local => |identifier| {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = true}), "statement has no effect");
|
||||
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_equals => {
|
||||
try self.check_syntax(
|
||||
tokenizer.step(.{.include_newlines = true}),
|
||||
"expected expression after `=`");
|
||||
|
||||
try self.statements.push_one(.{
|
||||
.set_local = .{
|
||||
.identifier = identifier,
|
||||
.expression = try self.parse_expression(tokenizer)
|
||||
}
|
||||
});
|
||||
|
||||
if (tokenizer.step(.{.include_newlines = true})) {
|
||||
try self.check_syntax(
|
||||
tokenizer.current_token == .newline,
|
||||
"expected end of declaration after variable assignment");
|
||||
}
|
||||
},
|
||||
|
||||
else => return self.fail_syntax("expected `=` after local"),
|
||||
}
|
||||
},
|
||||
|
||||
else => return self.fail_syntax("invalid statement"),
|
||||
|
@ -199,7 +229,7 @@ const parse_expression = binary_operation_parser(parse_equality, &.{
|
|||
.subtraction,
|
||||
});
|
||||
|
||||
fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!Expression {
|
||||
fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression {
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_paren_left => {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "expected an expression after `(`");
|
||||
|
@ -213,77 +243,25 @@ fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!Expr
|
|||
return Expression{.grouped_expression = try coral.io.allocate_one(self.arena.as_allocator(), expression)};
|
||||
},
|
||||
|
||||
.integer => |value| {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return Expression{
|
||||
.integer_literal = coral.utf8.parse_decimal(types.Integer, value, .{}) catch |parse_error| {
|
||||
return self.fail_syntax(switch (parse_error) {
|
||||
error.BadSyntax => "invalid integer literal",
|
||||
error.IntOverflow => "integer literal is too big",
|
||||
});
|
||||
},
|
||||
};
|
||||
},
|
||||
|
||||
.real => |value| {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return Expression{
|
||||
.float_literal = coral.utf8.parse_decimal(types.Float, value, .{}) catch |parse_error| {
|
||||
return self.fail_syntax(switch (parse_error) {
|
||||
error.BadSyntax => "invalid float literal",
|
||||
});
|
||||
},
|
||||
};
|
||||
.number => |value| {
|
||||
return Expression{.number_literal = value};
|
||||
},
|
||||
|
||||
.string => |value| {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return Expression{.string_literal = value};
|
||||
},
|
||||
|
||||
.symbol_bracket_left => {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end of array literal");
|
||||
|
||||
var expression = Expression{.array_literal = .{}};
|
||||
|
||||
coral.debug.assert(expression == .array_literal);
|
||||
|
||||
const allocator = self.arena.as_allocator();
|
||||
const array_average_maximum = 32;
|
||||
|
||||
try expression.array_literal.grow(allocator, array_average_maximum);
|
||||
|
||||
while (true) {
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_bracket_right => {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return expression;
|
||||
},
|
||||
|
||||
else => {
|
||||
try self.check_syntax(
|
||||
tokenizer.step(.{.include_newlines = false}),
|
||||
"expected `]` or expression after `[`");
|
||||
|
||||
try expression.array_literal.push_one(allocator, try self.parse_expression(tokenizer));
|
||||
},
|
||||
}
|
||||
}
|
||||
.local => |identifier| {
|
||||
return Expression{.get_local = identifier};
|
||||
},
|
||||
|
||||
.symbol_brace_left => {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end of table literal");
|
||||
|
||||
var expression = Expression{.table_literal = .{}};
|
||||
var expression = Expression{.table_literal = Expression.NamedList.make(self.arena.as_allocator())};
|
||||
|
||||
coral.debug.assert(expression == .table_literal);
|
||||
|
||||
const allocator = self.arena.as_allocator();
|
||||
|
||||
while (true) {
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_brace_right => {
|
||||
|
@ -299,11 +277,13 @@ fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) types.ParseError!Expr
|
|||
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end after `=`");
|
||||
|
||||
try expression.table_literal.push_one(allocator, .{
|
||||
try expression.table_literal.push_one(.{
|
||||
.identifier = identifier,
|
||||
.expression = try self.parse_expression(tokenizer),
|
||||
});
|
||||
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end of table");
|
||||
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_comma => _ = tokenizer.step(.{.include_newlines = false}),
|
||||
|
||||
|
@ -362,8 +342,3 @@ const parse_term = binary_operation_parser(parse_factor, &.{
|
|||
.multiplication,
|
||||
.divsion,
|
||||
});
|
||||
|
||||
pub fn reset(self: *Self) void {
|
||||
self.statements.clear();
|
||||
self.arena.clear_allocations();
|
||||
}
|
||||
|
|
|
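One idea worth calling out from the Ast changes above: operator precedence is built by composing parser levels at comptime, with `binary_operation_parser(parse_next, operators)` returning a function that folds left-associative operations over the next tighter level. The standalone toy below illustrates only that composition trick, over single-digit operands instead of kym tokens; all names are invented for the sketch.

```zig
const std = @import("std");

const Parser = fn (tokens: []const u8, index: *usize) f64;

fn parse_digit(tokens: []const u8, index: *usize) f64 {
    const value: f64 = @floatFromInt(tokens[index.*] - '0');

    index.* += 1;

    return value;
}

// Returns a parser that folds the listed operators left-to-right over `parse_next`.
fn binary_parser(comptime parse_next: Parser, comptime operators: []const u8) Parser {
    return struct {
        fn parse(tokens: []const u8, index: *usize) f64 {
            var lhs = parse_next(tokens, index);

            while (index.* < tokens.len and std.mem.indexOfScalar(u8, operators, tokens[index.*]) != null) {
                const operator = tokens[index.*];

                index.* += 1;

                const rhs = parse_next(tokens, index);

                lhs = if (operator == '+') lhs + rhs else lhs * rhs;
            }

            return lhs;
        }
    }.parse;
}

// Each precedence level wraps the next tighter one, mirroring parse_expression / parse_term above.
const parse_term = binary_parser(parse_digit, "*");
const parse_expression = binary_parser(parse_term, "+");

test "multiplication binds tighter than addition" {
    var index: usize = 0;

    try std.testing.expectEqual(@as(f64, 7), parse_expression("1+2*3", &index));
}
```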
@ -1,240 +0,0 @@
|
|||
const Ast = @import("./Ast.zig");
|
||||
|
||||
const Environment = @import("./Environment.zig");
|
||||
|
||||
const coral = @import("coral");
|
||||
|
||||
const types = @import("./types.zig");
|
||||
|
||||
const tokens = @import("./tokens.zig");
|
||||
|
||||
env: *Environment,
|
||||
message_name_len: usize,
|
||||
message_data: Buffer,
|
||||
bytecode_buffer: Buffer,
|
||||
|
||||
const Buffer = coral.list.Stack(u8);
|
||||
|
||||
const Opcode = enum (u8) {
|
||||
ret,
|
||||
|
||||
push_nil,
|
||||
push_true,
|
||||
push_false,
|
||||
push_zero,
|
||||
push_integer,
|
||||
push_float,
|
||||
push_object,
|
||||
push_array,
|
||||
push_table,
|
||||
|
||||
not,
|
||||
neg,
|
||||
|
||||
add,
|
||||
sub,
|
||||
mul,
|
||||
div,
|
||||
|
||||
compare_eq,
|
||||
compare_gt,
|
||||
compare_lt,
|
||||
compare_ge,
|
||||
compare_le,
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
fn clear_error_details(self: *Self) void {
|
||||
coral.debug.assert(self.message_data.values.len >= self.message_name_len);
|
||||
coral.debug.assert(self.message_data.drop(self.message_data.values.len - self.message_name_len));
|
||||
}
|
||||
|
||||
pub fn compile(self: *Self, data: []const u8) types.RuntimeError!void {
|
||||
var ast = try Ast.init(self.env.allocator);
|
||||
|
||||
defer ast.deinit();
|
||||
|
||||
{
|
||||
var tokenizer = tokens.Tokenizer{.source = data};
|
||||
|
||||
ast.parse(&tokenizer) catch |init_error| {
|
||||
if (init_error == error.BadSyntax) {
|
||||
self.clear_error_details();
|
||||
|
||||
var writable_data = coral.list.Writable{
|
||||
.allocator = self.env.allocator,
|
||||
.list = .{.stack = &self.message_data},
|
||||
};
|
||||
|
||||
coral.utf8.print_formatted(writable_data.as_writer(), "@({line}): {name}", .{
|
||||
.line = tokenizer.lines_stepped,
|
||||
.name = ast.error_message,
|
||||
}) catch return error.OutOfMemory;
|
||||
}
|
||||
|
||||
return init_error;
|
||||
};
|
||||
}
|
||||
|
||||
for (ast.list_statements()) |statement| {
|
||||
switch (statement) {
|
||||
.return_expression => |return_expression| {
|
||||
try self.compile_expression(return_expression);
|
||||
try self.emit_opcode(.ret);
|
||||
},
|
||||
|
||||
.return_nothing => {
|
||||
try self.emit_opcode(.push_nil);
|
||||
try self.emit_opcode(.ret);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn compile_expression(self: *Self, expression: Ast.Expression) types.RuntimeError!void {
|
||||
switch (expression) {
|
||||
.nil_literal => try self.emit_opcode(.push_nil),
|
||||
.true_literal => try self.emit_opcode(.push_true),
|
||||
.false_literal => try self.emit_opcode(.push_false),
|
||||
|
||||
.integer_literal => |literal| {
|
||||
if (literal == 0) {
|
||||
try self.emit_opcode(.push_zero);
|
||||
} else {
|
||||
try self.emit_opcode(.push_integer);
|
||||
try self.emit_float(0);
|
||||
}
|
||||
},
|
||||
|
||||
.float_literal => |literal| {
|
||||
if (literal == 0) {
|
||||
try self.emit_opcode(.push_zero);
|
||||
} else {
|
||||
try self.emit_opcode(.push_float);
|
||||
try self.emit_float(literal);
|
||||
}
|
||||
},
|
||||
|
||||
.string_literal => |literal| {
|
||||
try self.emit_opcode(.push_object);
|
||||
try self.emit_object(try self.intern(literal));
|
||||
},
|
||||
|
||||
.array_literal => |elements| {
|
||||
if (elements.values.len > coral.math.max_int(@typeInfo(types.Integer).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
for (elements.values) |element_expression| {
|
||||
try self.compile_expression(element_expression);
|
||||
}
|
||||
|
||||
try self.emit_opcode(.push_array);
|
||||
try self.emit_integer(@intCast(types.Integer, elements.values.len));
|
||||
},
|
||||
|
||||
.table_literal => |fields| {
|
||||
if (fields.values.len > coral.math.max_int(@typeInfo(types.Integer).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
for (fields.values) |field| {
|
||||
try self.compile_expression(field.expression);
|
||||
try self.emit_opcode(.push_object);
|
||||
try self.emit_object(try self.intern(field.identifier));
|
||||
}
|
||||
|
||||
try self.emit_opcode(.push_table);
|
||||
try self.emit_integer(@intCast(types.Integer, fields.values.len));
|
||||
},
|
||||
|
||||
.binary_operation => |operation| {
|
||||
try self.compile_expression(operation.lhs_expression.*);
|
||||
try self.compile_expression(operation.rhs_expression.*);
|
||||
|
||||
try self.emit_opcode(switch (operation.operator) {
|
||||
.addition => .add,
|
||||
.subtraction => .sub,
|
||||
.multiplication => .mul,
|
||||
.divsion => .div,
|
||||
.greater_equals_comparison => .compare_eq,
|
||||
.greater_than_comparison => .compare_gt,
|
||||
.equals_comparison => .compare_ge,
|
||||
.less_than_comparison => .compare_lt,
|
||||
.less_equals_comparison => .compare_le,
|
||||
});
|
||||
},
|
||||
|
||||
.unary_operation => |operation| {
|
||||
try self.compile_expression(operation.expression.*);
|
||||
|
||||
try self.emit_opcode(switch (operation.operator) {
|
||||
.boolean_negation => .not,
|
||||
.numeric_negation => .neg,
|
||||
});
|
||||
},
|
||||
|
||||
.grouped_expression => |grouped_expression| {
|
||||
try self.compile_expression(grouped_expression.*);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
self.bytecode_buffer.deinit(self.env.allocator);
|
||||
self.message_data.deinit(self.env.allocator);
|
||||
|
||||
self.message_name_len = 0;
|
||||
}
|
||||
|
||||
pub fn emit_float(self: *Self, float: types.Float) coral.io.AllocationError!void {
|
||||
try self.bytecode_buffer.push_all(self.env.allocator, coral.io.bytes_of(&float));
|
||||
}
|
||||
|
||||
pub fn emit_integer(self: *Self, integer: types.Integer) coral.io.AllocationError!void {
|
||||
try self.bytecode_buffer.push_all(self.env.allocator, coral.io.bytes_of(&integer));
|
||||
}
|
||||
|
||||
pub fn emit_object(self: *Self, object: types.Object) coral.io.AllocationError!void {
|
||||
try self.bytecode_buffer.push_all(self.env.allocator, coral.io.bytes_of(&object));
|
||||
}
|
||||
|
||||
pub fn emit_opcode(self: *Self, opcode: Opcode) coral.io.AllocationError!void {
|
||||
try self.bytecode_buffer.push_one(self.env.allocator, @enumToInt(opcode));
|
||||
}
|
||||
|
||||
pub fn error_details(self: Self) []const u8 {
|
||||
coral.debug.assert(self.message_data.values.len >= self.message_name_len);
|
||||
|
||||
return self.message_data.values;
|
||||
}
|
||||
|
||||
pub fn init(env: *Environment, chunk_name: []const u8) coral.io.AllocationError!Self {
|
||||
var message_data = Buffer{};
|
||||
|
||||
try message_data.push_all(env.allocator, chunk_name);
|
||||
|
||||
errdefer message_data.deinit(env.allocator);
|
||||
|
||||
return Self{
|
||||
.env = env,
|
||||
.message_data = message_data,
|
||||
.bytecode_buffer = .{},
|
||||
.message_name_len = chunk_name.len,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn intern(self: *Self, string: []const u8) coral.io.AllocationError!types.Object {
|
||||
const interned_string = try self.env.intern(string);
|
||||
|
||||
coral.debug.assert(interned_string == .object);
|
||||
|
||||
return interned_string.object;
|
||||
}
|
||||
|
||||
pub fn name(self: Self) []const u8 {
|
||||
coral.debug.assert(self.message_data.values.len >= self.message_name_len);
|
||||
|
||||
return self.message_data.values[0 .. self.message_name_len];
|
||||
}
|
|
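For contrast with the new tagged-union `Opcode` list, the deleted Chunk above serialized programs into a flat byte buffer: one opcode byte emitted by `emit_opcode`, followed by the raw bytes of any immediate (`emit_float`, `emit_integer`, `emit_object`). A standalone sketch of that flat encoding and of how an executor would read an immediate back out (std-only, illustrative names):

```zig
const std = @import("std");

const Opcode = enum(u8) { push_float, ret };

// Opcode byte first, then the immediate's raw bytes, as emit_opcode + emit_float did.
fn emit_push_float(buffer: *std.ArrayList(u8), value: f64) !void {
    try buffer.append(@intFromEnum(Opcode.push_float));
    try buffer.appendSlice(std.mem.asBytes(&value));
}

// Reads the immediate that follows the opcode byte at the start of `bytes`.
fn read_float_immediate(bytes: []const u8) f64 {
    return std.mem.bytesToValue(f64, bytes[1..][0..@sizeOf(f64)]);
}

test "an immediate round-trips through the byte buffer" {
    var buffer = std.ArrayList(u8).init(std.testing.allocator);

    defer buffer.deinit();

    try emit_push_float(&buffer, 42.5);

    try std.testing.expectEqual(@as(f64, 42.5), read_float_immediate(buffer.items));
}
```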
@ -1,461 +0,0 @@
|
|||
const Chunk = @import("./Chunk.zig");
|
||||
|
||||
const coral = @import("coral");
|
||||
|
||||
const file = @import("../file.zig");
|
||||
|
||||
const types = @import("./types.zig");
|
||||
|
||||
const tokens = @import("./tokens.zig");
|
||||
|
||||
allocator: coral.io.Allocator,
|
||||
heap: ObjectSlab,
|
||||
global_object: types.Object,
|
||||
interned: InternTable,
|
||||
reporter: Reporter,
|
||||
values: ValueStack,
|
||||
calls: CallStack,
|
||||
|
||||
const CallStack = coral.list.Stack(struct {
|
||||
ip: usize,
|
||||
slots: []types.Val,
|
||||
});
|
||||
|
||||
pub const DataSource = struct {
|
||||
name: []const u8,
|
||||
data: []const u8,
|
||||
};
|
||||
|
||||
pub const ExecuteFileError = file.System.OpenError || coral.io.StreamError || file.ReadError || types.RuntimeError;
|
||||
|
||||
pub const InitOptions = struct {
|
||||
values_max: u32,
|
||||
calls_max: u32,
|
||||
reporter: Reporter,
|
||||
};
|
||||
|
||||
const InternTable = coral.table.Hashed([]const u8, types.Object, coral.table.string_keyer);
|
||||
|
||||
const Object = struct {
|
||||
ref_count: usize,
|
||||
|
||||
state: struct {
|
||||
info: ObjectInfo,
|
||||
userdata: []u8,
|
||||
fields: Fields,
|
||||
},
|
||||
|
||||
const Fields = coral.table.Hashed(*Object, types.Val, .{
|
||||
.hasher = struct {
|
||||
fn hash(object: *Object) coral.table.Hash {
|
||||
coral.debug.assert(object.state.info.identity == null);
|
||||
|
||||
return coral.table.hash_string(object.state.userdata);
|
||||
}
|
||||
}.hash,
|
||||
|
||||
.comparer = struct {
|
||||
fn compare(object_a: *Object, object_b: *Object) isize {
|
||||
coral.debug.assert(object_a.state.info.identity == null);
|
||||
coral.debug.assert(object_b.state.info.identity == null);
|
||||
|
||||
return coral.io.compare(object_a.state.userdata, object_b.state.userdata);
|
||||
}
|
||||
}.compare,
|
||||
});
|
||||
|
||||
pub fn acquire(self: *Object) void {
|
||||
coral.debug.assert(self.ref_count != 0);
|
||||
|
||||
self.ref_count += 1;
|
||||
}
|
||||
};
|
||||
|
||||
pub const ObjectInfo = struct {
|
||||
caller: *const Caller = default_call,
|
||||
deinitializer: *const Deinitializer = default_deinitialize,
|
||||
getter: *const Getter = default_get,
|
||||
identity: ?*const anyopaque = null,
|
||||
setter: *const Setter = default_set,
|
||||
|
||||
pub const CallContext = struct {
|
||||
env: *Self,
|
||||
caller: types.Ref,
|
||||
callable: types.Ref,
|
||||
args: []const types.Ref,
|
||||
};
|
||||
|
||||
pub const Caller = fn (context: CallContext) types.RuntimeError!types.Val;
|
||||
|
||||
pub const DeinitializeContext = struct {
|
||||
env: *Self,
|
||||
obj: types.Ref,
|
||||
};
|
||||
|
||||
pub const Deinitializer = fn (context: DeinitializeContext) void;
|
||||
|
||||
pub const GetContext = struct {
|
||||
env: *Self,
|
||||
indexable: types.Ref,
|
||||
index: types.Ref,
|
||||
};
|
||||
|
||||
pub const Getter = fn (context: GetContext) types.RuntimeError!types.Val;
|
||||
|
||||
pub const SetContext = struct {
|
||||
env: *Self,
|
||||
indexable: types.Ref,
|
||||
index: types.Ref,
|
||||
value: types.Ref,
|
||||
};
|
||||
|
||||
pub const Setter = fn (context: SetContext) types.RuntimeError!void;
|
||||
|
||||
fn default_call(context: CallContext) types.RuntimeError!types.Val {
|
||||
return context.env.fail("attempt to call non-callable");
|
||||
}
|
||||
|
||||
fn default_deinitialize(_: DeinitializeContext) void {
|
||||
// Nothing to deinitialize by default.
|
||||
}
|
||||
|
||||
fn default_get(context: GetContext) types.RuntimeError!types.Val {
|
||||
return context.env.get_field(context.indexable, context.index);
|
||||
}
|
||||
|
||||
fn default_set(context: SetContext) types.RuntimeError!void {
|
||||
return context.env.fail("attempt to set non-indexable");
|
||||
}
|
||||
};
|
||||
|
||||
const ObjectSlab = coral.slab.Map(@typeInfo(u32).Int, Object);
|
||||
|
||||
pub const Reporter = coral.io.Functor(void, []const u8);
|
||||
|
||||
const Self = @This();
|
||||
|
||||
const ValueStack = coral.list.Stack(types.Ref);
|
||||
|
||||
pub fn call(self: *Self, caller: types.Ref, maybe_index: ?types.Ref, args: []const types.Ref) types.RuntimeError!types.Val {
|
||||
if (maybe_index) |index| {
|
||||
try self.check(caller == .object, "invalid type conversion: object");
|
||||
|
||||
const callable = try self.get_object(caller, index);
|
||||
|
||||
defer self.discard(callable);
|
||||
try self.check(callable == .object, "invalid type conversion: object");
|
||||
|
||||
return self.heap.fetch(callable.object).state.info.caller(.{
|
||||
.env = self,
|
||||
.callable = callable.as_ref(),
|
||||
.caller = caller,
|
||||
.args = args,
|
||||
});
|
||||
}
|
||||
|
||||
return self.heap.fetch(caller.object).state.info.caller(.{
|
||||
.env = self,
|
||||
.callable = caller,
|
||||
.caller = .{.object = self.global_object},
|
||||
.args = args,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn check(self: *Self, condition: bool, failure_message: []const u8) !void {
|
||||
if (condition) {
|
||||
return;
|
||||
}
|
||||
|
||||
return self.fail(failure_message);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
self.object_release(self.global_object);
|
||||
|
||||
{
|
||||
var interned_iterable = InternTable.Iterable{.hashed_map = &self.interned};
|
||||
|
||||
while (interned_iterable.next()) |entry| {
|
||||
self.object_release(entry.value);
|
||||
}
|
||||
}
|
||||
|
||||
self.interned.deinit(self.allocator);
|
||||
self.values.deinit(self.allocator);
|
||||
self.calls.deinit(self.allocator);
|
||||
coral.debug.assert(self.heap.is_empty());
|
||||
self.heap.deinit(self.allocator);
|
||||
}
|
||||
|
||||
pub fn discard(self: *Self, val: types.Val) void {
|
||||
switch (val) {
|
||||
.object => |object| self.object_release(object),
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn execute_data(self: *Self, source: DataSource) types.RuntimeError!types.Val {
|
||||
const typeid = "<chunk>";
|
||||
|
||||
const Behaviors = struct {
|
||||
fn deinitialize(context: ObjectInfo.DeinitializeContext) void {
|
||||
(context.env.native_cast(context.obj, typeid, Chunk) catch unreachable).deinit();
|
||||
}
|
||||
};
|
||||
|
||||
var compiled_chunk = init_compiled_chunk: {
|
||||
var chunk = try Chunk.init(self, source.name);
|
||||
|
||||
errdefer chunk.deinit();
|
||||
|
||||
chunk.compile(source.data) catch |compile_error| {
|
||||
self.reporter.invoke(chunk.error_details());
|
||||
|
||||
return compile_error;
|
||||
};
|
||||
|
||||
break: init_compiled_chunk chunk;
|
||||
};
|
||||
|
||||
const script = try self.new_object(coral.io.bytes_of(&compiled_chunk), .{
|
||||
.identity = typeid,
|
||||
.deinitializer = Behaviors.deinitialize,
|
||||
});
|
||||
|
||||
defer self.discard(script);
|
||||
|
||||
return try self.call(script.as_ref(), null, &.{});
|
||||
}
|
||||
|
||||
pub fn execute_file(self: *Self, fs: file.System, file_path: file.Path) ExecuteFileError!types.Val {
|
||||
const readable_file = try fs.open_readable(file_path);
|
||||
|
||||
defer readable_file.close();
|
||||
|
||||
var file_data = coral.list.Stack(u8){};
|
||||
const file_size = (try fs.query_info(file_path)).size;
|
||||
|
||||
try file_data.grow(self.allocator, file_size);
|
||||
|
||||
defer file_data.deinit(self.allocator);
|
||||
|
||||
{
|
||||
var writable_data = coral.list.Writable{
|
||||
.allocator = self.allocator,
|
||||
.list = .{.stack = &file_data},
|
||||
};
|
||||
|
||||
var stream_buffer = [_]u8{0} ** 4096;
|
||||
|
||||
if ((try coral.io.stream(writable_data.as_writer(), readable_file.as_reader(), &stream_buffer)) != file_size) {
|
||||
return error.ReadFailure;
|
||||
}
|
||||
}
|
||||
|
||||
return try self.execute_data(.{
|
||||
.name = try file_path.to_string(),
|
||||
.data = file_data.values,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn fail(self: *Self, failure_message: []const u8) types.CheckError {
|
||||
self.reporter.invoke(failure_message);
|
||||
|
||||
return error.CheckFailed;
|
||||
}
|
||||
|
||||
pub fn get_field(self: *Self, indexable: types.Ref, field: types.Ref) !types.Val {
|
||||
try self.check(indexable == .object, "invalid type conversion: object");
|
||||
try self.check(field == .object, "invalid type conversion: object");
|
||||
|
||||
const value = get_value: {
|
||||
var field_data = self.heap.fetch(field.object);
|
||||
|
||||
break: get_value self.heap.fetch(indexable.object).state.fields.lookup(&field_data) orelse {
|
||||
return .nil;
|
||||
};
|
||||
};
|
||||
|
||||
if (value == .object) {
|
||||
var value_data = self.heap.fetch(value.object);
|
||||
|
||||
value_data.acquire();
|
||||
self.heap.assign(value.object, value_data);
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
pub fn get_object(self: *Self, indexable: types.Ref, index: types.Ref) types.RuntimeError!types.Val {
|
||||
try self.check(indexable == .object, "invalid type conversion: object");
|
||||
|
||||
return self.heap.fetch(indexable.object).state.info.getter(.{
|
||||
.env = self,
|
||||
.indexable = indexable,
|
||||
.index = index,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator, options: InitOptions) !Self {
|
||||
var env = Self{
|
||||
.global_object = 0,
|
||||
.allocator = allocator,
|
||||
.reporter = options.reporter,
|
||||
.interned = .{},
|
||||
.values = .{},
|
||||
.calls = .{},
|
||||
.heap = .{},
|
||||
};
|
||||
|
||||
errdefer {
|
||||
env.values.deinit(allocator);
|
||||
env.calls.deinit(allocator);
|
||||
}
|
||||
|
||||
try env.values.grow(allocator, options.values_max * options.calls_max);
|
||||
try env.calls.grow(allocator, options.calls_max);
|
||||
|
||||
{
|
||||
const globals = try env.new_object(&.{}, .{
|
||||
.identity = "KYM GLOBAL OBJECT OC DO NOT STEAL",
|
||||
});
|
||||
|
||||
coral.debug.assert(globals == .object);
|
||||
|
||||
env.global_object = globals.object;
|
||||
}
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
pub fn intern(self: *Self, string: []const u8) coral.io.AllocationError!types.Ref {
|
||||
return .{.object = self.interned.lookup(string) orelse {
|
||||
const reference = try self.new_string(string);
|
||||
|
||||
coral.debug.assert(reference == .object);
|
||||
coral.debug.assert(try self.interned.insert(self.allocator, string, reference.object));
|
||||
|
||||
return .{.object = reference.object};
|
||||
}};
|
||||
}
|
||||
|
||||
pub fn native_cast(self: *Self, castable: types.Ref, id: *const anyopaque, comptime Type: type) types.RuntimeError!*Type {
|
||||
try self.check(castable == .object, "invalid type conversion: object");
|
||||
|
||||
const object = self.heap.fetch(castable.object);
|
||||
const is_expected_type = (object.state.info.identity == id) and (object.state.userdata.len == @sizeOf(Type));
|
||||
|
||||
try self.check(is_expected_type, "invalid object cast: native type");
|
||||
|
||||
return @ptrCast(*Type, @alignCast(@alignOf(Type), object.state.userdata));
|
||||
}
|
||||
|
||||
pub fn new_array(self: *Self) coral.io.AllocationError!types.Val {
|
||||
return try self.new_object(.{
|
||||
|
||||
});
|
||||
}
|
||||
|
||||
pub fn new_object(self: *Self, userdata: []const u8, info: ObjectInfo) coral.io.AllocationError!types.Val {
|
||||
const allocation = try coral.io.allocate_many(self.allocator, userdata.len, u8);
|
||||
|
||||
errdefer coral.io.deallocate(self.allocator, allocation);
|
||||
|
||||
coral.io.copy(allocation, userdata);
|
||||
|
||||
return .{.object = try self.heap.insert(self.allocator, .{
|
||||
.ref_count = 1,
|
||||
|
||||
.state = .{
|
||||
.info = info,
|
||||
.userdata = allocation,
|
||||
.fields = .{},
|
||||
},
|
||||
})};
|
||||
}
|
||||
|
||||
pub fn new_string(self: *Self, data: []const u8) coral.io.AllocationError!types.Val {
|
||||
const Behavior = struct {
|
||||
fn get_byte(context: ObjectInfo.GetContext) types.RuntimeError!types.Val {
|
||||
switch (context.index) {
|
||||
.integer => |integer| {
|
||||
const string = context.env.string_cast(context.indexable) catch unreachable;
|
||||
|
||||
try context.env.check(coral.math.is_clamped(integer, 0, string.len), "index out of string bounds");
|
||||
|
||||
return types.Val{.integer = string[@intCast(usize, integer)]};
|
||||
},
|
||||
|
||||
else => return context.env.fail("attempt to index string with non-integer value"),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
return try self.new_object(data, .{
|
||||
.getter = Behavior.get_byte,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn object_release(self: *Self, object: types.Object) void {
|
||||
var data = self.heap.fetch(object);
|
||||
|
||||
coral.debug.assert(data.ref_count != 0);
|
||||
|
||||
data.ref_count -= 1;
|
||||
|
||||
if (data.ref_count == 0) {
|
||||
data.state.info.deinitializer(.{
|
||||
.env = self,
|
||||
.obj = .{.object = object},
|
||||
});
|
||||
|
||||
// TODO: Free individual key-value pairs of fields
|
||||
data.state.fields.deinit(self.allocator);
|
||||
coral.io.deallocate(self.allocator, data.state.userdata);
|
||||
self.heap.remove(object);
|
||||
} else {
|
||||
self.heap.assign(object, data);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_global(self: *Self, global_name: []const u8, value: types.Ref) coral.io.AllocationError!void {
|
||||
try self.globals.assign(self.allocator, global_name, value);
|
||||
}
|
||||
|
||||
pub fn set_object(self: *Self, obj: *Object, index: types.Ref, value: types.Ref) types.RuntimeError!void {
|
||||
return obj.behavior.setter(.{
|
||||
.env = self,
|
||||
.obj = obj,
|
||||
.index = index,
|
||||
.value = value,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn string_cast(self: *Self, value: types.Ref) ![]const u8 {
|
||||
try self.check(value == .object, "invalid type conversion: object");
|
||||
|
||||
const object = self.heap.fetch(value.object);
|
||||
|
||||
try self.check(object.state.info.identity == null, "invalid object cast: string");
|
||||
|
||||
return object.state.userdata;
|
||||
}
|
||||
|
||||
pub fn to_integer(self: *Self, value: types.Ref) !types.Integer {
|
||||
const fail_message = "invalid type conversion: integer";
|
||||
|
||||
switch (value) {
|
||||
.float => |float| {
|
||||
const int = @typeInfo(types.Integer).Int;
|
||||
|
||||
if (coral.math.is_clamped(float, coral.math.min_int(int), coral.math.max_int(int))) {
|
||||
return @floatToInt(types.Integer, float);
|
||||
}
|
||||
},
|
||||
|
||||
.integer => |integer| return integer,
|
||||
else => {},
|
||||
}
|
||||
|
||||
return self.fail(fail_message);
|
||||
}
|
|
@ -0,0 +1,152 @@
const coral = @import("coral");

allocator: coral.io.Allocator,
interned: SymbolTable,
globals: Object,
values: DataStack,
frames: CallStack,

pub const Float = f64;

const CallStack = coral.list.Stack(struct {
	callable: *Object,
	opcode_index: usize,
	stack_index: usize,
});

const DataStack = coral.list.Stack(Variant);

pub const Object = struct {
	ref_count: usize,
	userdata: []coral.io.Byte,
	userinfo: *const anyopaque,
};

pub const PopError = error {
	StackOverflow,
};

const Self = @This();

const SymbolTable = coral.map.Table([]const coral.io.Byte, *Object, coral.map.string_table_traits);

pub const Variant = union (enum) {
	nil,
	true,
	false,
	number: Float,
	object: *Object,

	pub fn equals(self: Variant, other: Variant) bool {
		return switch (self) {
			.nil => other == .nil,
			.true => other == .true,
			.false => other == .false,

			.number => |number| switch (other) {
				.number => |other_number| number == other_number,
				else => false,
			},

			.object => |object| switch (other) {
				.object => |other_object| object == other_object,
				else => false,
			},
		};
	}
};

pub fn acquire_instance(_: *Self, object: *Object) *Object {
	// TODO: safety-check object belongs to state.
	object.ref_count += 1;

	return object;
}

pub fn acquire_interned(self: *Self, userdata: []const u8, userinfo: *const anyopaque) coral.io.AllocationError!*Object {
	// TODO: Include userinfo in matching lookup.
	if (self.interned.lookup(userdata)) |object| {
		return self.acquire_instance(object);
	} else {
		const data_object = try self.acquire_new(userdata, userinfo);

		errdefer self.release(data_object);

		coral.debug.assert(try self.interned.insert(data_object.userdata, data_object));

		return data_object;
	}
}

pub fn acquire_new(self: *Self, userdata: []const u8, userinfo: *const anyopaque) coral.io.AllocationError!*Object {
	const allocated_userdata = try coral.io.allocate_copy(self.allocator, userdata);

	errdefer self.allocator.deallocate(allocated_userdata);

	const allocated_object = try coral.io.allocate_one(self.allocator, Object{
		.ref_count = 1,
		.userdata = allocated_userdata,
		.userinfo = userinfo,
	});

	errdefer self.allocator.deallocate(allocated_object);

	return allocated_object;
}

pub fn free(self: *Self) void {
	self.values.free();
	self.frames.free();
	self.interned.free();
}

pub fn get_value(self: *Self, tail_index: usize) Variant {
	if (tail_index >= self.values.values.len) {
		return .nil;
	}

	return self.values.values[self.values.values.len - (1 + tail_index)];
}

pub fn make(allocator: coral.io.Allocator) Self {
	return .{
		.values = DataStack.make(allocator),
		.frames = CallStack.make(allocator),
		.interned = SymbolTable.make(allocator),
		.allocator = allocator,

		.globals = .{
			.ref_count = 0,
			.userdata = &.{},
			.userinfo = &.{},
		},
	};
}

pub fn pop_value(self: *Self) PopError!Variant {
	return self.values.pop() orelse error.StackOverflow;
}

pub fn push_value(self: *Self, value: Variant) coral.io.AllocationError!void {
	return self.values.push_one(value);
}

pub fn release(self: *Self, object: *Object) void {
	coral.debug.assert(object.ref_count != 0);

	object.ref_count -= 1;

	if (object.ref_count == 0) {
		self.allocator.deallocate(object);
	}
}

pub fn set_value(self: *Self, tail_index: usize, value: Variant) bool {
	if (tail_index >= self.values.values.len) {
		return false;
	}

	self.values.values[self.values.values.len - (1 + tail_index)] = value;

	return true;
}
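Taken together, this new State type pairs a reference-counted object heap with a value stack. A rough usage sketch, assuming it lives in a State.zig module next to the caller and that a coral.io.Allocator is already available; the demo wrapper and the marker userinfo value are illustrative only:

const State = @import("./State.zig");

const coral = @import("coral");

fn demo(allocator: coral.io.Allocator) !void {
	var state = State.make(allocator);

	defer state.free();

	// Values live on the data stack; tail index 0 is the most recently pushed value.
	try state.push_value(.{.number = 42});
	coral.debug.assert(state.get_value(0).equals(.{.number = 42}));
	_ = try state.pop_value();

	// Interned objects are deduplicated by their bytes and reference-counted,
	// so every acquire is balanced by a release.
	const marker: u8 = 0;
	const title = try state.acquire_interned("title", &marker);

	defer state.release(title);
}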
@ -0,0 +1,76 @@
const State = @import("./State.zig");

const coral = @import("coral");

state: *State,
fields: FieldTable,
array: ArrayList,

const ArrayList = coral.list.Stack(State.Variant);

const Field = struct {
	field: *State.Object,
	value: State.Variant,

	fn release_objects(self: Field, state: *State) void {
		state.release(self.field);

		if (self.value == .object) {
			state.release(self.value.object);
		}
	}
};

const FieldTable = coral.map.Table([]const coral.io.Byte, Field, coral.map.string_table_traits);

const Self = @This();

pub fn free(self: *Self) void {
	{
		var field_iterator = FieldTable.Iterable{.table = &self.fields};

		while (field_iterator.next()) |entry| {
			entry.value.release_objects(self.state);
		}
	}

	self.fields.free();
	self.array.free();
}

pub fn get_field(self: *Self, field_name: *State.Object) State.Variant {
	const field = self.fields.lookup(field_name.userdata) orelse return .nil;

	if (field.value == .object) {
		return .{.object = self.state.acquire_instance(field.value.object)};
	}

	return field.value;
}

pub fn get_index(self: *Self, index: usize) State.Variant {
	return self.array.values[index];
}

pub fn make(allocator: coral.io.Allocator, state: *State) Self {
	return .{
		.state = state,
		.fields = FieldTable.make(allocator),
		.array = ArrayList.make(allocator),
	};
}

pub fn set_field(self: *Self, field_name: *State.Object, value: State.Variant) coral.io.AllocationError!void {
	const previous_entry = try self.fields.replace(field_name.userdata, .{
		.field = field_name,
		.value = value,
	});

	if (previous_entry) |entry| {
		entry.value.release_objects(self.state);
	}
}

pub fn set_index(self: *Self, index: usize, value: State.Variant) coral.io.AllocationError!void {
	self.array.values[index] = value;
}
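This second file appears to be the script-facing table type: fields are keyed by interned name objects, and the table releases whatever it replaces or still holds when freed. A rough usage sketch, under the assumption that the file is named Table.zig and that allocator and state come from the setup shown in the previous sketch:

const State = @import("./State.zig");
const Table = @import("./Table.zig"); // Assumed filename for the struct above.

const coral = @import("coral");

fn demo(allocator: coral.io.Allocator, state: *State) !void {
	var table = Table.make(allocator, state);

	defer table.free();

	const marker: u8 = 0;
	const width_name = try state.acquire_interned("width", &marker);

	// set_field does not acquire, but replacement and free do release, so the
	// caller's reference to `width_name` is effectively handed to the table.
	try table.set_field(width_name, .{.number = 640});

	// Number values come back by copy; object values come back with an extra
	// reference that the caller is expected to release.
	coral.debug.assert(table.get_field(width_name).equals(.{.number = 640}));
}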
@ -29,8 +29,7 @@ pub const Token = union(enum) {
	symbol_equals,
	symbol_double_equals,

	integer: []const u8,
	real: []const u8,
	number: []const u8,
	string: []const u8,

	keyword_nil,

@ -38,10 +37,11 @@ pub const Token = union(enum) {
	keyword_true,
	keyword_return,
	keyword_self,
	keyword_const,

	pub fn text(self: Token) []const u8 {
		return switch (self) {
			.unknown => |unknown| @ptrCast([*]const u8, &unknown)[0 .. 1],
			.unknown => |unknown| @as([*]const u8, @ptrCast(&unknown))[0 .. 1],
			.newline => "newline",

			.global => |identifier| identifier,

@ -69,10 +69,10 @@ pub const Token = union(enum) {
			.symbol_equals => "=",
			.symbol_double_equals => "==",

			.integer => |literal| literal,
			.real => |literal| literal,
			.number => |literal| literal,
			.string => |literal| literal,

			.keyword_const => "const",
			.keyword_nil => "nil",
			.keyword_false => "false",
			.keyword_true => "true",

@ -134,7 +134,7 @@ pub const Tokenizer = struct {
				else => break,
			};

			self.current_token = .{.real = self.source[begin .. cursor]};
			self.current_token = .{.number = self.source[begin .. cursor]};

			return true;
		},

@ -142,7 +142,7 @@ pub const Tokenizer = struct {
				else => break,
			};

			self.current_token = .{.integer = self.source[begin .. cursor]};
			self.current_token = .{.number = self.source[begin .. cursor]};

			return true;
		},

@ -162,6 +162,12 @@ pub const Tokenizer = struct {
		coral.debug.assert(identifier.len != 0);

		switch (identifier[0]) {
			'c' => if (coral.io.ends_with(identifier, "onst")) {
				self.current_token = .keyword_const;

				return true;
			},

			'n' => if (coral.io.ends_with(identifier, "il")) {
				self.current_token = .keyword_nil;
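The keyword recognition above dispatches on the identifier's first byte and only then inspects the rest of the spelling. A standalone sketch of the same idea, using std.mem.eql on the remaining bytes instead of the coral.io.ends_with helper and with an illustrative keyword set:

const std = @import("std");

const Keyword = enum { keyword_const, keyword_nil, keyword_false, keyword_true };

/// Returns the keyword an identifier spells, if any, by switching on the
/// first byte and then comparing only the remaining suffix.
fn match_keyword(identifier: []const u8) ?Keyword {
	std.debug.assert(identifier.len != 0);

	return switch (identifier[0]) {
		'c' => if (std.mem.eql(u8, identifier[1 ..], "onst")) .keyword_const else null,
		'n' => if (std.mem.eql(u8, identifier[1 ..], "il")) .keyword_nil else null,
		'f' => if (std.mem.eql(u8, identifier[1 ..], "alse")) .keyword_false else null,
		't' => if (std.mem.eql(u8, identifier[1 ..], "rue")) .keyword_true else null,
		else => null,
	};
}

test "keyword dispatch" {
	try std.testing.expectEqual(@as(?Keyword, .keyword_nil), match_keyword("nil"));
	try std.testing.expectEqual(@as(?Keyword, null), match_keyword("note"));
}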
@ -1,56 +0,0 @@
const coral = @import("coral");

pub const CheckError = error {
	CheckFailed
};

pub const Float = f32;

pub const Integer = i32;

pub const Object = u32;

pub const Primitive = enum {
	nil,
	false,
	true,
	float,
	integer,
	object,
};

pub const Ref = union (Primitive) {
	nil,
	false,
	true,
	float: Float,
	integer: Integer,
	object: Object,
};

pub const ParseError = error {
	OutOfMemory,
	BadSyntax,
};

pub const RuntimeError = CheckError || ParseError;

pub const Val = union (Primitive) {
	nil,
	false,
	true,
	float: Float,
	integer: Integer,
	object: Object,

	pub fn as_ref(self: *const Val) Ref {
		return switch (self.*) {
			.nil => .nil,
			.false => .false,
			.true => .true,
			.float => .{.float = self.float},
			.integer => .{.integer = self.integer},
			.object => .{.object = self.object},
		};
	}
};
@ -1,6 +1,4 @@
const builtin = @import("builtin");

const canvas = @import("./canvas.zig");
const app = @import("./app.zig");

const coral = @import("coral");

@ -8,113 +6,87 @@ const ext = @import("./ext.zig");

pub const file = @import("./file.zig");

pub const heap = @import("./heap.zig");
const heap = @import("./heap.zig");

const kym = @import("./kym.zig");

const AppManifest = struct {
	title: [255:0]u8 = [_:0]u8{0} ** 255,
	width: u16 = 640,
	height: u16 = 480,

	pub fn load_script(self: *AppManifest, env: *kym.Environment, fs: file.System, file_path: []const u8) !void {
		const manifest = try env.execute_file(fs, file.Path.from(&.{file_path}));

		defer env.discard(manifest);

		const manifest_ref = manifest.as_ref();

		{
			const title = try env.get_field(manifest_ref, try env.intern("title"));

			defer env.discard(title);

			const title_string = try env.string_cast(title.as_ref());

			try env.check(title_string.len <= self.title.len, "`title` cannot exceed 255 bytes in length");
			coral.io.copy(&self.title, title_string);
		}

		const u16_int = @typeInfo(u16).Int;

		{
			const width = try env.get_field(manifest_ref, try env.intern("width"));

			errdefer env.discard(width);

			self.width = try coral.math.checked_cast(u16_int, try env.to_integer(width.as_ref()));
		}

		{
			const height = try env.get_field(manifest_ref, try env.intern("height"));

			errdefer env.discard(height);

			self.width = try coral.math.checked_cast(u16_int, try env.to_integer(height.as_ref()));
		}
	}
pub const RuntimeError = error {
	OutOfMemory,
	InitFailure,
	BadManifest,
};

pub fn run_app(base_file_system: file.System) void {
	defer heap.trace_leaks();

	const Logger = struct {
		const Self = @This();

		fn log(_: *const Self, message: []const u8) void {
			ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%.*s", message.len, message.ptr);
		fn last_sdl_error() [:0]const u8 {
			return coral.io.slice_sentineled(@as(u8, 0), @as([*:0]const u8, @ptrCast(ext.SDL_GetError())));
		}
	};

	var script_environment = kym.Environment.init(heap.allocator, .{
		.values_max = 512,
		.calls_max = 512,
		.reporter = kym.Environment.Reporter.bind(Logger, &.{}, Logger.log),
	}) catch {
		return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "failed to initialize Kym vm\n");
	};
pub fn run_app(file_access: file.Access) RuntimeError!void {
	var info_log = app.WritableLog.make(.info, heap.allocator);

	defer script_environment.deinit();
	defer info_log.free();

	const app_file_name = "app.ona";
	var app_manifest = AppManifest{};
	var fail_log = app.WritableLog.make(.fail, heap.allocator);

	app_manifest.load_script(&script_environment, base_file_system, app_file_name) catch {
		return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "failed to load %s\n", app_file_name);
	};
	defer fail_log.free();

	if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) {
		return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
		try fail_log.write(last_sdl_error());

		return error.InitFailure;
	}

	defer ext.SDL_Quit();

	{
		const base_prefix = ext.SDL_GetBasePath() orelse {
			return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
	var script_env = kym.RuntimeEnv.make(heap.allocator, .{
		.out_writer = info_log.as_writer(),
		.err_writer = fail_log.as_writer(),
	}) catch {
		try fail_log.write("failed to initialize script runtime");

		return error.InitFailure;
	};

		defer ext.SDL_free(base_prefix);
	defer script_env.free();

		const window_flags = 0;
		const window_pos = ext.SDL_WINDOWPOS_CENTERED;
	var manifest = app.Manifest{};

		const window = ext.SDL_CreateWindow(&app_manifest.title, window_pos, window_pos, app_manifest.width, app_manifest.height, window_flags) orelse {
			return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
	manifest.load(&script_env, file_access) catch {
		fail_log.write("failed to load / execute app.ona manifest") catch {};

		return error.BadManifest;
	};

	const window = create: {
		const pos = ext.SDL_WINDOWPOS_CENTERED;
		const flags = 0;

		break: create ext.SDL_CreateWindow(&manifest.title, pos, pos, manifest.width, manifest.height, flags) orelse {
			fail_log.write(last_sdl_error()) catch {};

			return error.InitFailure;
		};
	};

	defer ext.SDL_DestroyWindow(window);

		const renderer_flags = 0;
	const renderer = create: {
		const defaultDriverIndex = -1;
		const flags = ext.SDL_RENDERER_ACCELERATED;

		const renderer = ext.SDL_CreateRenderer(window, -1, renderer_flags) orelse {
			return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
		break: create ext.SDL_CreateRenderer(window, defaultDriverIndex, flags) orelse {
			fail_log.write(last_sdl_error()) catch {};

			return error.InitFailure;
		};
	};

	defer ext.SDL_DestroyRenderer(renderer);

	{
		var previous_ticks = ext.SDL_GetTicks64();

		while (true) {
			// TODO: Delta timing.
			{
				var event = @as(ext.SDL_Event, undefined);

				while (ext.SDL_PollEvent(&event) != 0) {

@ -123,18 +95,22 @@ pub fn run_app(base_file_system: file.System) void {
					else => {},
				}
			}

			if (ext.SDL_SetRenderDrawColor(renderer, 0, 0, 0, 0) != 0) {
				return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
			}

			if (ext.SDL_RenderClear(renderer) != 0) {
				return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
			{
				// Based on https://fabiensanglard.net/timer_and_framerate/index.php.
				const current_ticks = ext.SDL_GetTicks64();
				const milliseconds_per_second = 1000.0;
				const tick_frequency = @as(u64, @intFromFloat(milliseconds_per_second / manifest.tick_rate));

				while (previous_ticks < current_ticks) {
					previous_ticks += tick_frequency;
				}
			}

			// TODO: Render here.

			ext.SDL_RenderPresent(renderer);
			_ = ext.SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
			_ = ext.SDL_RenderClear(renderer);
			_ = ext.SDL_RenderPresent(renderer);
		}
	}
}
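The new main loop converts the manifest's tick rate into a fixed tick interval in milliseconds and advances previous_ticks until it catches up with the current SDL tick count. A standalone sketch of that catch-up step; returning the number of simulation ticks to run is an illustrative extension (the commit itself only advances the counter so far), and the values in the test are arbitrary:

const std = @import("std");

/// Advances `previous_ticks` in fixed-size steps until it has caught up with
/// `current_ticks`, returning how many simulation ticks should be run.
fn catch_up(previous_ticks: *u64, current_ticks: u64, tick_rate: f64) u64 {
	const milliseconds_per_second = 1000.0;
	const tick_frequency = @as(u64, @intFromFloat(milliseconds_per_second / tick_rate));
	var ticks_to_run: u64 = 0;

	while (previous_ticks.* < current_ticks) {
		previous_ticks.* += tick_frequency;
		ticks_to_run += 1;
	}

	return ticks_to_run;
}

test "fixed timestep catch-up" {
	var previous: u64 = 0;

	// At 60 ticks per second one tick is ~16ms, so 50ms of elapsed time needs 4 steps.
	try std.testing.expectEqual(@as(u64, 4), catch_up(&previous, 50, 60.0));
	try std.testing.expectEqual(@as(u64, 0), catch_up(&previous, 50, 60.0));
}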
@ -1,7 +1,5 @@
const coral = @import("coral");

const ona = @import("ona");

pub fn main() !void {
	ona.run_app(.{.sandboxed_path = &ona.file.Path.cwd});
pub fn main() ona.RuntimeError!void {
	try ona.run_app(.{.sandboxed_path = &ona.file.Path.cwd});
}
@ -1,3 +1,3 @@
const _coral = @import("coral");
const coral = @import("coral");

const _ona = @import("ona");
const ona = @import("ona");