Implement Bytecode Executor for Kym #19

@@ -2,20 +2,12 @@
"version": "0.2.0",
"configurations": [
{
"name": "Runtime",
"name": "Runner",
"type": "gdb",
"request": "launch",
"target": "${workspaceRoot}/zig-out/bin/ona-runner",
"target": "${workspaceRoot}/zig-out/bin/runner",
"cwd": "${workspaceRoot}/debug/",
"valuesFormatting": "parseText"
},
{
"name": "Build Script",
"type": "python",
"request": "launch",
"program": "./build.py",
"console": "integratedTerminal",
"justMyCode": true
}
]
}
@@ -1,13 +1,13 @@
{
"editor.minimap.maxColumn": 120,
"editor.detectIndentation": false,
"editor.insertSpaces": false,
"editor.rulers": [120],
"files.trimTrailingWhitespace": true,
"files.insertFinalNewline": true,
"zig.formattingProvider": "off",
"zig.zls.enableAutofix": false,
"editor.formatOnSave": false,
"spellright.language": ["en-US-10-1."],
"spellright.documentTypes": ["markdown"],
"files.insertFinalNewline": true,
"files.trimTrailingWhitespace": true,

"[zig]": {
"editor.formatOnSave": false,
"files.eol": "\n",
"editor.minimap.maxColumn": 120,
"editor.detectIndentation": false,
"editor.insertSpaces": false,
"editor.rulers": [120],
}
}
@@ -2,24 +2,23 @@
"version": "2.0.0",
"tasks": [
{
"label": "build",
"type": "process",
"command": "zig",
"args": ["build"],
"label": "Build All",
"type": "shell",
"command": "zig build",
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": "$gcc",
"presentation": {
"echo": true,
"reveal": "silent",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"showReuseMessage": false,
"clear": true,
"revealProblems": "onProblem"
},
"group": {
"kind": "build",
"isDefault": true
},
}
}
]
}
}
@@ -1,6 +1,9 @@
const std = @import("std");

pub fn build(b: *std.Build) void {
const target = b.standardTargetOptions(.{});
const optimize = b.standardOptimizeOption(.{});

const coral_module = b.createModule(.{.source_file = .{.path = "./source/coral/coral.zig"}});

const ona_module = b.createModule(.{

@@ -14,37 +17,18 @@ pub fn build(b: *std.Build) void {
},
});

const cross_target = b.standardTargetOptions(.{});
const optimize_mode = std.builtin.Mode.Debug;

// Ona runner.
{
const ona_exe = b.addExecutable(.{
.name = "ona-runner",
.root_source_file = .{.path = "./source/runner.zig"},
.target = cross_target,
.optimize = optimize_mode,
b.installArtifact(create: {
const runner_exe = b.addExecutable(.{
.name = "runner",
.root_source_file = .{ .path = "source/runner.zig" },
.target = target,
.optimize = optimize,
});

ona_exe.addModule("coral", coral_module);
ona_exe.addModule("ona", ona_module);
runner_exe.addModule("ona", ona_module);
runner_exe.linkLibC();
runner_exe.linkSystemLibrary("SDL2");

// ona_exe.addIncludeDir("./ext");
ona_exe.linkSystemLibrary("SDL2");
ona_exe.linkLibC();
b.installArtifact(ona_exe);
}

// Test step.
{
const exe_tests = b.addTest(.{
.root_source_file = .{.path = "source/test.zig"},
.target = cross_target,
.optimize = optimize_mode,
});

const test_step = b.step("test", "Run unit tests");

test_step.dependOn(&exe_tests.step);
}
break: create runner_exe;
});
}
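
For readers unfamiliar with Zig's labeled-block expressions: the `b.installArtifact(create: { ... break: create runner_exe; })` form above is equivalent to building the step first and installing it afterwards. Roughly, as a sketch (not part of the diff, derived only from the lines above):

const runner_exe = b.addExecutable(.{
.name = "runner",
.root_source_file = .{ .path = "source/runner.zig" },
.target = target,
.optimize = optimize,
});

runner_exe.addModule("ona", ona_module);
runner_exe.linkLibC();
runner_exe.linkSystemLibrary("SDL2");

b.installArtifact(runner_exe);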

@@ -1,6 +1,7 @@

return {
title = "Afterglow",
width = 1280,
height = 800,
title = "Afterglow",
width = 1280,
height = 800,
tick_rate = 60,
}
@@ -26,8 +26,6 @@ Ona is also the Catalan word for "wave".

* Provide utilities for handling rendering but otherwise leave the higher-level game logic and data structuring to the programmer.

* Provide a simple scripting interface for people who want to do something quick and a powerful plug-in API for engine-level extensions and speed-critical application logic.

## Technical Details

### Requirements
@@ -12,46 +12,23 @@ pub const Stacking = struct {
allocations: list.Stack(usize),
pages: list.Stack(Page),

const AllocationsList = list.Stack(usize);

const Page = struct {
buffer: []u8,
buffer: []io.Byte,
used: usize,

const Self = @This();

fn available(self: Self) usize {
fn available(self: Page) usize {
return self.buffer.len - self.used;
}
};

pub fn allocate(self: *Stacking, allocation_size: usize) io.AllocationError![]u8 {
const alignment = @as(usize, 4);
const aligned_allocation_size = (allocation_size + alignment - 1) & ~(alignment - 1);

if (self.pages.values.len == 0) {
const page = try self.allocate_page(math.max(self.min_page_size, aligned_allocation_size));

page.used = allocation_size;

return page.buffer[0 .. allocation_size];
}

var page = self.current_page() orelse unreachable;

if (page.available() <= aligned_allocation_size) {
page = try self.allocate_page(math.max(self.min_page_size, aligned_allocation_size));
}

debug.assert(page.available() >= allocation_size);

defer page.used += aligned_allocation_size;

return page.buffer[page.used .. (page.used + allocation_size)];
}
const PageList = list.Stack(Page);

fn allocate_page(self: *Stacking, page_size: usize) io.AllocationError!*Page {
var buffer = try io.allocate_many(self.page_allocator, page_size, u8);
var buffer = try self.page_allocator.reallocate(null, page_size);

errdefer io.deallocate(self.page_allocator, buffer);
errdefer self.page_allocator.deallocate(buffer);

try self.pages.push_one(.{
.buffer = buffer,

@@ -62,25 +39,10 @@ pub const Stacking = struct {
}

pub fn as_allocator(self: *Stacking) io.Allocator {
return io.Allocator.bind(Stacking, self, struct {
fn reallocate(stacking: *Stacking, options: io.AllocationOptions) ?[]u8 {
const allocation = options.allocation orelse {
return stacking.allocate(options.size) catch null;
};

if (allocation.len == 0) {
return null;
}

const reallocation = stacking.allocate(allocation.len) catch {
return null;
};

io.copy(reallocation, allocation);

return reallocation;
}
}.reallocate);
return io.Allocator.bind(Stacking, self, .{
.deallocate = deallocate,
.reallocate = reallocate,
});
}

fn current_page(self: Stacking) ?*Page {

@@ -91,20 +53,58 @@ pub const Stacking = struct {
return &self.pages.values[self.pages.values.len - 1];
}

pub fn deinit(self: *Stacking) void {
pub fn free(self: *Stacking) void {
for (self.pages.values) |page| {
io.deallocate(self.page_allocator, page.buffer);
self.page_allocator.deallocate(page.buffer);
}

self.pages.deinit();
self.allocations.deinit();
self.pages.free();
self.allocations.free();
}

pub fn init(allocator: io.Allocator, min_page_size: usize) io.AllocationError!Stacking {
pub fn deallocate(_: *Stacking, _: []io.Byte) void {
// TODO: Decide how to implement.
}

pub fn reallocate(self: *Stacking, return_address: usize, existing_allocation: ?[]io.Byte, size: usize) io.AllocationError![]io.Byte {
// TODO: Safety-check existing allocation is from allocator or null.
_ = return_address;

const alignment = @as(usize, 4);
const aligned_size = (size + alignment - 1) & ~(alignment - 1);

if (self.pages.values.len == 0) {
const page = try self.allocate_page(math.max(self.min_page_size, aligned_size));

page.used = size;

return page.buffer[0 .. size];
}

var page = self.current_page() orelse unreachable;

if (page.available() <= aligned_size) {
page = try self.allocate_page(math.max(self.min_page_size, aligned_size));
}

debug.assert(page.available() >= size);

defer page.used += aligned_size;

const reallocation = page.buffer[page.used .. (page.used + size)];

if (existing_allocation) |allocation| {
io.copy(reallocation, allocation);
}

return reallocation;
}

pub fn make(allocator: io.Allocator, min_page_size: usize) Stacking {
return Stacking{
.allocations = AllocationsList.make(allocator),
.pages = PageList.make(allocator),
.page_allocator = allocator,
.allocations = .{.allocator = allocator},
.pages = .{.allocator = allocator},
.min_page_size = min_page_size,
};
}
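
A minimal usage sketch of the reworked arena under the new allocator interface (illustrative only; it assumes the `coral` module is visible to the importing file and that some backing `io.Allocator` is supplied by the caller):

const coral = @import("coral");

fn scratch_example(backing: coral.io.Allocator) coral.io.AllocationError!void {
// make() no longer allocates up front, so it cannot fail.
var stacking = coral.arena.Stacking.make(backing, 4096);

defer stacking.free();

// Allocation now flows through the io.Allocator vtable produced by as_allocator().
const bytes = try stacking.as_allocator().reallocate(null, 128);

coral.io.zero(bytes);
}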

@@ -1,44 +1,14 @@
///
/// Arena-based memory allocation strategies.
///

pub const arena = @import("./arena.zig");

///
/// Debug build-only utilities and sanity-checkers.
///
pub const debug = @import("./debug.zig");

///
/// Platform-agnostic data input and output operations.
///
pub const io = @import("./io.zig");

///
/// Data structures and utilities for sequential, "list-like" collections.
///
pub const list = @import("./list.zig");

///
/// Types and functions designed for mathematics in interactive media applications.
///
pub const map = @import("./map.zig");

pub const math = @import("./math.zig");

///
/// Data structures and utilities for fragmented, "heap-like" collections.
///
pub const slab = @import("./slab.zig");

///
/// Data structures and utilities for the highly-specialized "slotmap" collection.
///
pub const slots = @import("./slots.zig");

///
/// Data structures and utilities for associative, "table-like" collections.
///
pub const table = @import("./table.zig");

///
/// Converters, parsers, and validators for sequences of bytes treated as UTF8 unicode strings.
///
pub const utf8 = @import("./utf8.zig");
@@ -1,8 +1,4 @@
///
/// Active code comment to assert that `condition` should always be true.
///
/// Safety-checked behavior is invoked where `condition` evaluates to false.
///

pub fn assert(condition: bool) void {
if (!condition) {
unreachable;
@@ -1,310 +1,283 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
pub const AllocationError = error {
|
||||
OutOfMemory,
|
||||
};
|
||||
|
||||
pub const AllocationOptions = struct {
|
||||
return_address: usize,
|
||||
allocation: ?[]u8 = null,
|
||||
size: usize,
|
||||
};
|
||||
|
||||
pub const Allocator = Generator(?[]u8, AllocationOptions);
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const Byte = u8;
|
||||
|
||||
///
|
||||
/// Function pointer coupled with an immutable state context for providing dynamic dispatch over a given `Input` and
|
||||
/// `Output`.
|
||||
///
|
||||
pub fn Functor(comptime Output: type, comptime Input: type) type {
|
||||
return struct {
|
||||
context: *const anyopaque,
|
||||
invoker: *const fn (capture: *const anyopaque, input: Input) Output,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn bind(comptime State: type, state: *const State, comptime invoker: fn (capture: *const State, input: Input) Output) Self {
|
||||
const alignment = @alignOf(State);
|
||||
const is_zero_aligned = alignment == 0;
|
||||
|
||||
return .{
|
||||
.context = if (is_zero_aligned) state else @ptrCast(*const anyopaque, state),
|
||||
|
||||
.invoker = struct {
|
||||
fn invoke_opaque(context: *const anyopaque, input: Input) Output {
|
||||
if (is_zero_aligned) {
|
||||
return invoker(@ptrCast(*const State, context), input);
|
||||
}
|
||||
|
||||
return invoker(@ptrCast(*const State, @alignCast(alignment, context)), input);
|
||||
}
|
||||
}.invoke_opaque,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn invoke(self: Self, input: Input) Output {
|
||||
return self.invoker(self.context, input);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Function pointer coupled with a mutable state context for providing dynamic dispatch over a given `Input` and
|
||||
/// `Output`.
|
||||
///
|
||||
pub fn Generator(comptime Output: type, comptime Input: type) type {
|
||||
return struct {
|
||||
context: *anyopaque,
|
||||
invoker: *const fn (capture: *anyopaque, input: Input) Output,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn bind(comptime State: type, state: *State, comptime invoker: fn (capture: *State, input: Input) Output) Self {
|
||||
const alignment = @alignOf(State);
|
||||
const is_zero_aligned = alignment == 0;
|
||||
|
||||
return .{
|
||||
.context = if (is_zero_aligned) state else @ptrCast(*anyopaque, state),
|
||||
|
||||
.invoker = struct {
|
||||
fn invoke_opaque(context: *anyopaque, input: Input) Output {
|
||||
if (is_zero_aligned) {
|
||||
return invoker(@ptrCast(*State, context), input);
|
||||
}
|
||||
|
||||
return invoker(@ptrCast(*State, @alignCast(alignment, context)), input);
|
||||
}
|
||||
}.invoke_opaque,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn invoke(self: Self, input: Input) Output {
|
||||
return self.invoker(self.context, input);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub const Reader = Generator(?usize, []u8);
|
||||
|
||||
pub const StreamError = error {
|
||||
ReadFailure,
|
||||
WriteFailure,
|
||||
};
|
||||
|
||||
pub fn Tag(comptime Element: type) type {
|
||||
return switch (@typeInfo(Element)) {
|
||||
.Enum => |info| info.tag_type,
|
||||
.Union => |info| info.tag_type orelse @compileError(@typeName(Element) ++ " has no tag type"),
|
||||
else => @compileError("expected enum or union type, found '" ++ @typeName(Element) ++ "'"),
|
||||
};
|
||||
}
|
||||
|
||||
pub const FixedBuffer = struct {
|
||||
slice: []u8,
|
||||
|
||||
pub fn as_writer(self: *FixedBuffer) Writer {
|
||||
return Writer.bind(FixedBuffer, self, struct {
|
||||
fn write(writable_memory: *FixedBuffer, data: []const u8) ?usize {
|
||||
return writable_memory.write(data);
|
||||
}
|
||||
}.write);
|
||||
}
|
||||
|
||||
pub fn put(self: *FixedBuffer, byte: u8) bool {
|
||||
if (self.slice.len == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.slice[0] = byte;
|
||||
self.slice = self.slice[1 ..];
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn write(self: *FixedBuffer, bytes: []const u8) usize {
|
||||
const writable = math.min(self.slice.len, bytes.len);
|
||||
|
||||
copy(self.slice, bytes);
|
||||
|
||||
self.slice = self.slice[writable ..];
|
||||
|
||||
return writable;
|
||||
}
|
||||
};
|
||||
|
||||
pub const Writer = Generator(?usize, []const Byte);
|
||||
|
||||
pub fn allocate_many(allocator: Allocator, amount: usize, comptime Type: type) AllocationError![]Type {
|
||||
if (@sizeOf(Type) == 0) {
|
||||
@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
|
||||
}
|
||||
|
||||
return @ptrCast([*]Type, @alignCast(@alignOf(Type), allocator.invoke(.{
|
||||
.size = @sizeOf(Type) * amount,
|
||||
.return_address = @returnAddress(),
|
||||
}) orelse return error.OutOfMemory))[0 .. amount];
|
||||
}
|
||||
|
||||
pub fn allocate_one(allocator: Allocator, value: anytype) AllocationError!*@TypeOf(value) {
|
||||
const Type = @TypeOf(value);
|
||||
|
||||
if (@sizeOf(Type) == 0) {
|
||||
@compileError("Cannot allocate memory for 0-byte type " ++ @typeName(Type));
|
||||
}
|
||||
|
||||
const allocation = @ptrCast(*Type, @alignCast(@alignOf(Type), allocator.invoke(.{
|
||||
.size = @sizeOf(Type),
|
||||
.return_address = @returnAddress(),
|
||||
}) orelse return error.OutOfMemory));
|
||||
|
||||
allocation.* = value;
|
||||
|
||||
return allocation;
|
||||
}
|
||||
|
||||
pub fn bytes_of(value: anytype) []const u8 {
|
||||
const pointer_info = @typeInfo(@TypeOf(value)).Pointer;
|
||||
|
||||
debug.assert(pointer_info.size == .One);
|
||||
|
||||
return @ptrCast([*]const u8, value)[0 .. @sizeOf(pointer_info.child)];
|
||||
}
|
||||
|
||||
pub fn compare(this: []const u8, that: []const u8) isize {
|
||||
const range = math.min(this.len, that.len);
|
||||
var index: usize = 0;
|
||||
|
||||
while (index < range) : (index += 1) {
|
||||
const difference = @intCast(isize, this[index]) - @intCast(isize, that[index]);
|
||||
|
||||
if (difference != 0) {
|
||||
return difference;
|
||||
}
|
||||
}
|
||||
|
||||
return @intCast(isize, this.len) - @intCast(isize, that.len);
|
||||
}
|
||||
|
||||
pub fn copy(target: []u8, source: []const u8) void {
|
||||
var index: usize = 0;
|
||||
|
||||
while (index < source.len) : (index += 1) target[index] = source[index];
|
||||
}
|
||||
|
||||
pub fn deallocate(allocator: Allocator, allocation: anytype) void {
|
||||
switch (@typeInfo(@TypeOf(allocation))) {
|
||||
.Pointer => |pointer| {
|
||||
_ = allocator.invoke(.{
|
||||
.allocation = switch (pointer.size) {
|
||||
.One => @ptrCast([*]u8, allocation)[0 .. @sizeOf(pointer.child)],
|
||||
.Slice => @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(pointer.child) * allocation.len)],
|
||||
.Many, .C => @compileError("length of allocation must be known to deallocate"),
|
||||
},
|
||||
|
||||
.return_address = @returnAddress(),
|
||||
.size = 0,
|
||||
});
|
||||
},
|
||||
|
||||
else => @compileError("cannot deallocate " ++ allocation),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ends_with(target: []const u8, match: []const u8) bool {
|
||||
if (target.len < match.len) return false;
|
||||
|
||||
var index = @as(usize, 0);
|
||||
|
||||
while (index < match.len) : (index += 1) {
|
||||
if (target[target.len - (1 + index)] != match[match.len - (1 + index)]) return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn equals(this: []const u8, that: []const u8) bool {
|
||||
if (this.len != that.len) return false;
|
||||
|
||||
{
|
||||
var index: usize = 0;
|
||||
|
||||
while (index < this.len) : (index += 1) if (this[index] != that[index]) return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
var null_context = @as(usize, 0);
|
||||
|
||||
pub const null_allocator = Allocator.bind(&null_context, struct {
|
||||
fn reallocate(context: *usize, options: AllocationOptions) ?[]u8 {
|
||||
debug.assert(context.* == 0);
|
||||
debug.assert(options.allocation == null);
|
||||
|
||||
return null;
|
||||
}
|
||||
});
|
||||
|
||||
pub const null_writer = Writer.bind(&null_context, struct {
|
||||
fn write(context: *usize, buffer: []const u8) usize {
|
||||
debug.assert(context.* == 0);
|
||||
|
||||
return buffer.len;
|
||||
}
|
||||
}.write);
|
||||
|
||||
pub fn reallocate(allocator: Allocator, allocation: anytype, amount: usize) AllocationError![]@typeInfo(@TypeOf(allocation)).Pointer.child {
|
||||
const pointer_info = @typeInfo(@TypeOf(allocation)).Pointer;
|
||||
const Element = pointer_info.child;
|
||||
|
||||
return @ptrCast([*]Element, @alignCast(@alignOf(Element), (allocator.invoke(switch (pointer_info.size) {
|
||||
.Slice => .{
|
||||
.allocation = @ptrCast([*]u8, allocation.ptr)[0 .. (@sizeOf(Element) * allocation.len)],
|
||||
.size = @sizeOf(Element) * amount,
|
||||
},
|
||||
|
||||
.Many, .C, .One => @compileError("allocation must be a slice to reallocate"),
|
||||
}) orelse return error.OutOfMemory).ptr))[0 .. amount];
|
||||
}
|
||||
|
||||
pub fn sentinel_index(comptime element: type, comptime sentinel: element, sequence: [*:sentinel]const element) usize {
|
||||
var index: usize = 0;
|
||||
|
||||
while (sequence[index] != sentinel) : (index += 1) {}
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
pub fn stream(output: Writer, input: Reader, buffer: []u8) StreamError!u64 {
|
||||
var total_written: u64 = 0;
|
||||
var read = input.invoke(buffer) orelse return error.ReadFailure;
|
||||
|
||||
while (read != 0) {
|
||||
total_written += output.invoke(buffer[0..read]) orelse return error.WriteFailure;
|
||||
read = input.invoke(buffer) orelse return error.ReadFailure;
|
||||
}
|
||||
|
||||
return total_written;
|
||||
}
|
||||
|
||||
pub fn swap(comptime Element: type, this: *Element, that: *Element) void {
|
||||
const temp = this.*;
|
||||
|
||||
this.* = that.*;
|
||||
that.* = temp;
|
||||
}
|
||||
|
||||
pub fn tag_of(comptime value: anytype) Tag(@TypeOf(value)) {
|
||||
return @as(Tag(@TypeOf(value)), value);
|
||||
}
|
||||
|
||||
pub fn zero(target: []u8) void {
|
||||
for (target) |*t| t.* = 0;
|
||||
}
|
||||
const debug = @import("./debug.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
pub const AllocationError = error {
|
||||
OutOfMemory,
|
||||
};
|
||||
|
||||
pub const Allocator = struct {
|
||||
context: *anyopaque,
|
||||
|
||||
actions: *const struct {
|
||||
deallocate: *const fn (context: *anyopaque, allocation: []Byte) void,
|
||||
reallocate: *const fn (context: *anyopaque, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte,
|
||||
},
|
||||
|
||||
pub fn Actions(comptime State: type) type {
|
||||
return struct {
|
||||
deallocate: fn (state: *State, allocation: []Byte) void,
|
||||
reallocate: fn (state: *State, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn bind(comptime State: type, state: *State, comptime actions: Actions(State)) Allocator {
|
||||
const is_zero_aligned = @alignOf(State) == 0;
|
||||
|
||||
return .{
|
||||
.context = if (is_zero_aligned) state else @ptrCast(state),
|
||||
|
||||
.actions = &.{
|
||||
.deallocate = struct {
|
||||
fn deallocate(context: *anyopaque, allocation: []Byte) void {
|
||||
if (is_zero_aligned) {
|
||||
return actions.deallocate(@ptrCast(context), allocation);
|
||||
}
|
||||
|
||||
return actions.deallocate(@ptrCast(@alignCast(context)), allocation);
|
||||
}
|
||||
}.deallocate,
|
||||
|
||||
.reallocate = struct {
|
||||
fn reallocate(context: *anyopaque, return_address: usize, existing_allocation: ?[]Byte, size: usize) AllocationError![]Byte {
|
||||
if (is_zero_aligned) {
|
||||
return actions.reallocate(@ptrCast(context), return_address, existing_allocation, size);
|
||||
}
|
||||
|
||||
return actions.reallocate(@ptrCast(@alignCast(context)), return_address, existing_allocation, size);
|
||||
}
|
||||
}.reallocate,
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn deallocate(self: Allocator, allocation: anytype) void {
|
||||
switch (@typeInfo(@TypeOf(allocation))) {
|
||||
.Pointer => |pointer| {
|
||||
self.actions.deallocate(self.context, switch (pointer.size) {
|
||||
.One => @as([*]Byte, @ptrCast(allocation))[0 .. @sizeOf(pointer.child)],
|
||||
.Slice => @as([*]Byte, @ptrCast(allocation.ptr))[0 .. (@sizeOf(pointer.child) * allocation.len)],
|
||||
.Many, .C => @compileError("length of allocation must be known to deallocate"),
|
||||
});
|
||||
},
|
||||
|
||||
else => @compileError("cannot deallocate " ++ allocation),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn reallocate(self: Allocator, allocation: ?[]Byte, allocation_size: usize) AllocationError![]Byte {
|
||||
return self.actions.reallocate(self.context, @returnAddress(), allocation, allocation_size);
|
||||
}
|
||||
};
|
||||
|
||||
pub const Byte = u8;
|
||||
|
||||
pub const FixedBuffer = struct {
|
||||
bytes: []Byte,
|
||||
|
||||
pub fn as_writer(self: *FixedBuffer) Writer {
|
||||
return Writer.bind(FixedBuffer, self, struct {
|
||||
fn write(writable_memory: *FixedBuffer, data: []const Byte) ?usize {
|
||||
return writable_memory.write(data);
|
||||
}
|
||||
}.write);
|
||||
}
|
||||
|
||||
pub fn put(self: *FixedBuffer, byte: Byte) bool {
|
||||
if (self.bytes.len == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.bytes[0] = byte;
|
||||
self.bytes = self.bytes[1 ..];
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn write(self: *FixedBuffer, bytes: []const Byte) usize {
|
||||
const writable = math.min(self.bytes.len, bytes.len);
|
||||
|
||||
copy(self.bytes, bytes);
|
||||
|
||||
self.bytes = self.bytes[writable ..];
|
||||
|
||||
return writable;
|
||||
}
|
||||
};
|
||||
|
||||
pub fn Functor(comptime Output: type, comptime Input: type) type {
|
||||
return struct {
|
||||
context: *const anyopaque,
|
||||
invoker: *const fn (capture: *const anyopaque, input: Input) Output,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn bind(comptime State: type, state: *const State, comptime invoker: fn (capture: *const State, input: Input) Output) Self {
|
||||
const is_zero_aligned = @alignOf(State) == 0;
|
||||
|
||||
return .{
|
||||
.context = if (is_zero_aligned) state else @ptrCast(state),
|
||||
|
||||
.invoker = struct {
|
||||
fn invoke(context: *const anyopaque, input: Input) Output {
|
||||
if (is_zero_aligned) {
|
||||
return invoker(@ptrCast(context), input);
|
||||
}
|
||||
|
||||
return invoker(@ptrCast(@alignCast(context)), input);
|
||||
}
|
||||
}.invoke,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn invoke(self: Self, input: Input) Output {
|
||||
return self.invoker(self.context, input);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Generator(comptime Output: type, comptime Input: type) type {
|
||||
return struct {
|
||||
context: *anyopaque,
|
||||
invoker: *const fn (capture: *anyopaque, input: Input) Output,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn bind(comptime State: type, state: *State, comptime invoker: fn (capture: *State, input: Input) Output) Self {
|
||||
const is_zero_aligned = @alignOf(State) == 0;
|
||||
|
||||
return .{
|
||||
.context = if (is_zero_aligned) state else @ptrCast(state),
|
||||
|
||||
.invoker = struct {
|
||||
fn invoke(context: *anyopaque, input: Input) Output {
|
||||
if (is_zero_aligned) {
|
||||
return invoker(@ptrCast(context), input);
|
||||
}
|
||||
|
||||
return invoker(@ptrCast(@alignCast(context)), input);
|
||||
}
|
||||
}.invoke,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn invoke(self: Self, input: Input) Output {
|
||||
return self.invoker(self.context, input);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Tag(comptime Element: type) type {
|
||||
return switch (@typeInfo(Element)) {
|
||||
.Enum => |info| info.tag_type,
|
||||
.Union => |info| info.tag_type orelse @compileError(@typeName(Element) ++ " has no tag type"),
|
||||
else => @compileError("expected enum or union type, found '" ++ @typeName(Element) ++ "'"),
|
||||
};
|
||||
}
|
||||
|
||||
pub const Writer = Generator(?usize, []const Byte);
|
||||
|
||||
pub fn allocate_copy(allocator: Allocator, source: []const Byte) AllocationError![]Byte {
|
||||
const allocation = try allocator.actions.reallocate(allocator.context, @returnAddress(), null, source.len);
|
||||
|
||||
copy(allocation, source);
|
||||
|
||||
return allocation;
|
||||
}
|
||||
|
||||
pub fn allocate_one(allocator: Allocator, value: anytype) AllocationError!*@TypeOf(value) {
|
||||
const Type = @TypeOf(value);
|
||||
const typeSize = @sizeOf(Type);
|
||||
|
||||
if (typeSize == 0) {
|
||||
@compileError("Cannot allocate memory for 0-byte sized type " ++ @typeName(Type));
|
||||
}
|
||||
|
||||
const allocation = @as(*Type, @ptrCast(@alignCast(try allocator.actions.reallocate(
|
||||
allocator.context,
|
||||
@returnAddress(),
|
||||
null,
|
||||
typeSize))));
|
||||
|
||||
allocation.* = value;
|
||||
|
||||
return allocation;
|
||||
}
|
||||
|
||||
pub fn bytes_of(value: anytype) []const Byte {
|
||||
const pointer_info = @typeInfo(@TypeOf(value)).Pointer;
|
||||
|
||||
return switch (pointer_info.size) {
|
||||
.One => @as([*]const Byte, @ptrCast(value))[0 .. @sizeOf(pointer_info.child)],
|
||||
.Slice => @as([*]const Byte, @ptrCast(value.ptr))[0 .. @sizeOf(pointer_info.child) * value.len],
|
||||
else => @compileError("`value` must be single-element pointer or slice type"),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn copy(target: []Byte, source: []const Byte) void {
|
||||
var index: usize = 0;
|
||||
|
||||
while (index < source.len) : (index += 1) {
|
||||
target[index] = source[index];
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ends_with(target: []const Byte, match: []const Byte) bool {
|
||||
if (target.len < match.len) {
|
||||
return false;
|
||||
}
|
||||
|
||||
{
|
||||
var index = @as(usize, 0);
|
||||
|
||||
while (index < match.len) : (index += 1) {
|
||||
if (target[target.len - (1 + index)] != match[match.len - (1 + index)]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn equals(target: []const Byte, match: []const Byte) bool {
|
||||
if (target.len != match.len) {
|
||||
return false;
|
||||
}
|
||||
|
||||
for (0 .. target.len) |index| {
|
||||
if (target[index] != match[index]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
var null_context = @as(usize, 0);
|
||||
|
||||
pub const null_writer = Writer.bind(usize, &null_context, struct {
|
||||
fn write(context: *usize, buffer: []const u8) ?usize {
|
||||
debug.assert(context.* == 0);
|
||||
|
||||
return buffer.len;
|
||||
}
|
||||
}.write);
|
||||
|
||||
pub fn slice_sentineled(comptime sen: anytype, ptr: [*:sen]const @TypeOf(sen)) [:sen]const @TypeOf(sen) {
|
||||
var len = @as(usize, 0);
|
||||
|
||||
while (ptr[len] != sen) {
|
||||
len += 1;
|
||||
}
|
||||
|
||||
return ptr[0 .. len:sen];
|
||||
}
|
||||
|
||||
pub fn tag_of(comptime value: anytype) Tag(@TypeOf(value)) {
|
||||
return @as(Tag(@TypeOf(value)), value);
|
||||
}
|
||||
|
||||
pub fn zero(target: []Byte) void {
|
||||
for (target) |*t| t.* = 0;
|
||||
}
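
As a sketch of how a consumer is expected to implement the new vtable-style `Allocator` (hypothetical example, not part of this change; the arena above follows the same pattern):

const io = @import("./io.zig");

const Failing = struct {
attempts: usize = 0,

// Matches Actions(Failing).deallocate.
fn deallocate(_: *Failing, _: []io.Byte) void {}

// Matches Actions(Failing).reallocate; always reports out-of-memory.
fn reallocate(self: *Failing, _: usize, _: ?[]io.Byte, _: usize) io.AllocationError![]io.Byte {
self.attempts += 1;

return error.OutOfMemory;
}
};

test "binding a custom allocator" {
var failing = Failing{};

const allocator = io.Allocator.bind(Failing, &failing, .{
.deallocate = Failing.deallocate,
.reallocate = Failing.reallocate,
});

try @import("std").testing.expectError(error.OutOfMemory, allocator.reallocate(null, 16));
}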
|
||||
|
|
|
@@ -1,97 +1,54 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
///
|
||||
/// Returns a dynamically sized stack capable of holding `Value`.
|
||||
///
|
||||
pub const ByteStack = Stack(io.Byte);
|
||||
|
||||
pub fn Stack(comptime Value: type) type {
|
||||
return struct {
|
||||
allocator: io.Allocator,
|
||||
capacity: usize = 0,
|
||||
values: []Value = &.{},
|
||||
capacity: usize,
|
||||
values: []Value,
|
||||
|
||||
///
|
||||
/// Stack type.
|
||||
///
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Clears all elements from `self` while preserving the current internal buffer.
|
||||
///
|
||||
/// To clean up memory allocations made by the stack and deinitialize it, see [deinit] instead.
|
||||
///
|
||||
pub fn clear(self: *Self) void {
|
||||
self.values = self.values[0 .. 0];
|
||||
}
|
||||
|
||||
///
|
||||
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
|
||||
///
|
||||
/// To clear all items from the stack while preserving the current internal buffer, see [clear] instead.
|
||||
///
|
||||
/// *Note* if the `capacity` field of `self` is a non-zero value, `allocator` must reference the same allocation
|
||||
/// strategy as the one originally used to allocate the current internal buffer.
|
||||
///
|
||||
pub fn deinit(self: *Self) void {
|
||||
pub fn free(self: *Self) void {
|
||||
if (self.capacity == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
io.deallocate(self.allocator, self.values.ptr[0 .. self.capacity]);
|
||||
self.allocator.deallocate(self.values.ptr[0 .. self.capacity]);
|
||||
|
||||
self.values = &.{};
|
||||
self.capacity = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to remove `amount` number of `Value`s from the stack, returning `true` if it was successful,
|
||||
/// otherwise `false` if the stack contains fewer elements than `amount`.
|
||||
///
|
||||
pub fn drop(self: *Self, amount: usize) bool {
|
||||
if (amount > self.values.len) {
|
||||
return false;
|
||||
}
|
||||
|
||||
self.values = self.values[0 .. self.values.len - amount];
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
|
||||
///
|
||||
/// The function returns [io.AllocatorError] if `allocator` could not commit the memory required to grow the
|
||||
/// internal buffer by `growth_amount`, leaving `self` in the same state that it was in prior to starting the
|
||||
/// grow.
|
||||
///
|
||||
/// Growing ahead of multiple push operations is useful when the upper bound of pushes is well-understood, as it
|
||||
/// can reduce the number of allocations required per push.
|
||||
///
|
||||
pub fn grow(self: *Self, growth_amount: usize) io.AllocationError!void {
|
||||
const grown_capacity = self.capacity + growth_amount;
|
||||
const values = (try io.allocate_many(self.allocator, grown_capacity, Value))[0 .. self.values.len];
|
||||
const buffer = try self.allocator.reallocate(null, @sizeOf(Value) * grown_capacity);
|
||||
|
||||
errdefer io.deallocate(self.allocator, values);
|
||||
errdefer self.allocator.deallocate(buffer);
|
||||
|
||||
if (self.capacity != 0) {
|
||||
for (0 .. self.values.len) |index| {
|
||||
values[index] = self.values[index];
|
||||
}
|
||||
|
||||
io.deallocate(self.allocator, self.values.ptr[0 .. self.capacity]);
|
||||
io.copy(buffer, io.bytes_of(self.values));
|
||||
self.allocator.deallocate(self.values.ptr[0 .. self.capacity]);
|
||||
}
|
||||
|
||||
self.values = values;
|
||||
self.values = @as([*]Value, @ptrCast(@alignCast(buffer)))[0 .. self.values.len];
|
||||
self.capacity = grown_capacity;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to remove the last element of `self` that was inserted, if one exists, returning it or `null` if
|
||||
/// `self` is empty.
|
||||
///
|
||||
pub fn make(allocator: io.Allocator) Self {
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.capacity = 0,
|
||||
.values = &.{},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn pop(self: *Self) ?Value {
|
||||
if (self.values.len == 0) {
|
||||
return null;
|
||||
|
@@ -104,59 +61,6 @@ pub fn Stack(comptime Value: type) type {
|
|||
return self.values[last_index];
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to push every `Value` in `values` to `self` using `allocator` to grow the internal buffer as
|
||||
/// necessary.
|
||||
///
|
||||
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
|
||||
/// internal buffer of `self` when necessary.
|
||||
///
|
||||
pub fn push_all(self: *Self, values: []const Value) io.AllocationError!void {
|
||||
const new_length = self.values.len + values.len;
|
||||
|
||||
if (new_length > self.capacity) {
|
||||
try self.grow(values.len + values.len);
|
||||
}
|
||||
|
||||
const offset_index = self.values.len;
|
||||
|
||||
self.values = self.values.ptr[0 .. new_length];
|
||||
|
||||
for (0 .. values.len) |index| {
|
||||
self.values[offset_index + index] = values[index];
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to push the `Value` in `value` to `self` by `amount` number of times using `allocator` to grow
|
||||
/// the internal buffer as necessary.
|
||||
///
|
||||
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
|
||||
/// internal buffer of `self` when necessary.
|
||||
///
|
||||
pub fn push_many(self: *Self, value: Value, amount: usize) io.AllocationError!void {
|
||||
const new_length = self.values.len + amount;
|
||||
|
||||
if (new_length >= self.capacity) {
|
||||
try self.grow(amount + amount);
|
||||
}
|
||||
|
||||
const offset_index = self.values.len;
|
||||
|
||||
self.values = self.values.ptr[0 .. new_length];
|
||||
|
||||
for (0 .. amount) |index| {
|
||||
self.values[offset_index + index] = value;
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to push the `Value` in `value` to `self` using `allocator` to grow the internal buffer as
|
||||
/// necessary.
|
||||
///
|
||||
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
|
||||
/// internal buffer of `self` when necessary.
|
||||
///
|
||||
pub fn push_one(self: *Self, value: Value) io.AllocationError!void {
|
||||
if (self.values.len == self.capacity) {
|
||||
try self.grow(math.max(1, self.capacity));
|
||||
|
@@ -170,21 +74,3 @@ pub fn Stack(comptime Value: type) type {
|
|||
}
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const ByteStack = Stack(io.Byte);
|
||||
|
||||
///
|
||||
/// Returns a [io.Writer] instance that binds a reference of `self` to the [write] operation.
|
||||
///
|
||||
pub fn stack_as_writer(self: *ByteStack) io.Writer {
|
||||
return io.Writer.bind(ByteStack, self, struct {
|
||||
fn write(stack: *ByteStack, bytes: []const io.Byte) ?usize {
|
||||
stack.push_all(bytes) catch return null;
|
||||
|
||||
return bytes.len;
|
||||
}
|
||||
}.write);
|
||||
}
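
A short usage sketch of the Stack API after this change (illustrative only; the backing `io.Allocator` is assumed to be supplied by the caller):

const io = @import("./io.zig");

const list = @import("./list.zig");

fn sum_with_stack(allocator: io.Allocator, values: []const u64) io.AllocationError!u64 {
var stack = list.Stack(u64).make(allocator);

defer stack.free();

try stack.push_all(values);

var total: u64 = 0;

// pop() returns null once the stack is empty.
while (stack.pop()) |value| {
total += value;
}

return total;
}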
|
||||
|
|
|
@@ -0,0 +1,304 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const list = @import("./list.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
pub fn Slab(comptime Value: type) type {
|
||||
return struct {
|
||||
next_index: usize,
|
||||
entries: EntryList,
|
||||
|
||||
const EntryList = list.Stack(union (enum) {
|
||||
value: Value,
|
||||
next_index: usize,
|
||||
});
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn lookup(self: Self, key: usize) ?Value {
|
||||
if (key == 0 or key > self.entries.values.len) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return switch (self.entries.values[key - 1]) {
|
||||
.value => |value| value,
|
||||
.next_index => null,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn free(self: *Self) void {
|
||||
self.entries.free();
|
||||
|
||||
self.next_index = 0;
|
||||
}
|
||||
|
||||
pub fn insert(self: *Self, value: Value) io.AllocationError!usize {
|
||||
if (self.next_index < self.entries.values.len) {
|
||||
const index = self.next_index;
|
||||
const entry = &self.entries.values[index];
|
||||
|
||||
debug.assert(entry.* == .next_index);
|
||||
|
||||
self.next_index = entry.next_index;
|
||||
entry.* = .{.value = value};
|
||||
|
||||
return index + 1;
|
||||
} else {
|
||||
try self.entries.push_one(.{.value = value});
|
||||
|
||||
self.next_index += 1;
|
||||
|
||||
return self.next_index;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn make(allocator: io.Allocator) Self {
|
||||
return .{
|
||||
.next_index = 0,
|
||||
.entries = EntryList.make(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn remove(self: *Self, key: usize) ?Value {
|
||||
if (key == 0 or key > self.entries.values.len) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const index = key - 1;
|
||||
const entry = &self.entries.values[index];
|
||||
|
||||
return switch (entry.*) {
|
||||
.next_index => null,
|
||||
|
||||
.value => get_value: {
|
||||
const value = entry.value;
|
||||
|
||||
entry.* = .{.next_index = self.next_index};
|
||||
self.next_index = index;
|
||||
|
||||
break: get_value value;
|
||||
},
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn Table(comptime Key: type, comptime Value: type, comptime traits: TableTraits(Key)) type {
|
||||
const load_max = 0.75;
|
||||
|
||||
return struct {
|
||||
allocator: io.Allocator,
|
||||
count: usize,
|
||||
entries: []?Entry,
|
||||
|
||||
pub const Entry = struct {
|
||||
key: Key,
|
||||
value: Value,
|
||||
|
||||
fn write_into(self: Entry, entry_table: []?Entry) bool {
|
||||
const hash_max = math.min(math.max_int(@typeInfo(usize).Int), entry_table.len);
|
||||
var hashed_key = math.wrap(traits.hash(self.key), math.min_int(@typeInfo(usize).Int), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (true) : (iterations += 1) {
|
||||
debug.assert(iterations < entry_table.len);
|
||||
|
||||
const table_entry = &(entry_table[hashed_key] orelse {
|
||||
entry_table[hashed_key] = .{
|
||||
.key = self.key,
|
||||
.value = self.value,
|
||||
};
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
if (traits.match(table_entry.key, self.key)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Iterable = struct {
|
||||
table: *Self,
|
||||
iterations: usize = 0,
|
||||
|
||||
pub fn next(self: *Iterable) ?Entry {
|
||||
while (self.iterations < self.table.entries.len) {
|
||||
defer self.iterations += 1;
|
||||
|
||||
if (self.table.entries[self.iterations]) |entry| {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
pub fn replace(self: *Self, key: Key, value: Value) io.AllocationError!?Entry {
|
||||
try self.rehash(load_max);
|
||||
|
||||
debug.assert(self.entries.len > self.count);
|
||||
|
||||
{
|
||||
const hash_max = math.min(math.max_int(@typeInfo(usize).Int), self.entries.len);
|
||||
var hashed_key = math.wrap(traits.hash(key), math.min_int(@typeInfo(usize).Int), hash_max);
|
||||
|
||||
while (true) {
|
||||
const entry = &(self.entries[hashed_key] orelse {
|
||||
self.entries[hashed_key] = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
self.count += 1;
|
||||
|
||||
return null;
|
||||
});
|
||||
|
||||
if (traits.match(entry.key, key)) {
|
||||
const original_entry = entry.*;
|
||||
|
||||
entry.* = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return original_entry;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn calculate_load_factor(self: Self) f32 {
|
||||
return if (self.entries.len == 0) 1 else @as(f32, @floatFromInt(self.count)) / @as(f32, @floatFromInt(self.entries.len));
|
||||
}
|
||||
|
||||
pub fn clear(self: *Self) void {
|
||||
for (self.entries) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
pub fn free(self: *Self) void {
|
||||
if (self.entries.len == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
self.allocator.deallocate(self.entries);
|
||||
|
||||
self.entries = &.{};
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
pub fn insert(self: *Self, key: Key, value: Value) io.AllocationError!bool {
|
||||
try self.rehash(load_max);
|
||||
|
||||
debug.assert(self.entries.len > self.count);
|
||||
|
||||
defer self.count += 1;
|
||||
|
||||
const entry = Entry{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return entry.write_into(self.entries);
|
||||
}
|
||||
|
||||
pub fn lookup(self: Self, key: Key) ?Value {
|
||||
if (self.count == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hash_max = math.min(math.max_int(@typeInfo(usize).Int), self.entries.len);
|
||||
var hashed_key = math.wrap(traits.hash(key), math.min_int(@typeInfo(usize).Int), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (iterations < self.count) : (iterations += 1) {
|
||||
const entry = &(self.entries[hashed_key] orelse return null);
|
||||
|
||||
if (traits.match(entry.key, key)) {
|
||||
return entry.value;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn make(allocator: io.Allocator) Self {
|
||||
return .{
|
||||
.allocator = allocator,
|
||||
.count = 0,
|
||||
.entries = &.{},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn rehash(self: *Self, max_load: f32) io.AllocationError!void {
|
||||
if (self.calculate_load_factor() <= max_load) {
|
||||
return;
|
||||
}
|
||||
|
||||
const min_count = math.max(1, self.count);
|
||||
const table_size = min_count * 2;
|
||||
const allocation = @as([*]?Entry, @ptrCast(@alignCast(try self.allocator.reallocate(null, @sizeOf(?Entry) * table_size))))[0 .. table_size];
|
||||
|
||||
errdefer self.allocator.deallocate(allocation);
|
||||
|
||||
self.entries = replace_table: {
|
||||
for (allocation) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
if (self.entries.len != 0) {
|
||||
for (self.entries) |maybe_entry| {
|
||||
if (maybe_entry) |entry| {
|
||||
debug.assert(entry.write_into(allocation));
|
||||
}
|
||||
}
|
||||
|
||||
self.allocator.deallocate(self.entries);
|
||||
}
|
||||
|
||||
break: replace_table allocation;
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn TableTraits(comptime Key: type) type {
|
||||
return struct {
|
||||
hash: fn (key: Key) usize,
|
||||
match: fn (key: Key, key: Key) bool,
|
||||
};
|
||||
}
|
||||
|
||||
pub const string_table_traits = TableTraits([]const io.Byte){
|
||||
.hash = struct {
|
||||
fn hash(key: []const io.Byte) usize {
|
||||
var hash_code = @as(usize, 5381);
|
||||
|
||||
for (key) |byte| {
|
||||
hash_code = ((hash_code << 5) + hash_code) + byte;
|
||||
}
|
||||
|
||||
return hash_code;
|
||||
}
|
||||
}.hash,
|
||||
|
||||
.match = io.equals,
|
||||
};
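
A brief usage sketch of the string-keyed Table introduced above (illustrative only, written as if it sat alongside the definitions in this file; the io import mirrors the one at the top of the file and `counts` is a hypothetical table owned by the caller):

const io = @import("./io.zig");

fn bump_count(counts: *Table([]const io.Byte, usize, string_table_traits), word: []const io.Byte) io.AllocationError!void {
if (counts.lookup(word)) |count| {
// replace() returns the previous entry; it is not needed here.
_ = try counts.replace(word, count + 1);
} else {
_ = try counts.insert(word, 1);
}
}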
|
|
@@ -1,147 +1,21 @@
|
|||
const std = @import("std");
|
||||
|
||||
///
|
||||
/// Errors that may occur during checked integer arithmetic operations.
|
||||
///
|
||||
pub const CheckedArithmeticError = error {
|
||||
IntOverflow,
|
||||
};
|
||||
|
||||
///
|
||||
/// Returns the float type described by `float`.
|
||||
///
|
||||
pub fn Float(comptime float: std.builtin.Type.Float) type {
|
||||
return @Type(.{.Float = float});
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the integer type described by `int`.
|
||||
///
|
||||
pub fn Int(comptime int: std.builtin.Type.Int) type {
|
||||
return @Type(.{.Int = int});
|
||||
}
|
||||
|
||||
///
|
||||
/// Two-dimensional vector type.
|
||||
///
|
||||
pub const Vector2 = extern struct {
|
||||
x: f32,
|
||||
y: f32,
|
||||
|
||||
///
|
||||
/// A [Vector2] with a value of `0` assigned to all of the components.
|
||||
///
|
||||
pub const zero = Vector2{.x = 0, .y = 0};
|
||||
};
|
||||
|
||||
///
|
||||
/// Attempts to perform a checked addition between `a` and `b`, returning the result or [CheckedArithmeticError] if the
|
||||
/// operation tried to invoke safety-checked behavior.
|
||||
///
|
||||
/// `checked_add` can be seen as an alternative to the language-native addition operator (+) that exposes the safety-
|
||||
/// checked behavior in the form of an error type that may be caught or tried on.
|
||||
///
|
||||
pub fn checked_add(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a + b) {
|
||||
const result = @addWithOverflow(a, b);
|
||||
|
||||
if (result.@"1" != 0) {
|
||||
return error.IntOverflow;
|
||||
}
|
||||
|
||||
return result.@"0";
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to perform a checked integer cast to the type expressed by `int` on `value`, returning the result or
|
||||
/// [CheckedArithmeticError] if the operation tried to invoke safety-checked behavior.
|
||||
///
|
||||
/// `checked_cast` can be seen as an alternative to the language-native `@intCast` builtin that exposes the safety-
|
||||
/// checked behavior in the form of an error type that may be caught or tried on.
|
||||
///
|
||||
pub fn checked_cast(comptime int: std.builtin.Type.Int, value: anytype) CheckedArithmeticError!Int(int) {
|
||||
if ((value < min_int(int)) or (value > max_int(int))) {
|
||||
return error.IntOverflow;
|
||||
}
|
||||
|
||||
return @intCast(Int(int), value);
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to perform a checked multiplication between `a` and `b`, returning the result or [CheckedArithmeticError]
|
||||
/// if the operation tried to invoke safety-checked behavior.
|
||||
///
|
||||
/// `checked_mul` can be seen as an alternative to the language-native multiplication operator (*) that exposes the
|
||||
/// safety-checked behavior in the form of an error type that may be caught or tried on.
|
||||
///
|
||||
pub fn checked_mul(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a * b) {
|
||||
const result = @mulWithOverflow(a, b);
|
||||
|
||||
if (result.@"1" != 0) {
|
||||
return error.IntOverflow;
|
||||
}
|
||||
|
||||
return result.@"0";
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to perform a checked subtraction between `a` and `b`, returning the result or [CheckedArithmeticError] if
|
||||
/// the operation tried to invoke safety-checked behavior.
|
||||
///
|
||||
/// `checked_sub` can be seen as an alternative to the language-native subtraction operator (-) that exposes the safety-
|
||||
/// checked behavior in the form of an error type that may be caught or tried on.
|
||||
///
|
||||
pub fn checked_sub(a: anytype, b: anytype) CheckedArithmeticError!@TypeOf(a - b) {
|
||||
const result = @subWithOverflow(a, b);
|
||||
|
||||
if (result.@"1" != 0) {
|
||||
return error.IntOverflow;
|
||||
}
|
||||
|
||||
return result.@"0";
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns `value` clamped between the inclusive bounds of `lower` and `upper`.
|
||||
///
|
||||
pub fn clamp(value: anytype, lower: anytype, upper: anytype) @TypeOf(value, lower, upper) {
|
||||
return max(lower, min(upper, value));
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns `true` if `value` is clamped within the inclusive bounds of `lower` and `upper`.
|
||||
///
|
||||
pub fn is_clamped(value: anytype, lower: anytype, upper: anytype) bool {
|
||||
return (value >= lower) and (value <= upper);
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the maximum value between `a` and `b`.
|
||||
///
|
||||
pub fn max(a: anytype, b: anytype) @TypeOf(a, b) {
|
||||
return @max(a, b);
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the maximum value that the integer described by `int` may express.
|
||||
///
|
||||
pub fn max_int(comptime int: std.builtin.Type.Int) comptime_int {
|
||||
const bit_count = int.bits;
|
||||
|
||||
if (bit_count == 0) return 0;
|
||||
|
||||
return (1 << (bit_count - @boolToInt(int.signedness == .signed))) - 1;
|
||||
return (1 << (bit_count - @intFromBool(int.signedness == .signed))) - 1;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the minimum value between `a` and `b`.
|
||||
///
|
||||
pub fn min(a: anytype, b: anytype) @TypeOf(a, b) {
|
||||
return @min(a, b);
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the minimum value that the integer described by `int` may express.
|
||||
///
|
||||
pub fn min_int(comptime int: std.builtin.Type.Int) comptime_int {
|
||||
if (int.signedness == .unsigned) return 0;
|
||||
|
||||
|
@@ -152,9 +26,6 @@ pub fn min_int(comptime int: std.builtin.Type.Int) comptime_int {
|
|||
return -(1 << (bit_count - 1));
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns `value` wrapped around the inclusive bounds of `lower` and `upper`.
|
||||
///
|
||||
pub fn wrap(value: anytype, lower: anytype, upper: anytype) @TypeOf(value, lower, upper) {
|
||||
const range = upper - lower;
|
||||
|
||||
|
|
|
@@ -1,178 +0,0 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
const std = @import("std");
|
||||
|
||||
///
|
||||
/// Addressable mapping of integers described by `index_int` to values of type `Value`.
|
||||
///
|
||||
/// Slab maps are similar to slot maps in that they have O(1) insertion and removal, however, use a flat table layout
|
||||
/// instead of parallel arrays. This reduces memory usage in some cases and can be useful for data that does not need to
|
||||
/// be quickly iterated over, as values ordering is not guaranteed.
|
||||
///
|
||||
/// *Note* `index_int` values may be as big or as small as desired per the use-case of the consumer, however, integers
|
||||
/// smaller than `usize` may result in the map reporting it is out of memory due to exhausting the addressable space
|
||||
/// provided by the integer.
|
||||
///
|
||||
pub fn Map(comptime index_int: std.builtin.Type.Int, comptime Value: type) type {
|
||||
return struct {
|
||||
allocator: io.Allocator,
|
||||
free_index: Index = 0,
|
||||
count: Index = 0,
|
||||
table: []Entry = &.{},
|
||||
|
||||
///
|
||||
/// Table entry which may either store an inserted value or an index to the next free entry in the table.
|
||||
///
|
||||
const Entry = union (enum) {
|
||||
free_index: Index,
|
||||
value: Value,
|
||||
};
|
||||
|
||||
///
|
||||
/// Used for indexing into the slab map.
|
||||
///
|
||||
const Index = math.Int(index_int);
|
||||
|
||||
///
|
||||
/// Slab map type.
|
||||
///
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Overwrites the value referenced by `index` in `self`.
|
||||
///
|
||||
pub fn assign(self: *Self, index: Index, value: Value) void {
|
||||
const entry = &self.table[index];
|
||||
|
||||
debug.assert(entry.* == .value);
|
||||
|
||||
entry.value = value;
|
||||
}
|
||||
|
||||
///
|
||||
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
|
||||
///
|
||||
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
|
||||
/// strategy as the one originally used to allocate the current table.
|
||||
///
|
||||
pub fn deinit(self: *Self, allocator: io.Allocator) void {
|
||||
if (self.table.len == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
io.deallocate(allocator, self.table);
|
||||
|
||||
self.table = &.{};
|
||||
self.count = 0;
|
||||
self.free_index = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Fetches the value referenced by `index` in `self`, returning it.
|
||||
///
|
||||
pub fn fetch(self: *Self, index: Index) *Value {
|
||||
const entry = &self.table[index];
|
||||
|
||||
debug.assert(entry.* == .value);
|
||||
|
||||
return &entry.value;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to grow the internal buffer of `self` by `growth_amount` using `allocator`.
|
||||
///
|
||||
/// The function returns [io.AllocatorError] if `allocator` could not commit the memory required to grow the
|
||||
/// table by `growth_amount`, leaving `self` in the same state that it was in prior to starting the grow.
|
||||
///
|
||||
/// Growing ahead of multiple insertion operations is useful when the upper bound of insertions is well-
|
||||
/// understood, as it can reduce the number of allocations required per insertion.
|
||||
///
|
||||
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
|
||||
/// strategy as the one originally used to allocate the current table.
|
||||
///
|
||||
pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
|
||||
const grown_capacity = self.table.len + growth_amount;
|
||||
const entries = try io.allocate_many(allocator, grown_capacity, Entry);
|
||||
|
||||
errdefer io.deallocate(allocator, entries);
|
||||
|
||||
if (self.table.len != 0) {
|
||||
for (0 .. self.table.len) |index| {
|
||||
entries[index] = self.table[index];
|
||||
}
|
||||
|
||||
for (self.table.len .. entries.len) |index| {
|
||||
entries[index] = .{.free_index = 0};
|
||||
}
|
||||
|
||||
io.deallocate(allocator, self.table);
|
||||
}
|
||||
|
||||
self.table = entries;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to insert `value` into `self` as a new entry using `allocator` as the allocation strategy,
|
||||
/// returning an index value representing a reference to the inserted value that may be queried through `self`
|
||||
/// after.
|
||||
///
|
||||
/// The function returns [io.AllocationError] if `allocator` could not commit the memory required to grow the
|
||||
/// internal buffer of `self` when necessary.
|
||||
///
|
||||
/// *Note* if the `table` field of `self` is an allocated slice, `allocator` must reference the same allocation
|
||||
/// strategy as the one originally used to allocate the current table.
|
||||
///
|
||||
pub fn insert(self: *Self, value: Value) io.AllocationError!Index {
|
||||
if (self.count == self.table.len) {
|
||||
try self.grow(self.allocator, math.max(1, self.count));
|
||||
}
|
||||
|
||||
if (self.free_index == self.count) {
|
||||
const entry_index = self.count;
|
||||
const entry = &self.table[entry_index];
|
||||
|
||||
entry.* = .{.value = value};
|
||||
|
||||
self.count += 1;
|
||||
self.free_index += 1;
|
||||
|
||||
return entry_index;
|
||||
}
|
||||
|
||||
const entry_index = self.free_index;
|
||||
const entry = &self.table[self.free_index];
|
||||
|
||||
debug.assert(entry.* == .free_index);
|
||||
|
||||
self.count += 1;
|
||||
self.free_index = entry.free_index;
|
||||
entry.* = .{.value = value};
|
||||
|
||||
return entry_index;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns `true` if `self` contains no values, otherwise `false`.
|
||||
///
|
||||
pub fn is_empty(self: Self) bool {
|
||||
return self.count == 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Removes the value referenced by `index` from `self`.
|
||||
///
|
||||
pub fn remove(self: *Self, index: Index) void {
|
||||
const entry = &self.table[index];
|
||||
|
||||
debug.assert(entry.* == .value);
|
||||
|
||||
self.count -= 1;
|
||||
entry.* = .{.free_index = self.free_index};
|
||||
self.free_index = index;
|
||||
}
|
||||
};
|
||||
}
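As a side note on the free-index strategy used by `insert` and `remove` above: a removed entry stores the index of the next free entry, forming an intrusive free list so holes are reused before the table grows. A minimal standalone sketch, with an illustrative `Entry` union that is not part of this codebase:

```zig
const std = @import("std");

// Illustrative only: a removed slot records the previous free head, so the next
// insert can reuse it in O(1) instead of appending.
const Entry = union(enum) {
    value: i32,
    free_index: usize,
};

test "a removed slot is reused by the next insert" {
    var table = [_]Entry{ .{ .value = 10 }, .{ .value = 20 } };
    var free_index: usize = table.len;

    // Remove slot 0: it records the previous free head.
    table[0] = .{ .free_index = free_index };
    free_index = 0;

    // Insert: pop the free-list head and store the new value there.
    const reused = free_index;
    free_index = table[reused].free_index;
    table[reused] = .{ .value = 30 };

    try std.testing.expectEqual(@as(usize, 0), reused);
    try std.testing.expectEqual(@as(usize, 2), free_index);
}
```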
@ -1,236 +0,0 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
///
|
||||
/// Returns a dense mapping of slots that may store `Element`s indexable by a [Slot], where `key` defines how many
/// bits the resulting [Slot] is made from.
|
||||
///
|
||||
pub fn Map(comptime key: Key, comptime Element: type) type {
|
||||
const KeySlot = Slot(key);
|
||||
const Index = math.Unsigned(key.index_bits);
|
||||
|
||||
return struct {
|
||||
capacity: usize,
|
||||
values: []Element,
|
||||
slots: [*]KeySlot,
|
||||
erase: [*]Index,
|
||||
next_free: Index,
|
||||
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Clears all elements from the slots in `self`.
|
||||
///
|
||||
/// *Note* that clearing the slots is not the same as deinitializing them, as it does not deallocate any memory
|
||||
/// that has already been allocated to the slots structure.
|
||||
///
|
||||
pub fn clear(self: *Self) void {
|
||||
self.next_free = 0;
|
||||
self.values = self.values[0 .. 0];
|
||||
|
||||
{
|
||||
var index = @as(usize, 0);
|
||||
|
||||
while (index < self.capacity) : (index += 1) {
|
||||
const slot = &self.slots[index];
|
||||
|
||||
slot.salt = math.max(slot.salt +% 1, 1);
|
||||
slot.index = index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Frees all memory allocated by `allocator` to self.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn deinit(self: *Self, allocator: io.Allocator) void {
|
||||
io.deallocate(allocator, self.values.ptr);
|
||||
io.deallocate(allocator, self.slots);
|
||||
io.deallocate(allocator, self.erase);
|
||||
|
||||
self.values = &.{};
|
||||
self.slots = null;
|
||||
self.erase = null;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to fetch the element referenced by `slot` from `self`, returning it or `null` if `slot`
|
||||
/// does not reference a valid element.
|
||||
///
|
||||
pub fn fetch(self: Self, slot: KeySlot) ?*Element {
|
||||
if (slot.index >= self.values.len) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const redirect = &self.slots[slot.index];
|
||||
|
||||
if (slot.salt != redirect.salt) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return &self.values[redirect.index];
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to transactionally grow `self` by `growth_amount` using `allocator`, returning a
|
||||
/// [io.AllocationError] if it failed.
|
||||
///
|
||||
/// Should growing fail, `self` is left in an unmodified state.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn grow(self: *Self, allocator: io.Allocator, growth_amount: usize) io.AllocationError!void {
|
||||
const grown_capacity = self.capacity + growth_amount;
|
||||
const values = try io.allocate_many(Element, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, values);
|
||||
|
||||
const slots = try io.allocate_many(KeySlot, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
const erase = try io.allocate_many(Index, grown_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
self.values = values;
|
||||
self.slots = slots.ptr;
|
||||
self.erase = erase.ptr;
|
||||
self.capacity = grown_capacity;
|
||||
|
||||
// Add new values to the freelist
|
||||
{
|
||||
var index = @intCast(Index, self.values.len);
|
||||
|
||||
while (index < self.capacity) : (index += 1) {
|
||||
const slot = &self.slots.?[index];
|
||||
|
||||
slot.salt = 1;
|
||||
slot.index = index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to return an initialized slot map with an initial capacity of `initial_capacity` and `allocator` as
|
||||
/// the memory allocation strategy.
|
||||
///
|
||||
/// Upon failure, a [io.AllocationError] is returned instead.
|
||||
///
|
||||
pub fn init(allocator: io.Allocator, initial_capacity: usize) io.AllocationError!Self {
|
||||
const values = try io.allocate_many(Element, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, values);
|
||||
|
||||
const slots = try io.allocate_many(KeySlot, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, slots);
|
||||
|
||||
const erase = try io.allocate_many(Index, initial_capacity, allocator);
|
||||
|
||||
errdefer io.deallocate(allocator, erase);
|
||||
|
||||
return Self{
|
||||
.capacity = initial_capacity,
|
||||
.values = values[0 .. 0],
|
||||
.slots = slots.ptr,
|
||||
.erase = erase.ptr,
|
||||
.next_free = 0,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to insert `value` into `self`, growing the internal buffer with `allocator` if it is full and
|
||||
/// returning a `Slot` of `key` referencing the inserted element or a [io.AllocationError] if it failed.
|
||||
///
|
||||
/// *Note*: if `self` already contains allocated memory then `allocator` must reference the same [io.Allocator]
|
||||
/// that was used to create the already-allocated memory.
|
||||
///
|
||||
pub fn insert(self: *Self, allocator: io.Allocator, value: Element) io.AllocationError!KeySlot {
|
||||
if (self.values.len == self.capacity) {
|
||||
try self.grow(allocator, math.max(usize, 1, self.capacity));
|
||||
}
|
||||
|
||||
const index_of_redirect = self.next_free;
|
||||
const redirect = &self.slots.?[index_of_redirect];
|
||||
|
||||
// redirect.index points to the next free slot.
|
||||
self.next_free = redirect.index;
|
||||
redirect.index = @intCast(Index, self.values.len);
|
||||
self.values = self.values.ptr[0 .. self.values.len + 1];
|
||||
self.values[redirect.index] = value;
|
||||
self.erase.?[redirect.index] = index_of_redirect;
|
||||
|
||||
return KeySlot{
|
||||
.index = index_of_redirect,
|
||||
.salt = redirect.salt,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to remove the element referenced by `slot` from `self`, returning `true` if it was successful or
|
||||
/// `false` if `slot` does not reference a valid slot.
|
||||
///
|
||||
pub fn remove(self: *Self, slot: KeySlot) bool {
|
||||
const redirect = &self.slots.?[slot.index];
|
||||
|
||||
if (slot.salt != redirect.salt) {
|
||||
return false;
|
||||
}
|
||||
|
||||
const free_index = redirect.index;
|
||||
|
||||
self.values = self.values[0 .. (self.values.len - 1)];
|
||||
|
||||
if (self.values.len > 0) {
|
||||
const free_value = &self.values[free_index];
|
||||
const free_erase = &self.erase.?[free_index];
|
||||
const last_value = &self.values[self.values.len];
|
||||
const last_erase = &self.erase.?[self.values.len];
|
||||
|
||||
free_value.* = last_value.*;
|
||||
free_erase.* = last_erase.*;
|
||||
self.slots.?[free_erase.*].index = free_index;
|
||||
}
|
||||
|
||||
redirect.salt = math.max(Index, redirect.salt +% 1, 1);
|
||||
redirect.index = self.next_free;
|
||||
self.next_free = slot.index;
|
||||
|
||||
return true;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// Describes the memory layout of an element-slot mapping.
|
||||
///
|
||||
pub const Key = struct {
|
||||
index_bits: usize,
|
||||
salt_bits: usize,
|
||||
};
|
||||
|
||||
///
|
||||
/// References a slot in a slot mapping.
|
||||
///
|
||||
pub fn Slot(comptime key: Key) type {
|
||||
return extern struct {
|
||||
index: math.Unsigned(key.index_bits),
|
||||
salt: math.Unsigned(key.salt_bits),
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// [Key] that uses the same number of bits as a [usize].
|
||||
///
|
||||
pub const addressable_key = Key{
|
||||
.index_bits = (@bitSizeOf(usize) / 2),
|
||||
.salt_bits = (@bitSizeOf(usize) / 2),
|
||||
};
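For context on the `salt` field: it is what turns a plain index into a generational handle. The standalone sketch below (it does not use the removed `Map` type, and `DemoSlot` is an illustrative name) shows the check that makes a stale slot fail once its entry has been recycled:

```zig
const std = @import("std");

// Illustrative only: bumping the salt on removal invalidates every previously
// handed-out slot that still carries the old salt, while keeping the salt non-zero.
const DemoSlot = struct { index: u32, salt: u32 };

test "a stale slot is rejected once its entry is recycled" {
    var salts = [_]u32{ 1, 1 };
    const handed_out = DemoSlot{ .index = 0, .salt = salts[0] };

    // Removing index 0 bumps its salt, mirroring `remove` above.
    salts[0] = @max(salts[0] +% 1, 1);

    try std.testing.expect(handed_out.salt != salts[handed_out.index]);
}
```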
@ -1,299 +0,0 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
///
|
||||
/// Hash type used by tables and their associated structures.
|
||||
///
|
||||
pub const Hash = u64;
|
||||
|
||||
///
|
||||
/// Returns a table type of `Key`-`Value` pairs implementing a hash-only approach to key-value storage.
|
||||
///
|
||||
/// Entries are hashed using the `keyer` and collisions are resolved by looking for another empty space nearby. This
|
||||
/// repeats until the load factor exceeds the implementation-defined load maximum, at which point the table will rehash
|
||||
/// itself to acquire more capacity.
|
||||
///
|
||||
pub fn Hashed(comptime Key: type, comptime Value: type, comptime keyer: Keyer(Key)) type {
|
||||
const hash_info = @typeInfo(Hash).Int;
|
||||
const load_max = 0.75;
|
||||
const growth_factor = 0.6;
|
||||
|
||||
return struct {
|
||||
allocator: io.Allocator,
|
||||
count: usize = 0,
|
||||
table: []?Entry = &.{},
|
||||
|
||||
///
|
||||
/// Key-value pair bundling.
|
||||
///
|
||||
pub const Entry = struct {
|
||||
key: Key,
|
||||
value: Value,
|
||||
|
||||
///
|
||||
/// Attempts to write `self` into `entry_table`, returning `true` if no identical entry already existed,
|
||||
/// otherwise `false`.
|
||||
///
|
||||
/// Note that this does not grow the memory pointed to by `entry_table` in any way, meaning that
|
||||
/// completely filled entry tables cannot perform the write at all and will invoke safety-checked behavior.
|
||||
///
|
||||
fn write_into(self: Entry, entry_table: []?Entry) bool {
|
||||
const hash_max = math.min(math.max_int(hash_info), entry_table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(self.key), math.min_int(hash_info), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (true) : (iterations += 1) {
|
||||
debug.assert(iterations < entry_table.len);
|
||||
|
||||
const table_entry = &(entry_table[hashed_key] orelse {
|
||||
entry_table[hashed_key] = .{
|
||||
.key = self.key,
|
||||
.value = self.value,
|
||||
};
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
if (keyer.comparer(table_entry.key, self.key) == 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Iterable wrapper for [Hashed] instances to make unordered traversal of key-value entries relatively trivial.
|
||||
///
|
||||
pub const Iterable = struct {
|
||||
hashed_map: *Self,
|
||||
iterations: usize = 0,
|
||||
|
||||
///
|
||||
/// Attempts to move past the current iteration of `self` and onto the next key-value entry, returning it or
|
||||
/// `null` if there are no more elements in the referenced map.
|
||||
///
|
||||
pub fn next(self: *Iterable) ?Entry {
|
||||
while (self.iterations < self.hashed_map.table.len) {
|
||||
defer self.iterations += 1;
|
||||
|
||||
if (self.hashed_map.table[self.iterations]) |entry| {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Table type.
|
||||
///
|
||||
const Self = @This();
|
||||
|
||||
///
|
||||
/// Attempts to write the `key`-`value` pair into `self`, using `allocator` as the memory allocation strategy,
|
||||
/// and overwriting any value stored with a matching `key` and returning it if one existed.
|
||||
///
|
||||
/// The function returns [AllocationError] instead if `allocator` cannot commit the memory required to grow the
|
||||
/// entry table of `self` when necessary.
|
||||
///
|
||||
pub fn assign(self: *Self, key: Key, value: Value) io.AllocationError!?Entry {
|
||||
if (self.calculate_load_factor() >= load_max) {
|
||||
const growth_size = @intToFloat(f64, math.max(1, self.table.len)) * growth_factor;
|
||||
|
||||
if (growth_size > math.max_int(@typeInfo(usize).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
try self.rehash(@floatToInt(usize, growth_size));
|
||||
}
|
||||
|
||||
debug.assert(self.table.len > self.count);
|
||||
|
||||
{
|
||||
const hash_max = math.min(math.max_int(hash_info), self.table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
|
||||
|
||||
while (true) {
|
||||
const entry = &(self.table[hashed_key] orelse {
|
||||
self.table[hashed_key] = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return null;
|
||||
});
|
||||
|
||||
if (keyer.comparer(entry.key, key) == 0) {
|
||||
const original_entry = entry.*;
|
||||
|
||||
entry.* = .{
|
||||
.key = key,
|
||||
.value = value,
|
||||
};
|
||||
|
||||
return original_entry;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the calculated load factor of `self` at the moment.
|
||||
///
|
||||
pub fn calculate_load_factor(self: Self) f32 {
|
||||
return if (self.table.len == 0) 1 else @intToFloat(f32, self.count) / @intToFloat(f32, self.table.len);
|
||||
}
|
||||
|
||||
///
|
||||
/// Clears all entries from `self`, resetting the count to `0`.
|
||||
///
|
||||
/// To clean up memory allocations made by the table and deinitialize it, see [deinit] instead.
|
||||
///
|
||||
pub fn clear(self: *Self) void {
|
||||
for (self.table) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Deinitializes `self` and sets it to an invalid state, freeing all memory allocated by `allocator`.
|
||||
///
|
||||
/// To clear all items from the table while preserving the current capacity, see [clear] instead.
|
||||
///
|
||||
pub fn deinit(self: *Self) void {
|
||||
if (self.table.len == 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
io.deallocate(self.allocator, self.table);
|
||||
|
||||
self.table = &.{};
|
||||
self.count = 0;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to write the `key`-`value` pair into `self`, using `allocator` as the memory allocation strategy,
|
||||
/// if no value already exists with a matching `key`, returning `true` if it was inserted, otherwise `false`.
|
||||
///
|
||||
/// The function returns [AllocationError] instead if `allocator` cannot commit the memory required to grow the
|
||||
/// entry table of `self` when necessary.
|
||||
///
|
||||
pub fn insert(self: *Self, key: Key, value: Value) io.AllocationError!bool {
|
||||
if (self.calculate_load_factor() >= load_max) {
|
||||
const growth_amount = @intToFloat(f64, self.table.len) * growth_factor;
|
||||
const min_size = 1;
|
||||
|
||||
try self.rehash(self.table.len + math.max(min_size, @floatToInt(usize, growth_amount)));
|
||||
}
|
||||
|
||||
debug.assert(self.table.len > self.count);
|
||||
|
||||
defer self.count += 1;
|
||||
|
||||
return (Entry{
|
||||
.key = key,
|
||||
.value = value,
|
||||
}).write_into(self.table);
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to find an entry in `self` matching `key`, returning it or `null` if no matching entry was found.
|
||||
///
|
||||
pub fn lookup(self: Self, key: Key) ?Value {
|
||||
if (self.count == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const hash_max = math.min(math.max_int(hash_info), self.table.len);
|
||||
var hashed_key = math.wrap(keyer.hasher(key), math.min_int(hash_info), hash_max);
|
||||
var iterations = @as(usize, 0);
|
||||
|
||||
while (iterations < self.count) : (iterations += 1) {
|
||||
const entry = &(self.table[hashed_key] orelse return null);
|
||||
|
||||
if (keyer.comparer(entry.key, key) == 0) {
|
||||
return entry.value;
|
||||
}
|
||||
|
||||
hashed_key = (hashed_key +% 1) % hash_max;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to reallocate and regenerate the table capacity in `self` using `allocator` to be equal to or
|
||||
/// greater than `requested_range`, returning [io.AllocationError] if `allocator` cannot commit the memory
|
||||
/// required for the table capacity size.
|
||||
///
|
||||
pub fn rehash(self: *Self, requested_range: usize) io.AllocationError!void {
|
||||
const old_table = self.table;
|
||||
|
||||
self.table = try io.allocate_many(self.allocator, math.max(requested_range, self.count), ?Entry);
|
||||
|
||||
errdefer {
|
||||
io.deallocate(self.allocator, self.table);
|
||||
|
||||
self.table = old_table;
|
||||
}
|
||||
|
||||
for (self.table) |*entry| {
|
||||
entry.* = null;
|
||||
}
|
||||
|
||||
if (old_table.len != 0)
|
||||
{
|
||||
for (old_table) |maybe_entry| {
|
||||
if (maybe_entry) |entry| {
|
||||
debug.assert(entry.write_into(self.table));
|
||||
}
|
||||
}
|
||||
|
||||
io.deallocate(self.allocator, old_table);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
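For reference, the probing described in the doc comment above (hash the key, then walk forward with wraparound until the key or an empty bucket is found) looks roughly like this in isolation; `probe_index` and the use of `std.hash.Wyhash` are assumptions for the sketch, not this module's API:

```zig
const std = @import("std");

// Illustrative open-addressing probe: start at the hashed bucket and walk
// forward, wrapping around, until the key or an empty bucket is found.
fn probe_index(comptime len: usize, buckets: *const [len]?[]const u8, key: []const u8) usize {
    var index: usize = @intCast(std.hash.Wyhash.hash(0, key) % len);

    while (buckets[index]) |existing| : (index = (index + 1) % len) {
        if (std.mem.eql(u8, existing, key)) break;
    }

    return index;
}

test "two keys end up in distinct buckets" {
    var buckets = [_]?[]const u8{null} ** 4;

    buckets[probe_index(4, &buckets, "width")] = "width";
    buckets[probe_index(4, &buckets, "height")] = "height";

    try std.testing.expect(probe_index(4, &buckets, "width") != probe_index(4, &buckets, "height"));
}
```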
///
|
||||
/// Returns a function group for defining table keying operations performable on `Key`.
|
||||
///
|
||||
pub fn Keyer(comptime Key: type) type {
|
||||
return struct {
|
||||
hasher: fn (key: Key) Hash,
|
||||
comparer: fn (key_a: Key, key_b: Key) isize,
|
||||
};
|
||||
}
|
||||
|
||||
///
|
||||
/// A standard [Keyer] for `[]const u8` types that provides general-purpose string keying.
|
||||
///
|
||||
pub const string_keyer = Keyer([]const u8){
|
||||
.hasher = hash_string,
|
||||
.comparer = io.compare,
|
||||
};
|
||||
|
||||
///
|
||||
/// Returns a general-purpose, non-cryptographically safe hash value for `string`.
|
||||
///
|
||||
pub fn hash_string(string: []const u8) Hash {
|
||||
var hash_code = @as(Hash, 5381);
|
||||
|
||||
for (string) |byte| {
|
||||
hash_code = ((hash_code << 5) + hash_code) + byte;
|
||||
}
|
||||
|
||||
return hash_code;
|
||||
}
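The `hash_string` above is the classic djb2 recipe (hash = hash * 33 + byte). A standalone restatement for reference; the wrapping operators are my adjustment to avoid overflow traps and are not in the original:

```zig
const std = @import("std");

// djb2: start at 5381 and fold each byte in as hash * 33 + byte, wrapping on overflow.
fn djb2(string: []const u8) u64 {
    var hash_code: u64 = 5381;

    for (string) |byte| {
        hash_code = ((hash_code << 5) +% hash_code) +% byte;
    }

    return hash_code;
}

test "djb2 is deterministic and separates nearby strings" {
    try std.testing.expectEqual(djb2("ona"), djb2("ona"));
    try std.testing.expect(djb2("ona") != djb2("kym"));
}
```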
@ -1,298 +1,108 @@
|
|||
const debug = @import("./debug.zig");
|
||||
|
||||
const io = @import("./io.zig");
|
||||
|
||||
const math = @import("./math.zig");
|
||||
|
||||
const std = @import("std");
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const DecimalFormat = struct {
|
||||
delimiter: []const u8 = "",
|
||||
positive_prefix: enum {none, plus, space} = .none,
|
||||
};
|
||||
delimiter: []const io.Byte,
|
||||
positive_prefix: enum {none, plus, space},
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const HexadecimalFormat = struct {
|
||||
delimiter: []const u8 = "",
|
||||
positive_prefix: enum {none, plus, space} = .none,
|
||||
casing: enum {lower, upper} = .lower,
|
||||
};
|
||||
pub fn parse(self: DecimalFormat, utf8: []const io.Byte, comptime Decimal: type) ?Decimal {
|
||||
if (utf8.len == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
///
|
||||
/// Errors that may occur during any kind of utf8-encoded parsing.
|
||||
///
|
||||
pub const ParseError = error {
|
||||
BadSyntax,
|
||||
};
|
||||
switch (@typeInfo(Decimal)) {
|
||||
.Int => |int| {
|
||||
var has_sign = switch (utf8[0]) {
|
||||
'-', '+', ' ' => true,
|
||||
else => false,
|
||||
};
|
||||
|
||||
///
|
||||
/// Errors that may occur during any kind of utf8-encoded printing.
|
||||
///
|
||||
pub const PrintError = error {
|
||||
PrintFailed,
|
||||
PrintIncomplete,
|
||||
};
|
||||
var result = @as(Decimal, 0);
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn parse_decimal(comptime Decimal: type, utf8: []const u8, format: DecimalFormat) !Decimal {
|
||||
if (utf8.len == 0) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
for (@intFromBool(has_sign) .. utf8.len) |index| {
|
||||
const radix = 10;
|
||||
const code = utf8[index];
|
||||
|
||||
switch (@typeInfo(Decimal)) {
|
||||
.Int => |int| {
|
||||
var has_sign = switch (utf8[0]) {
|
||||
'-', '+', ' ' => true,
|
||||
else => false,
|
||||
};
|
||||
|
||||
var result = @as(Decimal, 0);
|
||||
|
||||
for (@boolToInt(has_sign) .. utf8.len) |index| {
|
||||
const radix = 10;
|
||||
const code = utf8[index];
|
||||
|
||||
switch (code) {
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
|
||||
result = try math.checked_add(
|
||||
try math.checked_mul(result, radix),
|
||||
try math.checked_sub(code, '0'));
|
||||
},
|
||||
|
||||
else => {
|
||||
if (format.delimiter.len == 0 or !io.equals(format.delimiter, utf8[index ..])) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
switch (int.signedness) {
|
||||
.signed => {
|
||||
return result * @as(Decimal, if (has_sign and utf8[0] == '-') -1 else 1);
|
||||
},
|
||||
|
||||
.unsigned => {
|
||||
if (has_sign and utf8[0] == '-') {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
return result;
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
.Float => {
|
||||
// ""
|
||||
if (utf8.len == 0) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
var has_sign = switch (utf8[0]) {
|
||||
'-', '+', ' ' => true,
|
||||
else => false,
|
||||
};
|
||||
|
||||
// "-"
|
||||
if (has_sign and utf8.len == 1) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
const sign_offset = @boolToInt(has_sign);
|
||||
var has_decimal = utf8[sign_offset] == '.';
|
||||
|
||||
// "-."
|
||||
if (has_decimal and (utf8.len == 2)) {
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
var result = @as(Decimal, 0);
|
||||
var factor = @as(Decimal, if (has_sign and utf8[0] == '-') -1 else 1);
|
||||
|
||||
for (utf8[0 .. (sign_offset + @boolToInt(has_decimal))]) |code| switch (code) {
|
||||
'.' => {
|
||||
if (has_decimal) return error.BadSyntax;
|
||||
|
||||
has_decimal = true;
|
||||
},
|
||||
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
|
||||
if (has_decimal) factor /= 10.0;
|
||||
|
||||
result = ((result * 10.0) + @intToFloat(Decimal, code - '0'));
|
||||
},
|
||||
|
||||
else => return error.BadSyntax,
|
||||
};
|
||||
|
||||
return result * factor;
|
||||
},
|
||||
|
||||
else => @compileError("`" ++ @typeName(Decimal) ++ "` cannot be formatted as a decimal string"),
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to print `utf8` to `writer`.
|
||||
///
|
||||
/// The function returns [PrintError] if the write failed to complete partially or entirely.
|
||||
///
|
||||
pub fn print(writer: io.Writer, utf8: []const u8) PrintError!void {
|
||||
if ((writer.invoke(utf8) orelse return error.PrintFailed) != utf8.len) {
|
||||
return error.PrintIncomplete;
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn print_formatted(writer: io.Writer, comptime format: []const u8, arguments: anytype) PrintError!void {
|
||||
switch (@typeInfo(@TypeOf(arguments))) {
|
||||
.Struct => |arguments_struct| {
|
||||
comptime var arg_index = 0;
|
||||
comptime var head = 0;
|
||||
comptime var tail = 0;
|
||||
|
||||
inline while (tail < format.len) : (tail += 1) {
|
||||
if (format[tail] == '{') {
|
||||
if (tail > format.len) {
|
||||
@compileError("expected an idenifier after opening `{`");
|
||||
}
|
||||
|
||||
tail += 1;
|
||||
|
||||
switch (format[tail]) {
|
||||
'{' => {
|
||||
try print(writer, format[head .. (tail - 1)]);
|
||||
|
||||
tail += 1;
|
||||
head = tail;
|
||||
},
|
||||
|
||||
'}' => {
|
||||
if (!arguments_struct.is_tuple) {
|
||||
@compileError("all format specifiers must be named when using a named struct");
|
||||
}
|
||||
|
||||
try print(writer, arguments[arg_index]);
|
||||
|
||||
arg_index += 1;
|
||||
tail += 1;
|
||||
head = tail;
|
||||
switch (code) {
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
|
||||
result = try math.checked_add(
|
||||
try math.checked_mul(result, radix),
|
||||
try math.checked_sub(code, '0'));
|
||||
},
|
||||
|
||||
else => {
|
||||
if (arguments_struct.is_tuple) {
|
||||
@compileError("format specifiers cannot be named when using a tuple struct");
|
||||
if (self.delimiter.len == 0 or !io.equals(self.delimiter, utf8[index ..])) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try print(writer, format[head .. (tail - 1)]);
|
||||
|
||||
head = tail;
|
||||
tail += 1;
|
||||
|
||||
if (tail >= format.len) {
|
||||
@compileError("expected closing `}` or another `{` after opening `{`");
|
||||
}
|
||||
|
||||
debug.assert(tail < format.len);
|
||||
|
||||
inline while (format[tail] != '}') {
|
||||
tail += 1;
|
||||
|
||||
debug.assert(tail < format.len);
|
||||
}
|
||||
|
||||
try print_value(writer, @field(arguments, format[head .. tail]));
|
||||
|
||||
tail += 1;
|
||||
head = tail;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
else => @compileError("`arguments` must be a struct type"),
|
||||
}
|
||||
}
|
||||
switch (int.signedness) {
|
||||
.signed => {
|
||||
return result * @as(Decimal, if (has_sign and utf8[0] == '-') -1 else 1);
|
||||
},
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn print_decimal(writer: io.Writer, value: anytype, format: DecimalFormat) PrintError!void {
|
||||
if (value == 0) {
|
||||
return print(writer, switch (format.positive_prefix) {
|
||||
.none => "0",
|
||||
.plus => "+0",
|
||||
.space => " 0",
|
||||
});
|
||||
}
|
||||
.unsigned => {
|
||||
if (has_sign and utf8[0] == '-') {
|
||||
return null;
|
||||
}
|
||||
|
||||
switch (@typeInfo(@TypeOf(value))) {
|
||||
.Int => |int| {
|
||||
const radix = 10;
|
||||
var buffer = [_]u8{0} ** (1 + math.max(int.bits, 1));
|
||||
var buffer_start = buffer.len - 1;
|
||||
|
||||
{
|
||||
var decomposable_value = value;
|
||||
|
||||
while (decomposable_value != 0) : (buffer_start -= 1) {
|
||||
buffer[buffer_start] = @intCast(u8, (decomposable_value % radix) + '0');
|
||||
decomposable_value = (decomposable_value / radix);
|
||||
return result;
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
if (int.signedness == .unsigned and value < 0) {
|
||||
buffer[buffer_start] = '-';
|
||||
} else {
|
||||
switch (format.positive_prefix) {
|
||||
.none => buffer_start += 1,
|
||||
.plus => buffer[buffer_start] = '+',
|
||||
.space => buffer[buffer_start] = ' ',
|
||||
.Float => {
|
||||
var has_sign = switch (utf8[0]) {
|
||||
'-', '+', ' ' => true,
|
||||
else => false,
|
||||
};
|
||||
|
||||
// "-"
|
||||
if (has_sign and utf8.len == 1) {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
try print(writer, buffer[buffer_start ..]);
|
||||
},
|
||||
const sign_offset = @intFromBool(has_sign);
|
||||
var has_decimal = utf8[sign_offset] == '.';
|
||||
|
||||
else => @compileError("`arguments` must be a struct type"),
|
||||
// "-."
|
||||
if (has_decimal and (utf8.len == 2)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
var result = @as(Decimal, 0);
|
||||
var factor = @as(Decimal, if (has_sign and utf8[0] == '-') -1 else 1);
|
||||
|
||||
for (utf8[sign_offset + @intFromBool(has_decimal) .. utf8.len]) |code| {
|
||||
switch (code) {
|
||||
'.' => {
|
||||
if (has_decimal) {
|
||||
return null;
|
||||
}
|
||||
|
||||
has_decimal = true;
|
||||
},
|
||||
|
||||
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9' => {
|
||||
if (has_decimal) {
|
||||
factor /= 10.0;
|
||||
}
|
||||
|
||||
result = ((result * 10.0) + @as(Decimal, @floatFromInt(code - '0')));
|
||||
},
|
||||
|
||||
else => return null,
|
||||
}
|
||||
}
|
||||
|
||||
return result * factor;
|
||||
},
|
||||
|
||||
else => @compileError("`" ++ @typeName(Decimal) ++ "` cannot be formatted as a decimal string"),
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub fn print_hexadecimal(writer: io.Writer, value: anytype, format: HexadecimalFormat) PrintError!void {
|
||||
// TODO: Implement.
|
||||
_ = writer;
|
||||
_ = value;
|
||||
_ = format;
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
noinline fn print_value(writer: io.Writer, value: anytype) PrintError!void {
|
||||
const Value = @TypeOf(value);
|
||||
|
||||
return switch (@typeInfo(Value)) {
|
||||
.Int => print_decimal(writer, value, .{}),
|
||||
.Float => print_decimal(writer, value, .{}),
|
||||
|
||||
.Pointer => |pointer| switch (pointer.size) {
|
||||
.One, .Many, .C => print_hexadecimal(writer, @ptrToInt(value), .{}),
|
||||
.Slice => if (pointer.child == u8) print(writer, value) else @compileError(unformattableMessage(Value)),
|
||||
},
|
||||
|
||||
else => @compileError(unformattableMessage(Value)),
|
||||
};
|
||||
}
|
||||
|
||||
fn unformattableMessage(comptime Value: type) []const u8 {
|
||||
return "`" ++ @typeName(Value) ++ "` are not formattable";
|
||||
}
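A rough standalone equivalent of the digit-accumulation loop in `DecimalFormat.parse`, using checked arithmetic so overflow rejects the input instead of wrapping; the sign and delimiter handling of the real parser are omitted, and `parse_unsigned` is only an illustrative name:

```zig
const std = @import("std");

// Accumulate result = result * 10 + digit, rejecting the input on any
// non-digit byte or on overflow of the requested integer type.
fn parse_unsigned(comptime Int: type, utf8: []const u8) ?Int {
    if (utf8.len == 0) return null;

    var result: Int = 0;

    for (utf8) |code| switch (code) {
        '0'...'9' => {
            result = std.math.mul(Int, result, 10) catch return null;
            result = std.math.add(Int, result, code - '0') catch return null;
        },

        else => return null,
    };

    return result;
}

test "accepts digits and rejects everything else" {
    try std.testing.expectEqual(@as(?u16, 640), parse_unsigned(u16, "640"));
    try std.testing.expectEqual(@as(?u16, null), parse_unsigned(u16, "64x"));
    try std.testing.expectEqual(@as(?u16, null), parse_unsigned(u16, "99999"));
}
```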
@ -0,0 +1,128 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
const ext = @import("./ext.zig");
|
||||
|
||||
const file = @import("./file.zig");
|
||||
|
||||
const kym = @import("./kym.zig");
|
||||
|
||||
pub const Manifest = struct {
|
||||
title: [255:0]coral.io.Byte = [_:0]coral.io.Byte{0} ** 255,
|
||||
width: u16 = 640,
|
||||
height: u16 = 480,
|
||||
tick_rate: f32 = 60.0,
|
||||
|
||||
pub fn load(self: *Manifest, env: *kym.RuntimeEnv, file_access: file.Access) kym.RuntimeError!void {
|
||||
const manifest = try env.execute_file(file_access, file.Path.from(&.{"app.ona"}));
|
||||
|
||||
defer env.discard(manifest);
|
||||
|
||||
const title = try env.get_field(manifest, "title");
|
||||
|
||||
defer env.discard(title);
|
||||
|
||||
const title_string = try env.to_string(title);
|
||||
|
||||
const width = @as(u16, get: {
|
||||
const ref = try env.get_field(manifest, "width");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @intFromFloat(env.to_float(ref) catch @as(f64, @floatFromInt(self.width)));
|
||||
});
|
||||
|
||||
const height = @as(u16, get: {
|
||||
const ref = try env.get_field(manifest, "height");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @intFromFloat(env.to_float(ref) catch @as(f64, @floatFromInt(self.height)));
|
||||
});
|
||||
|
||||
const tick_rate = @as(f32, get: {
|
||||
const ref = try env.get_field(manifest, "tick_rate");
|
||||
|
||||
defer env.discard(ref);
|
||||
|
||||
break: get @floatCast(env.to_float(ref) catch self.tick_rate);
|
||||
});
|
||||
|
||||
{
|
||||
const limited_title_len = coral.math.min(title_string.len, self.title.len);
|
||||
|
||||
coral.io.copy(&self.title, title_string[0 .. limited_title_len]);
|
||||
coral.io.zero(self.title[limited_title_len .. self.title.len]);
|
||||
}
|
||||
|
||||
self.tick_rate = tick_rate;
|
||||
self.width = width;
|
||||
self.height = height;
|
||||
}
|
||||
};
|
||||
|
||||
pub const LogSeverity = enum {
|
||||
info,
|
||||
warn,
|
||||
fail,
|
||||
};
|
||||
|
||||
pub const WritableLog = struct {
|
||||
severity: LogSeverity,
|
||||
write_buffer: coral.list.ByteStack,
|
||||
|
||||
pub fn as_writer(self: *WritableLog) coral.io.Writer {
|
||||
return coral.io.Writer.bind(WritableLog, self, struct {
|
||||
fn write(writable_log: *WritableLog, bytes: []const coral.io.Byte) ?usize {
|
||||
writable_log.write(bytes) catch return null;
|
||||
|
||||
return bytes.len;
|
||||
}
|
||||
}.write);
|
||||
}
|
||||
|
||||
pub fn free(self: *WritableLog) void {
|
||||
self.write_buffer.free();
|
||||
}
|
||||
|
||||
pub fn make(log_severity: LogSeverity, allocator: coral.io.Allocator) WritableLog {
|
||||
return .{
|
||||
.severity = log_severity,
|
||||
.write_buffer = coral.list.ByteStack.make(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn write(self: *WritableLog, bytes: []const coral.io.Byte) coral.io.AllocationError!void {
|
||||
const format_string = "%.*s";
|
||||
var line_written = @as(usize, 0);
|
||||
|
||||
for (bytes) |byte| {
|
||||
if (byte == '\n') {
|
||||
ext.SDL_LogError(
|
||||
ext.SDL_LOG_CATEGORY_APPLICATION,
|
||||
format_string,
|
||||
self.write_buffer.values.len,
|
||||
self.write_buffer.values.ptr);
|
||||
|
||||
self.write_buffer.clear();
|
||||
|
||||
line_written = 0;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
try self.write_buffer.push_one(byte);
|
||||
|
||||
line_written += 1;
|
||||
}
|
||||
|
||||
if (self.write_buffer.values.len != 0) {
|
||||
ext.SDL_LogError(
|
||||
ext.SDL_LOG_CATEGORY_APPLICATION,
|
||||
format_string,
|
||||
self.write_buffer.values.len,
|
||||
self.write_buffer.values.ptr);
|
||||
|
||||
self.write_buffer.clear();
|
||||
}
|
||||
}
|
||||
};
|
|
@ -1,23 +0,0 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
pub const Item = struct {
|
||||
transform: Transform,
|
||||
|
||||
options: union (enum) {
|
||||
sprite: struct {
|
||||
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
pub const Transform = extern struct {
|
||||
x: coral.math.Vector2,
|
||||
y: coral.math.Vector2,
|
||||
origin: coral.math.Vector2,
|
||||
|
||||
pub const identity = Transform{
|
||||
.x = .{1, 0},
|
||||
.y = .{0, 1},
|
||||
.origin = .{0, 0},
|
||||
};
|
||||
};
|
|
@ -1,4 +1,3 @@
pub usingnamespace @cImport({
@cInclude("SDL2/SDL.h");
@cInclude("SDL2/SDL.h");
});
@ -2,53 +2,53 @@ const coral = @import("coral");
|
|||
|
||||
const ext = @import("./ext.zig");
|
||||
|
||||
pub const Contents = struct {
|
||||
allocator: coral.io.Allocator,
|
||||
data: []u8,
|
||||
pub const Access = union (enum) {
|
||||
sandboxed_path: *const Path,
|
||||
|
||||
pub const InitError = coral.io.AllocationError || Readable.ReadError;
|
||||
pub fn open_readable(self: Access, readable_path: Path) ?*Readable {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const readable_path_string = sandboxed_path.joined(readable_path).to_string() orelse return null;
|
||||
|
||||
pub fn deinit(self: *Contents) void {
|
||||
coral.io.deallocate(self.allocator, self.data);
|
||||
return @ptrCast(ext.SDL_RWFromFile(readable_path_string.ptr, "rb"));
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator, readable_file: *Readable) InitError!Contents {
|
||||
const file_offset = try readable_file.skip(0);
|
||||
const file_size = try readable_file.seek_end();
|
||||
pub fn query(self: Access, path: Path) ?Info {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const path_string = sandboxed_path.joined(path).to_string() orelse return null;
|
||||
const rw_ops = ext.SDL_RWFromFile(path_string, "rb") orelse return null;
|
||||
const file_size = ext.SDL_RWseek(rw_ops, 0, ext.RW_SEEK_END);
|
||||
|
||||
_ = try readable_file.seek(file_offset);
|
||||
if (ext.SDL_RWclose(rw_ops) != 0 or file_size < 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const allocation = try coral.io.allocate_many(u8, file_size, allocator);
|
||||
|
||||
errdefer coral.io.deallocate(allocator, allocation);
|
||||
|
||||
if (try readable_file.read(allocation) != allocation.len) {
|
||||
// Read less than was allocated for.
|
||||
return error.FileUnavailable;
|
||||
return Info{
|
||||
.size = @intCast(file_size),
|
||||
};
|
||||
},
|
||||
}
|
||||
|
||||
return Contents{
|
||||
.allocator = allocator,
|
||||
.data = allocation,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Info = struct {
|
||||
size: u64,
|
||||
};
|
||||
|
||||
pub const Path = extern struct {
|
||||
data: [4096]u8 = [_]u8{0} ** 4096,
|
||||
data: [4096]coral.io.Byte = [_]coral.io.Byte{0} ** 4096,
|
||||
|
||||
pub const cwd = Path.from(&.{"./"});
|
||||
|
||||
pub const ValidationError = error {
|
||||
PathTooLong,
|
||||
};
|
||||
|
||||
pub fn from(components: []const []const u8) Path {
|
||||
// TODO: Implement proper parsing / removal of duplicate path delimiters.
|
||||
var path = Path{};
|
||||
|
||||
{
|
||||
var writable_slice = coral.io.FixedBuffer{.slice = &path.data};
|
||||
var writable_slice = coral.io.FixedBuffer{.bytes = &path.data};
|
||||
|
||||
for (components) |component| {
|
||||
if (writable_slice.write(component) != component.len) {
|
||||
|
@ -64,7 +64,7 @@ pub const Path = extern struct {
|
|||
var path = Path{};
|
||||
|
||||
{
|
||||
var writable = coral.io.FixedBuffer{.slice = &path.data};
|
||||
var writable = coral.io.FixedBuffer{.bytes = &path.data};
|
||||
var written = @as(usize, 0);
|
||||
|
||||
for (&self.data) |byte| {
|
||||
|
@ -91,32 +91,20 @@ pub const Path = extern struct {
|
|||
return path;
|
||||
}
|
||||
|
||||
pub fn to_string(self: Path) ValidationError![:0]const u8 {
|
||||
const sentineled_data = get_sentineled_data: {
|
||||
const last_index = self.data.len - 1;
|
||||
pub fn to_string(self: Path) ?[:0]const coral.io.Byte {
|
||||
const last_index = self.data.len - 1;
|
||||
|
||||
if (self.data[last_index] != 0) {
|
||||
return error.PathTooLong;
|
||||
}
|
||||
if (self.data[last_index] != 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
break: get_sentineled_data self.data[0 .. last_index:0];
|
||||
};
|
||||
|
||||
return sentineled_data[0 .. coral.io.sentinel_index(u8, 0, sentineled_data):0];
|
||||
return coral.io.slice_sentineled(@as(coral.io.Byte, 0), @as([*:0]const coral.io.Byte, @ptrCast(&self.data)));
|
||||
}
|
||||
};
|
||||
|
||||
pub const ReadError = error {
|
||||
FileUnavailable,
|
||||
};
|
||||
|
||||
pub const Readable = opaque {
|
||||
pub fn as_reader(self: *Readable) coral.io.Reader {
|
||||
return coral.io.Reader.bind(Readable, self, struct {
|
||||
fn read(readable: *Readable, buffer: []u8) ?usize {
|
||||
return readable.read(buffer) catch null;
|
||||
}
|
||||
}.read);
|
||||
return coral.io.Reader.bind(Readable, self, read_into);
|
||||
}
|
||||
|
||||
pub fn close(self: *Readable) void {
|
||||
|
@ -125,97 +113,81 @@ pub const Readable = opaque {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn read(self: *Readable, buffer: []u8) ReadError!usize {
|
||||
pub fn read_into(self: *Readable, buffer: []coral.io.Byte) ?usize {
|
||||
ext.SDL_ClearError();
|
||||
|
||||
const bytes_read = ext.SDL_RWread(rw_ops_cast(self), buffer.ptr, @sizeOf(u8), buffer.len);
|
||||
const bytes_read = ext.SDL_RWread(rw_ops_cast(self), buffer.ptr, @sizeOf(coral.io.Byte), buffer.len);
|
||||
const error_message = ext.SDL_GetError();
|
||||
|
||||
if (bytes_read == 0 and error_message != null and error_message.* != 0) {
|
||||
return error.FileUnavailable;
|
||||
return null;
|
||||
}
|
||||
|
||||
return bytes_read;
|
||||
}
|
||||
|
||||
pub fn seek(self: *Readable, cursor: u64) ReadError!u64 {
|
||||
pub fn seek_head(self: *Readable, cursor: u64) ?u64 {
|
||||
// TODO: Fix safety of int cast.
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), @intCast(i64, cursor), ext.RW_SEEK_SET);
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), @intCast(cursor), ext.RW_SEEK_SET);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
return null;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
|
||||
pub fn seek_end(self: *Readable) ReadError!usize {
|
||||
pub fn seek_tail(self: *Readable) ?usize {
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), 0, ext.RW_SEEK_END);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
|
||||
pub fn skip(self: *Readable, offset: i64) ReadError!u64 {
|
||||
pub fn skip(self: *Readable, offset: i64) ?u64 {
|
||||
const byte_offset = ext.SDL_RWseek(rw_ops_cast(self), offset, ext.RW_SEEK_CUR);
|
||||
|
||||
if (byte_offset < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return @intCast(u64, byte_offset);
|
||||
return @intCast(byte_offset);
|
||||
}
|
||||
};
|
||||
|
||||
pub const System = union (enum) {
|
||||
sandboxed_path: *const Path,
|
||||
pub fn allocate_and_load(allocator: coral.io.Allocator, access: Access, path: Path) coral.io.AllocationError!?[]coral.io.Byte {
|
||||
const allocation = try allocator.reallocate(null, query_file_size: {
|
||||
const info = access.query(path) orelse return null;
|
||||
|
||||
pub const FileInfo = struct {
|
||||
size: u64,
|
||||
break: query_file_size info.size;
|
||||
});
|
||||
|
||||
const readable = access.open_readable(path) orelse {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
pub const OpenError = Path.ValidationError || error {
|
||||
FileNotFound,
|
||||
defer _ = readable.close();
|
||||
|
||||
const bytes_read = readable.read_into(allocation) orelse {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
pub const QueryError = OpenError || ReadError;
|
||||
if (bytes_read != allocation.len) {
|
||||
allocator.deallocate(allocation);
|
||||
|
||||
pub fn open_readable(self: System, path: Path) OpenError!*Readable {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
return @ptrCast(*Readable, ext.SDL_RWFromFile(try sandboxed_path.joined(path).to_string(), "rb") orelse {
|
||||
return error.FileNotFound;
|
||||
});
|
||||
},
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn query_info(self: System, path: Path) QueryError!FileInfo {
|
||||
switch (self) {
|
||||
.sandboxed_path => |sandboxed_path| {
|
||||
const file = ext.SDL_RWFromFile(try sandboxed_path.joined(path).to_string(), "rb") orelse {
|
||||
return error.FileNotFound;
|
||||
};
|
||||
|
||||
defer coral.debug.assert(ext.SDL_RWclose(file) == 0);
|
||||
|
||||
const file_size = ext.SDL_RWseek(file, 0, ext.RW_SEEK_END);
|
||||
|
||||
if (file_size < 0) {
|
||||
return error.FileUnavailable;
|
||||
}
|
||||
|
||||
return FileInfo{
|
||||
.size = @intCast(u64, file_size),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
return allocation;
|
||||
}
|
||||
|
||||
fn rw_ops_cast(ptr: *anyopaque) *ext.SDL_RWops {
|
||||
return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), ptr));
|
||||
return @ptrCast(@alignCast(ptr));
|
||||
}
|
||||
|
|
|
@ -6,121 +6,84 @@ const ext = @import("./ext.zig");
|
|||
|
||||
const std = @import("std");
|
||||
|
||||
///
|
||||
/// Recorded allocation info state.
|
||||
///
|
||||
const AllocationInfo = struct {
|
||||
trace: AllocationTrace,
|
||||
next_info: ?*AllocationInfo,
|
||||
const AllocationNode = struct {
|
||||
trace: std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => true,
|
||||
.ReleaseFast, .ReleaseSmall => false,
|
||||
}),
|
||||
|
||||
next: ?*AllocationNode,
|
||||
size: usize,
|
||||
|
||||
fn alloc(size: usize, return_address: usize) *AllocationNode {
|
||||
const node = @as(*AllocationNode, @ptrCast(@alignCast(ext.SDL_malloc(@sizeOf(AllocationNode) + size))));
|
||||
|
||||
node.* = .{
|
||||
.size = size,
|
||||
.next = null,
|
||||
.trace = .{},
|
||||
};
|
||||
|
||||
node.trace.addAddr(return_address, "");
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
fn dealloc(self: *AllocationNode) void {
|
||||
ext.SDL_free(self);
|
||||
}
|
||||
|
||||
fn realloc(self: *AllocationNode, size: usize, return_address: usize) *AllocationNode {
|
||||
const node = @as(*AllocationNode, @ptrCast(@alignCast(ext.SDL_realloc(self, @sizeOf(AllocationNode) + size))));
|
||||
|
||||
node.* = .{
|
||||
.size = size,
|
||||
.next = null,
|
||||
.trace = .{},
|
||||
};
|
||||
|
||||
node.trace.addAddr(return_address, "");
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
fn owns_userdata(self: *AllocationNode, other_userdata: []const coral.io.Byte) bool {
|
||||
const self_userdata = self.userdata();
|
||||
|
||||
return self_userdata.ptr == other_userdata.ptr and self_userdata.len == other_userdata.len;
|
||||
}
|
||||
|
||||
fn userdata(self: *AllocationNode) []coral.io.Byte {
|
||||
return @as([*]coral.io.Byte, @ptrFromInt(@intFromPtr(self) + @sizeOf(AllocationNode)))[0 .. self.size];
|
||||
}
|
||||
};
|
||||
|
||||
///
|
||||
/// Recorded stack trace of allocation call site.
|
||||
///
|
||||
/// *Note* this structure is reduced to zero bytes in released builds optimized for speed or size.
|
||||
///
|
||||
const AllocationTrace = std.debug.ConfigurableTrace(2, 4, switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => true,
|
||||
.ReleaseFast, .ReleaseSmall => false,
|
||||
});
|
||||
|
||||
///
|
||||
/// Heap allocation context.
|
||||
///
|
||||
const Context = struct {
|
||||
allocation_info_head: ?*AllocationInfo = null,
|
||||
head: ?*AllocationNode = null,
|
||||
|
||||
///
|
||||
/// Attempts to allocate a buffer of `size` length from `self`, with `return_address` as the location of the
|
||||
/// allocation request origin.
|
||||
///
|
||||
/// A reference to the allocated buffer is returned via a slice if the allocation was successful, otherwise `null`
|
||||
/// is returned.
|
||||
///
|
||||
/// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
|
||||
/// leak.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn allocate(self: *Context, size: usize, return_address: usize) ?[]u8 {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const allocation_info_size = @sizeOf(AllocationInfo);
|
||||
const total_allocation_size = allocation_info_size + size;
|
||||
const allocation = ext.SDL_malloc(total_allocation_size) orelse return null;
|
||||
const allocation_info = @ptrCast(*AllocationInfo, @alignCast(@alignOf(AllocationInfo), allocation));
|
||||
|
||||
allocation_info.* = .{
|
||||
.size = size,
|
||||
.next_info = self.allocation_info_head,
|
||||
.trace = .{},
|
||||
};
|
||||
|
||||
allocation_info.trace.addAddr(return_address, "");
|
||||
|
||||
self.allocation_info_head = allocation_info;
|
||||
|
||||
return @ptrCast([*]u8, allocation)[allocation_info_size .. total_allocation_size];
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
return @ptrCast([*]u8, ext.SDL_malloc(size) orelse return null)[0 .. size];
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Returns the assumed pointer to the [AllocationInfo] address of `allocation`.
|
||||
///
|
||||
fn allocation_info_of(allocation: [*]u8) *AllocationInfo {
|
||||
return @intToPtr(*AllocationInfo, @ptrToInt(allocation) - @sizeOf(AllocationInfo));
|
||||
}
|
||||
|
||||
///
|
||||
/// Deallocates the allocation buffer referenced by `allocation`.
|
||||
///
|
||||
/// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
|
||||
/// checked behavior will occur.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn deallocate(self: *Context, allocation: []u8) void {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const target_allocation_info = allocation_info_of(allocation.ptr);
|
||||
const panic_message = "incorrect allocation address for deallocating";
|
||||
var current_node = self.head orelse @panic(panic_message);
|
||||
|
||||
if (target_allocation_info.size != allocation.len) {
|
||||
@panic("incorrect allocation length for deallocating");
|
||||
if (current_node.owns_userdata(allocation)) {
|
||||
self.head = current_node.next;
|
||||
|
||||
return current_node.dealloc();
|
||||
}
|
||||
|
||||
if (self.allocation_info_head) |allocation_info_head| {
|
||||
if (target_allocation_info == allocation_info_head) {
|
||||
self.allocation_info_head = allocation_info_head.next_info;
|
||||
while (true) {
|
||||
const next_node = current_node.next orelse @panic(panic_message);
|
||||
|
||||
ext.SDL_free(target_allocation_info);
|
||||
if (next_node.owns_userdata(allocation)) {
|
||||
current_node.next = next_node.next;
|
||||
|
||||
return;
|
||||
return next_node.dealloc();
|
||||
}
|
||||
|
||||
var previous_allocation_info = allocation_info_head;
|
||||
var current_allocation_info = allocation_info_head.next_info;
|
||||
|
||||
while (current_allocation_info) |allocation_info| {
|
||||
if (allocation_info == target_allocation_info) {
|
||||
previous_allocation_info.next_info = allocation_info.next_info;
|
||||
|
||||
ext.SDL_free(target_allocation_info);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
previous_allocation_info = allocation_info;
|
||||
current_allocation_info = allocation_info.next_info;
|
||||
}
|
||||
current_node = next_node;
|
||||
}
|
||||
|
||||
@panic("incorrect allocation address for deallocating");
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
|
@ -129,111 +92,67 @@ const Context = struct {
|
|||
}
|
||||
}
|
||||
|
||||
///
|
||||
/// Attempts to reallocate the buffer referenced by `allocation` to be `size` length from `self`.
|
||||
///
|
||||
/// A reference to the reallocated buffer is returned via a slice if the reallocation was successful, otherwise
|
||||
/// `null` is returned.
|
||||
///
|
||||
/// *Note* the returned buffer must be deallocated with [deallocate] before program exit or it will cause a memory
|
||||
/// leak.
|
||||
///
|
||||
/// *Note* the pointer and length of `allocation` must match valid values known to `allocator` otherwise safety-
|
||||
/// checked behavior will occur.
|
||||
///
|
||||
/// *Note* the allocation referenced by `allocation` should be considered invalid once the function returns,
|
||||
/// discarding it in favor of the return value.
|
||||
///
|
||||
/// *Note* allocation checks are disabled in release builds optimized for speed or size.
|
||||
///
|
||||
fn reallocate(self: *Context, allocation: []u8, size: usize) ?[]u8 {
|
||||
fn reallocate(self: *Context, return_address: usize, existing_allocation: ?[]u8, size: usize) coral.io.AllocationError![]u8 {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
const target_allocation_info = allocation_info_of(allocation.ptr);
|
||||
if (existing_allocation) |allocation| {
|
||||
const panic_message = "incorrect allocation address for reallocating";
|
||||
var current_node = self.head orelse @panic(panic_message);
|
||||
|
||||
if (target_allocation_info.size != allocation.len) {
|
||||
@panic("incorrect allocation length for reallocating");
|
||||
}
|
||||
if (current_node.owns_userdata(allocation)) {
|
||||
const node = current_node.realloc(size, return_address);
|
||||
|
||||
const allocation_info_size = @sizeOf(AllocationInfo);
|
||||
self.head = node;
|
||||
|
||||
if (self.allocation_info_head) |allocation_info_head| {
|
||||
if (target_allocation_info == allocation_info_head) {
|
||||
self.allocation_info_head = allocation_info_head.next_info;
|
||||
|
||||
const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
|
||||
|
||||
target_allocation_info.size = size;
|
||||
|
||||
return @ptrCast([*]u8, allocation_address)[
|
||||
allocation_info_size .. (allocation_info_size + size)];
|
||||
return node.userdata();
|
||||
}
|
||||
|
||||
var previous_allocation_info = allocation_info_head;
|
||||
var current_allocation_info = allocation_info_head.next_info;
|
||||
while (true) {
|
||||
const next_node = current_node.next orelse @panic(panic_message);
|
||||
|
||||
while (current_allocation_info) |allocation_info| {
|
||||
if (allocation_info == target_allocation_info) {
|
||||
previous_allocation_info.next_info = allocation_info.next_info;
|
||||
if (next_node.owns_userdata(allocation)) {
|
||||
const node = next_node.realloc(size, return_address);
|
||||
|
||||
const allocation_address = ext.SDL_realloc(target_allocation_info, size) orelse return null;
|
||||
current_node.next = node;
|
||||
|
||||
target_allocation_info.size = size;
|
||||
|
||||
return @ptrCast([*]u8, allocation_address)[
|
||||
allocation_info_size .. (allocation_info_size + size)];
|
||||
return node.userdata();
|
||||
}
|
||||
|
||||
previous_allocation_info = allocation_info;
|
||||
current_allocation_info = allocation_info.next_info;
|
||||
current_node = next_node;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
const node = AllocationNode.alloc(size, return_address);
|
||||
|
||||
@panic("incorrect allocation address for reallocating");
|
||||
if (self.head) |head| {
|
||||
node.next = head;
|
||||
}
|
||||
|
||||
self.head = node;
|
||||
|
||||
return node.userdata();
|
||||
}
|
||||
},
|
||||
|
||||
.ReleaseFast, .ReleaseSmall => {
|
||||
return @ptrCast([*]u8, ext.SDL_realloc(allocation.ptr, size) orelse return null)[0 .. size];
|
||||
if (existing_allocation) |allocation | {
|
||||
return @as([*]u8, @ptrCast(ext.SDL_realloc(allocation.ptr, size) orelse {
return error.OutOfMemory;
}))[0 .. size];
|
||||
}
|
||||
|
||||
return @as([*]u8, @ptrCast(ext.SDL_malloc(size) orelse return error.OutOfMemory))[0 .. size];
|
||||
},
|
||||
}
|
||||
}
|
||||
};
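The layout used by `AllocationNode` (the tracking header sits immediately in front of the bytes handed back to the caller, so the userdata slice can be recovered from the node address) can be shown in isolation. A minimal sketch using only the standard library, with `Node` as an illustrative stand-in:

```zig
const std = @import("std");

// Illustrative stand-in for AllocationNode: the header is placed directly before
// the user-visible bytes, so the userdata slice is derived from the node address.
const Node = struct {
    size: usize,

    fn userdata(self: *Node) []u8 {
        return (@as([*]u8, @ptrCast(self)) + @sizeOf(Node))[0..self.size];
    }
};

test "userdata begins right after the tracking header" {
    var backing: [@sizeOf(Node) + 16]u8 align(@alignOf(Node)) = undefined;
    const node: *Node = @ptrCast(&backing);

    node.* = .{ .size = 16 };

    try std.testing.expectEqual(@intFromPtr(&backing) + @sizeOf(Node), @intFromPtr(node.userdata().ptr));
    try std.testing.expectEqual(@as(usize, 16), node.userdata().len);
}
```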
///
|
||||
/// Heap context.
|
||||
///
|
||||
var context = Context{};
|
||||
|
||||
///
|
||||
/// Heap allocator.
|
||||
///
|
||||
pub const allocator = coral.io.Allocator.bind(Context, &context, struct {
|
||||
fn reallocate(self: *Context, options: coral.io.AllocationOptions) ?[]u8 {
|
||||
if (options.size == 0) {
|
||||
if (options.allocation) |allocation| {
|
||||
self.deallocate(allocation);
|
||||
pub const allocator = coral.io.Allocator.bind(Context, &context, .{
|
||||
.reallocate = Context.reallocate,
|
||||
.deallocate = Context.deallocate,
|
||||
});
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
return self.allocate(0, options.return_address);
|
||||
}
|
||||
|
||||
if (options.allocation) |allocation| {
|
||||
return self.reallocate(allocation, options.size);
|
||||
}
|
||||
|
||||
return self.allocate(options.size, options.return_address);
|
||||
}
|
||||
}.reallocate);
|
||||
|
||||
///
|
||||
/// Checks for any allocations belonging to the process heap allocated through the [allocator] interface that are still
|
||||
/// alive and reports the stack traces of any detected allocations to stderr along with the allocation address and
|
||||
/// length.
|
||||
///
|
||||
/// *Note* this function becomes a no-op in release builds optimized for speed or size.
|
||||
///
|
||||
pub fn trace_leaks() void {
|
||||
switch (builtin.mode) {
|
||||
.Debug, .ReleaseSafe => {
|
||||
|
@ -242,7 +161,7 @@ pub fn trace_leaks() void {
|
|||
while (current_allocation_info) |allocation_info| : (current_allocation_info = allocation_info.next_info) {
|
||||
std.debug.print("{d} byte leak at 0x{x} detected:\n", .{
|
||||
allocation_info.size,
|
||||
@ptrToInt(allocation_info) + @sizeOf(AllocationInfo),
|
||||
@intFromPtr(allocation_info) + @sizeOf(AllocationNode),
|
||||
});
|
||||
|
||||
allocation_info.trace.dump();
|
||||
|
|
|
@ -1,63 +1,36 @@
|
|||
const Ast = @import("./kym/Ast.zig");
|
||||
|
||||
const State = @import("./kym/State.zig");
|
||||
|
||||
const coral = @import("coral");
|
||||
|
||||
const file = @import("./file.zig");
|
||||
|
||||
const tokens = @import("./kym/tokens.zig");
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const CallContext = struct {
|
||||
env: *RuntimeEnv,
|
||||
obj: Value,
|
||||
args: []const Value = &.{},
|
||||
caller: *const RuntimeRef,
|
||||
callable: *const RuntimeRef,
|
||||
userdata: []u8,
|
||||
args: []const *const RuntimeRef = &.{},
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub fn arg_at(self: CallContext, index: Int) RuntimeError!Value {
|
||||
pub fn arg_at(self: CallContext, index: u8) RuntimeError!*const RuntimeRef {
|
||||
if (!coral.math.is_clamped(index, 0, self.args.len - 1)) {
|
||||
return self.env.check_fail("argument out of bounds");
|
||||
}
|
||||
|
||||
return self.args[@intCast(usize, index)];
|
||||
return self.args[@as(usize, index)];
|
||||
}
|
||||
};
|
||||
|
||||
const Chunk = struct {
|
||||
env: *RuntimeEnv,
|
||||
opcodes: coral.list.Stack(Opcode),
|
||||
const Compiler = struct {
|
||||
state: *State,
|
||||
opcodes: OpcodeList,
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
const Opcode = union (enum) {
|
||||
push_nil,
|
||||
push_true,
|
||||
push_false,
|
||||
push_zero,
|
||||
push_number: Float,
|
||||
push_array: i32,
|
||||
push_table: i32,
|
||||
const OpcodeList = coral.list.Stack(Opcode);
|
||||
|
||||
not,
|
||||
neg,
|
||||
|
||||
add,
|
||||
sub,
|
||||
mul,
|
||||
div,
|
||||
|
||||
eql,
|
||||
cgt,
|
||||
clt,
|
||||
cge,
|
||||
cle,
|
||||
};
|
||||
|
||||
pub fn compile_ast(self: *Chunk, ast: Ast) void {
|
||||
fn compile_ast(self: *Compiler, ast: Ast) coral.io.AllocationError!void {
|
||||
for (ast.list_statements()) |statement| {
|
||||
switch (statement) {
|
||||
.return_expression => |return_expression| {
|
||||
|
@ -65,67 +38,80 @@ const Chunk = struct {
|
|||
},
|
||||
|
||||
.return_nothing => {
|
||||
try self.emit(.push_nil);
|
||||
try self.opcodes.push_one(.push_nil);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
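Since these opcodes ultimately drive a stack machine, a toy sketch of how such a chunk executes may help; the `Opcode` variants and `run` below are simplified stand-ins and not Kym's actual instruction set:

```zig
const std = @import("std");

// Toy stack machine: push opcodes grow the value stack, binary opcodes pop two
// operands and push the result.
const Opcode = union(enum) {
    push_number: f64,
    add,
    mul,
};

fn run(opcodes: []const Opcode) !f64 {
    var stack = std.BoundedArray(f64, 16){};

    for (opcodes) |opcode| switch (opcode) {
        .push_number => |number| try stack.append(number),

        .add, .mul => {
            const rhs = stack.pop();
            const lhs = stack.pop();

            try stack.append(if (opcode == .add) lhs + rhs else lhs * rhs);
        },
    };

    return stack.pop();
}

test "(2 + 3) * 4 evaluates to 20" {
    try std.testing.expectEqual(@as(f64, 20), try run(&.{
        .{ .push_number = 2 },
        .{ .push_number = 3 },
        .add,
        .{ .push_number = 4 },
        .mul,
    }));
}
```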
pub fn compile_expression(self: *Chunk, expression: Ast.Expression) void {
|
||||
fn compile_expression(self: *Compiler, expression: Ast.Expression) coral.io.AllocationError!void {
|
||||
const is_zero = struct {
|
||||
fn is_zero(utf8: []const u8) bool {
|
||||
return coral.io.equals(utf8, "0") or coral.io.equals(utf8, "0.0");
|
||||
}
|
||||
}.is_zero;
|
||||
|
||||
const number_format = coral.utf8.DecimalFormat{
|
||||
.delimiter = "_",
|
||||
.positive_prefix = .none,
|
||||
};
|
||||
|
||||
switch (expression) {
|
||||
.nil_literal => try self.emit(.push_nil),
|
||||
.true_literal => try self.emit(.push_true),
|
||||
.false_literal => try self.emit(.push_false),
|
||||
.integer_literal => |literal| try self.emit(if (literal == 0) .push_zero else .{.push_integer = literal}),
|
||||
.float_literal => |literal| try self.emit(if (literal == 0) .push_zero else .{.push_float = literal}),
|
||||
.string_literal => |literal| try self.emit(.{.push_object = try self.intern(literal)}),
|
||||
.nil_literal => try self.opcodes.push_one(.push_nil),
|
||||
.true_literal => try self.opcodes.push_one(.push_true),
|
||||
.false_literal => try self.opcodes.push_one(.push_false),
|
||||
|
||||
.array_literal => |elements| {
|
||||
if (elements.values.len > coral.math.max_int(@typeInfo(Int).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
.number_literal => |literal| {
|
||||
const parsed_number = number_format.parse(literal, State.Float);
|
||||
|
||||
for (elements.values) |element_expression| {
|
||||
try self.compile_expression(element_expression);
|
||||
}
|
||||
coral.debug.assert(parsed_number != null);
|
||||
|
||||
try self.emit(.{.push_array = @intCast(Int, elements.values.len)});
|
||||
try self.opcodes.push_one(if (is_zero(literal)) .push_zero else .{.push_number = parsed_number.?});
|
||||
},
|
||||
|
||||
.string_literal => |literal| {
|
||||
try self.opcodes.push_one(.{
|
||||
.push_object = try self.state.acquire_interned(literal, &string_info),
|
||||
});
|
||||
},
|
||||
|
||||
.table_literal => |fields| {
|
||||
if (fields.values.len > coral.math.max_int(@typeInfo(Int).Int)) {
|
||||
if (fields.values.len > coral.math.max_int(@typeInfo(u32).Int)) {
|
||||
return error.OutOfMemory;
|
||||
}
|
||||
|
||||
for (fields.values) |field| {
|
||||
try self.compile_expression(field.expression);
|
||||
try self.emit(.{.push_object = try self.intern(field.identifier)});
|
||||
|
||||
try self.opcodes.push_one(.{
|
||||
.push_object = try self.state.acquire_interned(field.identifier, &string_info),
|
||||
});
|
||||
}
|
||||
|
||||
try self.emit(.{.push_table = @intCast(Int, fields.values.len)});
|
||||
try self.opcodes.push_one(.{.push_table = @intCast(fields.values.len)});
|
||||
},
|
||||
|
||||
.binary_operation => |operation| {
|
||||
try self.compile_expression(operation.lhs_expression.*);
|
||||
try self.compile_expression(operation.rhs_expression.*);
|
||||
|
||||
try self.emit(switch (operation.operator) {
|
||||
try self.opcodes.push_one(switch (operation.operator) {
|
||||
.addition => .add,
|
||||
.subtraction => .sub,
|
||||
.multiplication => .mul,
|
||||
.divsion => .div,
|
||||
.greater_equals_comparison => .compare_eq,
|
||||
.greater_than_comparison => .compare_gt,
|
||||
.equals_comparison => .compare_ge,
|
||||
.less_than_comparison => .compare_lt,
|
||||
.less_equals_comparison => .compare_le,
|
||||
.greater_equals_comparison => .cge,
.greater_than_comparison => .cgt,
.equals_comparison => .eql,
.less_than_comparison => .clt,
.less_equals_comparison => .cle,
|
||||
});
|
||||
},
|
||||
|
||||
.unary_operation => |operation| {
|
||||
try self.compile_expression(operation.expression.*);
|
||||
|
||||
try self.emit(switch (operation.operator) {
|
||||
try self.opcodes.push_one(switch (operation.operator) {
|
||||
.boolean_negation => .not,
|
||||
.numeric_negation => .neg,
|
||||
});
|
||||
|
@@ -137,405 +123,477 @@ const Chunk = struct {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Chunk) void {
|
||||
self.opcodes.deinit(self.env.allocator);
|
||||
fn free(self: *Compiler) void {
|
||||
for (self.opcodes.values) |opcode| {
|
||||
if (opcode == .push_object) {
|
||||
self.state.release(opcode.push_object);
|
||||
}
|
||||
}
|
||||
|
||||
self.opcodes.free();
|
||||
}
|
||||
|
||||
pub fn execute(self: *Chunk) RuntimeError!Value {
|
||||
_ = self;
|
||||
fn list_opcodes(self: Compiler) []const Opcode {
|
||||
return self.opcodes.values;
|
||||
}
|
||||
|
||||
return Value.nil;
|
||||
fn make(allocator: coral.io.Allocator, state: *State) Compiler {
|
||||
return .{
|
||||
.opcodes = OpcodeList.make(allocator),
|
||||
.state = state,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Float = f64;
|
||||
pub const IndexContext = struct {
|
||||
env: *RuntimeEnv,
|
||||
indexable: *const RuntimeRef,
|
||||
index: *const RuntimeRef,
|
||||
userdata: []u8,
|
||||
};
|
||||
|
||||
pub const Int = i32;
|
||||
pub const ObjectInfo = struct {
|
||||
call: *const fn (context: CallContext) RuntimeError!*RuntimeRef = default_call,
|
||||
clean: *const fn (userdata: []u8) void = default_clean,
|
||||
get: *const fn (context: IndexContext) RuntimeError!*RuntimeRef = default_get,
|
||||
set: *const fn (context: IndexContext, Any: *const RuntimeRef) RuntimeError!void = default_set,
|
||||
|
||||
pub const Objectid = u32;
|
||||
fn cast(object_info: *const anyopaque) *const ObjectInfo {
|
||||
return @ptrCast(@alignCast(object_info));
|
||||
}
|
||||
|
||||
fn default_call(context: CallContext) RuntimeError!*RuntimeRef {
|
||||
return context.env.raise(error.BadOperation, "attempt to call non-callable");
|
||||
}
|
||||
|
||||
fn default_clean(_: []u8) void {
|
||||
// Nothing to clean up by default.
|
||||
}
|
||||
|
||||
fn default_get(context: IndexContext) RuntimeError!*RuntimeRef {
|
||||
return context.env.raise(error.BadOperation, "attempt to get non-indexable");
|
||||
}
|
||||
|
||||
fn default_set(context: IndexContext, _: *const RuntimeRef) RuntimeError!void {
|
||||
return context.env.raise(error.BadOperation, "attempt to set non-indexable");
|
||||
}
|
||||
};
|
||||
|
||||
pub const Opcode = union (enum) {
|
||||
push_nil,
|
||||
push_true,
|
||||
push_false,
|
||||
push_zero,
|
||||
push_number: State.Float,
|
||||
push_table: u32,
|
||||
push_object: *State.Object,
|
||||
|
||||
not,
|
||||
neg,
|
||||
|
||||
add,
|
||||
sub,
|
||||
mul,
|
||||
div,
|
||||
|
||||
eql,
|
||||
cgt,
|
||||
clt,
|
||||
cge,
|
||||
cle,
|
||||
};
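// For orientation (not part of the diff): with this stack-machine encoding, an expression
// such as `1 + 2` would be expected to lower to roughly `push_number 1`, `push_number 2`,
// `add`, leaving the resulting number on the value stack for the next opcode to consume.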
|
||||
|
||||
pub const RuntimeEnv = struct {
|
||||
output: coral.io.Writer,
|
||||
stack: coral.list.Stack(u64),
|
||||
interned: coral.table.Hashed([]const u8, Objectid, coral.table.string_keyer),
|
||||
objects: coral.slab.Map(@typeInfo(u32).Int, Object),
|
||||
user_allocator: coral.io.Allocator,
|
||||
allocator: coral.io.Allocator,
|
||||
err_writer: coral.io.Writer,
|
||||
bound_refs: VariantSlab,
|
||||
state: State,
|
||||
|
||||
pub const DataSource = struct {
|
||||
name: []const u8,
|
||||
data: []const u8,
|
||||
pub const Options = struct {
|
||||
out_writer: coral.io.Writer = coral.io.null_writer,
|
||||
err_writer: coral.io.Writer = coral.io.null_writer,
|
||||
};
|
||||
|
||||
pub const Limits = struct {
|
||||
stack_max: u32,
|
||||
calls_max: u32,
|
||||
pub const ScriptSource = struct {
|
||||
name: []const coral.io.Byte,
|
||||
data: []const coral.io.Byte,
|
||||
};
|
||||
|
||||
const Object = struct {
|
||||
ref_count: usize,
|
||||
const Table = struct {
|
||||
state: *State,
|
||||
fields: FieldTable,
|
||||
array: ArrayList,
|
||||
|
||||
state: struct {
|
||||
info: ObjectInfo,
|
||||
userdata: []u8,
|
||||
const ArrayList = coral.list.Stack(State.Variant);
|
||||
|
||||
fields: coral.table.Hashed(*Object, *Value, .{
|
||||
.hasher = struct {
|
||||
fn hash(object: *Object) coral.table.Hash {
|
||||
coral.debug.assert(object.state.info.identity == null);
|
||||
const FieldTable = coral.map.Table([]const coral.io.Byte, struct {
|
||||
field: *State.Object,
|
||||
value: State.Variant,
|
||||
|
||||
return coral.table.hash_string(object.state.userdata);
|
||||
}
|
||||
}.hash,
|
||||
const Self = @This();
|
||||
|
||||
.comparer = struct {
|
||||
fn compare(object_a: *Object, object_b: *Object) isize {
|
||||
coral.debug.assert(object_a.state.info.identity == null);
|
||||
coral.debug.assert(object_b.state.info.identity == null);
|
||||
fn release_objects(self: Self, state: *State) void {
|
||||
state.release(self.field);
|
||||
|
||||
return coral.io.compare(object_a.state.userdata, object_b.state.userdata);
|
||||
}
|
||||
}.compare,
|
||||
}),
|
||||
},
|
||||
};
|
||||
if (self.value == .object) {
|
||||
state.release(self.value.object);
|
||||
}
|
||||
}
|
||||
}, coral.map.string_table_traits);
|
||||
|
||||
pub const ObjectInfo = struct {
|
||||
caller: *const fn (caller: Value, context: CallContext) RuntimeError!Value = default_call,
|
||||
cleaner: *const fn (userdata: []u8) void = default_clean,
|
||||
getter: *const fn (context: CallContext) RuntimeError!Value = default_get,
|
||||
identity: ?*const anyopaque = null,
|
||||
setter: *const fn (context: CallContext) RuntimeError!void = default_set,
|
||||
fn free(self: *Table) void {
|
||||
{
|
||||
var field_iterator = FieldTable.Iterable{.table = &self.fields};
|
||||
|
||||
fn default_call(_: Value, context: CallContext) RuntimeError!Value {
|
||||
return context.env.fail(error.BadOperation, "attempt to call non-callable");
|
||||
while (field_iterator.next()) |entry| {
|
||||
entry.value.release_objects(self.state);
|
||||
}
|
||||
}
|
||||
|
||||
self.fields.free();
|
||||
self.array.free();
|
||||
}
|
||||
|
||||
fn default_clean(_: []u8) void {
|
||||
// Nothing to clean up by default.
|
||||
fn get_field(self: *Table, field_name: *State.Object) State.Variant {
|
||||
const field = self.fields.lookup(field_name.userdata) orelse return .nil;
|
||||
|
||||
if (field.value == .object) {
|
||||
return .{.object = self.state.acquire_instance(field.value.object)};
|
||||
}
|
||||
|
||||
return field.value;
|
||||
}
|
||||
|
||||
fn default_get(context: CallContext) RuntimeError!Value {
|
||||
return context.env.fail(error.BadOperation, "attempt to get non-indexable");
|
||||
fn get_index(self: *Table, index: usize) State.Variant {
|
||||
return self.array.values[index];
|
||||
}
|
||||
|
||||
fn default_set(context: CallContext) RuntimeError!void {
|
||||
return context.env.fail(error.BadOperation, "attempt to set non-indexable");
|
||||
}
|
||||
};
|
||||
|
||||
pub fn call(self: *RuntimeEnv, caller: Value, maybe_index: ?Value, args: []const Value) RuntimeError!RuntimeVar {
|
||||
if (maybe_index) |index| {
|
||||
const callable = try self.get(caller, index);
|
||||
|
||||
defer callable.deinit();
|
||||
|
||||
return switch (callable.value.unpack()) {
|
||||
.objectid => |callable_id| .{
|
||||
.env = self,
|
||||
|
||||
.value = try self.objects.fetch(callable_id).state.info.caller(.{
|
||||
.env = self,
|
||||
.callable = callable.value,
|
||||
.obj = caller,
|
||||
.args = args,
|
||||
}),
|
||||
},
|
||||
|
||||
else => self.fail(error.BadOperation, "attempt to call non-object type"),
|
||||
fn make(allocator: coral.io.Allocator, state: *State) Table {
|
||||
return .{
|
||||
.state = state,
|
||||
.fields = FieldTable.make(allocator),
|
||||
.array = ArrayList.make(allocator),
|
||||
};
|
||||
}
|
||||
|
||||
return self.bind(try self.objects.fetch(try self.to_objectid(caller)).state.info.caller(.{
|
||||
.env = self,
|
||||
.obj = caller,
|
||||
// .caller = .{.object = self.global_object},
|
||||
.args = args,
|
||||
}));
|
||||
}
|
||||
const object_info = ObjectInfo{
|
||||
.clean = struct {
|
||||
fn clean(userdata: []u8) void {
|
||||
@as(*Table, @ptrCast(@alignCast(userdata.ptr))).free();
|
||||
}
|
||||
}.clean,
|
||||
|
||||
pub fn check(
|
||||
self: *RuntimeEnv,
|
||||
condition: bool,
|
||||
runtime_error: RuntimeError,
|
||||
failure_message: []const u8) RuntimeError!void {
|
||||
.get = struct {
|
||||
fn get(context: IndexContext) RuntimeError!*RuntimeRef {
|
||||
const table = @as(*Table, @ptrCast(@alignCast(context.userdata.ptr)));
|
||||
|
||||
if (condition) {
|
||||
return;
|
||||
switch (try context.index.fetch(context.env)) {
|
||||
.nil => return context.env.raise(error.BadOperation, "cannot index a table with nil"),
|
||||
.true => return context.env.raise(error.BadOperation, "cannot index a table with true"),
|
||||
.false => return context.env.raise(error.BadOperation, "cannot index a table with false"),
|
||||
|
||||
.object => |index_object| {
|
||||
const value = table.get_field(index_object);
|
||||
|
||||
errdefer if (value == .object) {
|
||||
context.env.state.release(value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try context.env.bound_refs.insert(value));
|
||||
},
|
||||
|
||||
.number => |index_number| {
|
||||
const value = table.get_index(@intFromFloat(index_number));
|
||||
|
||||
errdefer if (value == .object) {
|
||||
context.env.state.release(value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try context.env.bound_refs.insert(value));
|
||||
},
|
||||
}
|
||||
}
|
||||
}.get,
|
||||
};
|
||||
|
||||
fn set_field(self: *Table, field_name: *State.Object, value: State.Variant) coral.io.AllocationError!void {
|
||||
const previous_entry = try self.fields.replace(field_name.userdata, .{
|
||||
.field = field_name,
|
||||
.value = value,
|
||||
});
|
||||
|
||||
if (previous_entry) |entry| {
|
||||
entry.value.release_objects(self.state);
|
||||
}
|
||||
}
|
||||
|
||||
return self.fail(runtime_error, failure_message);
|
||||
fn set_index(self: *Table, index: usize, value: State.Variant) coral.io.AllocationError!void {
|
||||
self.array.values[index] = value;
|
||||
}
|
||||
};
|
||||
|
||||
const VariantSlab = coral.map.Slab(State.Variant);
|
||||
|
||||
pub fn discard(self: *RuntimeEnv, ref: *RuntimeRef) void {
|
||||
coral.debug.assert(self.bound_refs.remove(@intFromPtr(ref)) != null);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *RuntimeEnv) void {
|
||||
self.stack.deinit();
|
||||
pub fn execute_chunk(self: *RuntimeEnv, name: []const coral.io.Byte, opcodes: []const Opcode) RuntimeError!*RuntimeRef {
|
||||
_ = name;
|
||||
|
||||
for (opcodes) |opcode| {
|
||||
switch (opcode) {
|
||||
.push_nil => try self.state.push_value(.nil),
|
||||
.push_true => try self.state.push_value(.true),
|
||||
.push_false => try self.state.push_value(.false),
|
||||
.push_zero => try self.state.push_value(.{.number = 0}),
|
||||
.push_number => |number| try self.state.push_value(.{.number = number}),
|
||||
|
||||
.push_table => |size| {
|
||||
var table = Table.make(self.allocator, &self.state);
|
||||
|
||||
errdefer table.free();
|
||||
|
||||
{
|
||||
var popped = @as(usize, 0);
|
||||
|
||||
while (popped < size) : (popped += 1) {
|
||||
switch (try self.state.pop_value()) {
|
||||
.object => |field| try table.set_field(field, try self.state.pop_value()),
|
||||
else => return self.raise(error.BadOperation, "attempt to set a non-object field"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const table_object = try self.state.acquire_new(coral.io.bytes_of(&table), &Table.object_info);
|
||||
|
||||
errdefer self.state.release(table_object);
|
||||
|
||||
try self.state.push_value(.{.object = table_object});
|
||||
},
|
||||
|
||||
.push_object => |object| {
|
||||
const acquired_object = self.state.acquire_instance(object);
|
||||
|
||||
errdefer self.state.release(acquired_object);
|
||||
|
||||
try self.state.push_value(.{.object = acquired_object});
|
||||
},
|
||||
|
||||
.not => {
|
||||
try self.state.push_value(switch (try self.state.pop_value()) {
|
||||
.nil => return self.raise(error.BadOperation, "cannot not nil"),
|
||||
.false => .true,
|
||||
.true => .false,
|
||||
.number => return self.raise(error.BadOperation, "cannot not a number"),
|
||||
.object => return self.raise(error.BadOperation, "cannot not an object"),
|
||||
});
|
||||
},
|
||||
|
||||
.neg => {
|
||||
try self.state.push_value(switch (try self.state.pop_value()) {
|
||||
.nil => return self.raise(error.BadOperation, "cannot negate nil"),
.false => return self.raise(error.BadOperation, "cannot negate false"),
.true => return self.raise(error.BadOperation, "cannot negate true"),
.number => |number| .{.number = -number},
.object => return self.raise(error.BadOperation, "cannot negate an object"),
|
||||
});
|
||||
},
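// The arithmetic and comparison opcodes that follow are left as empty stubs in this
// revision; their numeric dispatch is not implemented yet.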
|
||||
|
||||
.add => {
|
||||
|
||||
},
|
||||
|
||||
.sub => {
|
||||
|
||||
},
|
||||
|
||||
.mul => {
|
||||
|
||||
},
|
||||
|
||||
.div => {
|
||||
|
||||
},
|
||||
|
||||
.eql => {
|
||||
|
||||
},
|
||||
|
||||
.cgt => {
|
||||
|
||||
},
|
||||
|
||||
.clt => {
|
||||
|
||||
},
|
||||
|
||||
.cge => {
|
||||
|
||||
},
|
||||
|
||||
.cle => {
|
||||
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
const return_value = try self.state.pop_value();
|
||||
|
||||
errdefer if (return_value == .object) {
|
||||
self.state.release(return_value.object);
|
||||
};
|
||||
|
||||
return @ptrFromInt(try self.bound_refs.insert(return_value));
|
||||
}
|
||||
|
||||
pub fn execute_data(self: *RuntimeEnv, allocator: coral.io.Allocator, source: DataSource) RuntimeError!RuntimeVar {
|
||||
var ast = try Ast.init(allocator);
|
||||
pub fn execute_file(self: *RuntimeEnv, file_access: file.Access, file_path: file.Path) RuntimeError!*RuntimeRef {
|
||||
const error_message = "failed to load file";
|
||||
|
||||
defer ast.deinit();
|
||||
const file_data = (try file.allocate_and_load(self.allocator, file_access, file_path)) orelse {
|
||||
return self.raise(error.SystemFailure, error_message);
|
||||
};
|
||||
|
||||
defer self.allocator.deallocate(file_data);
|
||||
|
||||
return self.execute_script(.{
|
||||
.name = file_path.to_string() orelse return self.raise(error.SystemFailure, error_message),
|
||||
.data = file_data,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn execute_script(self: *RuntimeEnv, source: ScriptSource) RuntimeError!*RuntimeRef {
|
||||
var ast = Ast.make(self.allocator);
|
||||
|
||||
defer ast.free();
|
||||
|
||||
{
|
||||
var tokenizer = tokens.Tokenizer{.source = source.data};
|
||||
|
||||
try ast.parse(&tokenizer);
|
||||
ast.parse(&tokenizer) catch |parse_error| switch (parse_error) {
|
||||
error.BadSyntax => return self.raise(error.BadSyntax, ast.error_message),
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
};
|
||||
}
|
||||
|
||||
var chunk = Chunk{
|
||||
var compiler = Compiler.make(self.allocator, &self.state);
|
||||
|
||||
defer compiler.free();
|
||||
|
||||
try compiler.compile_ast(ast);
|
||||
|
||||
return self.execute_chunk(source.name, compiler.list_opcodes());
|
||||
}
|
||||
|
||||
pub fn free(self: *RuntimeEnv) void {
|
||||
self.bound_refs.free();
|
||||
self.state.free();
|
||||
}
|
||||
|
||||
pub fn get_field(self: *RuntimeEnv, indexable: *const RuntimeRef, field: []const u8) RuntimeError!*RuntimeRef {
|
||||
const interned_field = try self.intern(field);
|
||||
|
||||
defer self.discard(interned_field);
|
||||
|
||||
const indexable_object = try indexable.fetch_object(self);
|
||||
|
||||
return ObjectInfo.cast(indexable_object.userinfo).get(.{
|
||||
.env = self,
|
||||
.opcodes = .{.allocator = allocator},
|
||||
};
|
||||
|
||||
const typeid = "<chunk>";
|
||||
|
||||
const script = try self.new_object(allocator, coral.io.bytes_of(&chunk), .{
|
||||
.identity = typeid,
|
||||
|
||||
.cleaner = struct {
|
||||
fn clean(userdata: []const u8) void {
|
||||
@ptrCast(*Chunk, @alignCast(@alignOf(Chunk), userdata)).deinit();
|
||||
}
|
||||
}.clean,
|
||||
|
||||
.caller = struct {
|
||||
fn call(caller: Value, context: CallContext) RuntimeError!Value {
|
||||
_ = caller;
|
||||
|
||||
return (context.env.native_cast(context.obj, typeid, Chunk) catch unreachable).execute();
|
||||
}
|
||||
}.call,
|
||||
});
|
||||
|
||||
defer script.deinit();
|
||||
|
||||
return try self.call(script.value, null, &.{});
|
||||
}
|
||||
|
||||
pub fn execute_file(
|
||||
self: *RuntimeEnv,
|
||||
allocator: coral.io.Allocator,
|
||||
file_system: file.System,
|
||||
file_path: file.Path) RuntimeError!RuntimeVar {
|
||||
|
||||
const readable_file = file_system.open_readable(file_path) catch return error.SystemFailure;
|
||||
|
||||
defer readable_file.close();
|
||||
|
||||
var file_data = coral.list.ByteStack{.allocator = allocator};
|
||||
const file_size = (file_system.query_info(file_path) catch return error.SystemFailure).size;
|
||||
|
||||
try file_data.grow(file_size);
|
||||
|
||||
defer file_data.deinit();
|
||||
|
||||
{
|
||||
var stream_buffer = [_]u8{0} ** 4096;
|
||||
|
||||
if ((coral.io.stream(coral.list.stack_as_writer(&file_data), readable_file.as_reader(), &stream_buffer) catch {
|
||||
return error.SystemFailure;
|
||||
}) != file_size) {
|
||||
return error.SystemFailure;
|
||||
}
|
||||
}
|
||||
|
||||
return try self.execute_data(allocator, .{
|
||||
.name = file_path.to_string() catch return error.SystemFailure,
|
||||
.data = file_data.values,
|
||||
.indexable = indexable,
|
||||
.index = interned_field,
|
||||
.userdata = indexable_object.userdata,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn fail(self: *RuntimeEnv, runtime_error: RuntimeError, failure_message: []const u8) RuntimeError {
|
||||
// TODO: Call stack and line numbers.
|
||||
coral.utf8.print_formatted(self.output, "{name}@({line}): {message}\n", .{
|
||||
.name = ".ona",
|
||||
.line = @as(u64, 0),
|
||||
.message = failure_message,
|
||||
}) catch return error.SystemFailure;
|
||||
pub fn intern(self: *RuntimeEnv, data: []const u8) RuntimeError!*RuntimeRef {
|
||||
const data_object = try self.state.acquire_interned(data, &string_info);
|
||||
|
||||
return runtime_error;
|
||||
errdefer self.state.release(data_object);
|
||||
|
||||
return @ptrFromInt(try self.bound_refs.insert(.{.object = data_object}));
|
||||
}
|
||||
|
||||
pub fn get(self: *RuntimeEnv, indexable: Value, index: Value) RuntimeError!RuntimeVar {
|
||||
const indexable_object = self.objects.fetch(switch (indexable.unpack()) {
|
||||
.objectid => |indexable_id| indexable_id,
|
||||
else => return self.fail(error.BadOperation, "attempt to index non-indexable type"),
|
||||
});
|
||||
|
||||
return .{
|
||||
.env = self,
|
||||
|
||||
.value = try indexable_object.state.info.getter(.{
|
||||
.env = self,
|
||||
.obj = indexable,
|
||||
.args = &.{index},
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator, output: coral.io.Writer, limits: Limits) RuntimeError!RuntimeEnv {
|
||||
pub fn make(allocator: coral.io.Allocator, options: Options) RuntimeError!RuntimeEnv {
|
||||
var env = RuntimeEnv{
|
||||
.output = output,
|
||||
.stack = .{.allocator = allocator},
|
||||
.objects = .{.allocator = allocator},
|
||||
.interned = .{.allocator = allocator},
|
||||
.user_allocator = allocator,
|
||||
.allocator = allocator,
|
||||
.bound_refs = VariantSlab.make(allocator),
|
||||
.state = State.make(allocator),
|
||||
.err_writer = options.err_writer,
|
||||
};
|
||||
|
||||
try env.stack.grow(limits.stack_max * limits.calls_max);
|
||||
|
||||
errdefer env.stack.deinit();
|
||||
|
||||
return env;
|
||||
}
|
||||
|
||||
pub fn intern(self: *RuntimeEnv, string: []const u8) RuntimeError!Value {
|
||||
return Value.pack_objectid(self.interned.lookup(string) orelse {
|
||||
const interned_value = (try self.new_string(string)).value;
|
||||
pub fn new_object(self: *RuntimeEnv, userdata: []const u8, info: *const ObjectInfo) RuntimeError!*RuntimeRef {
|
||||
const data_object = try self.state.acquire_new(userdata, info);
|
||||
|
||||
switch (interned_value.unpack()) {
|
||||
.objectid => |id| coral.debug.assert(try self.interned.insert(string, id)),
|
||||
else => unreachable,
|
||||
}
|
||||
defer self.state.release(data_object);
|
||||
|
||||
return interned_value;
|
||||
});
|
||||
return @ptrFromInt(try self.bound_refs.insert(.{.object = data_object}));
|
||||
}
|
||||
|
||||
pub fn native_cast(self: *RuntimeEnv, castable: Value, id: *const anyopaque, comptime Type: type) RuntimeError!*Type {
|
||||
const object = self.objects.fetch(castable.to_objectid() orelse {
|
||||
return self.fail(error.BadOperation, "attempt to cast non-castable type");
|
||||
});
|
||||
|
||||
const is_expected_type = (object.state.info.identity == id) and (object.state.userdata.len == @sizeOf(Type));
|
||||
|
||||
try self.check(is_expected_type, "invalid object cast: native type");
|
||||
|
||||
return @ptrCast(*Type, @alignCast(@alignOf(Type), object.state.userdata));
|
||||
pub fn nil(self: *RuntimeEnv) RuntimeError!*RuntimeRef {
|
||||
return @ptrFromInt(try self.bound_refs.insert(.nil));
|
||||
}
|
||||
|
||||
pub fn new_object(self: *RuntimeEnv, allocator: coral.io.Allocator, userdata: []const u8, info: ObjectInfo) RuntimeError!RuntimeVar {
|
||||
const allocation = try coral.io.allocate_many(allocator, userdata.len, u8);
|
||||
pub fn raise(self: *RuntimeEnv, runtime_error: RuntimeError, error_message: []const u8) RuntimeError {
|
||||
if (self.err_writer.invoke(error_message) == null) {
|
||||
return error.SystemFailure;
|
||||
}
|
||||
|
||||
errdefer coral.io.deallocate(allocator, allocation);
|
||||
return runtime_error;
|
||||
}
|
||||
|
||||
coral.io.copy(allocation, userdata);
|
||||
pub fn to_float(self: *RuntimeEnv, ref: *const RuntimeRef) RuntimeError!State.Float {
|
||||
return ref.fetch_number(self);
|
||||
}
|
||||
|
||||
const objectid = try self.objects.insert(.{
|
||||
.ref_count = 1,
|
||||
|
||||
.state = .{
|
||||
.info = info,
|
||||
.userdata = allocation,
|
||||
.fields = .{.allocator = allocator},
|
||||
},
|
||||
});
|
||||
|
||||
return .{
|
||||
.env = self,
|
||||
.value = .{.data = @as(usize, 0x7FF8000000000002) | (@as(usize, objectid) << 32)},
|
||||
pub fn to_string(self: *RuntimeEnv, ref: *const RuntimeRef) RuntimeError![]const u8 {
|
||||
const object = try switch (try ref.fetch(self)) {
|
||||
.object => |object| object,
|
||||
else => self.raise(error.BadOperation, "cannot convert to object"),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn new_string(self: *RuntimeEnv, data: []const u8) RuntimeError!RuntimeVar {
|
||||
return try self.new_object(data, .{
|
||||
.getter = struct {
|
||||
fn get_byte(context: CallContext) RuntimeError!*Value {
|
||||
const string = context.env.string_cast(context.obj) catch unreachable;
|
||||
const index = try context.env.to_int(try context.arg_at(0));
|
||||
if (ObjectInfo.cast(object.userinfo) != &string_info) {
|
||||
return self.raise(error.BadOperation, "object is not a string");
|
||||
}
|
||||
|
||||
try context.env.check(coral.math.is_clamped(index, 0, string.len), "index out of string bounds");
|
||||
|
||||
return context.env.new_int(string[@intCast(usize, index)]);
|
||||
}
|
||||
}.get_byte,
|
||||
});
|
||||
return object.userdata;
|
||||
}
|
||||
};
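// Rough usage sketch of the new RuntimeEnv surface, assuming the functions declared above
// (error handling elided; `log` stands in for any WritableLog-style sink):
//
// var env = try kym.RuntimeEnv.make(heap.allocator, .{.err_writer = log.as_writer()});
// defer env.free();
//
// const result = try env.execute_file(file_access, file.Path.from(&.{"app.ona"}));
// defer env.discard(result);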
|
||||
|
||||
pub const RuntimeError = coral.io.AllocationError || Ast.ParseError || error {
|
||||
pub const RuntimeError = coral.io.AllocationError || State.PopError || error {
|
||||
BadSyntax,
|
||||
BadOperation,
|
||||
BadArgument,
|
||||
SystemFailure,
|
||||
};
|
||||
|
||||
pub const RuntimeVar = struct {
|
||||
value: Value,
|
||||
env: *RuntimeEnv,
|
||||
|
||||
pub fn deinit(self: RuntimeVar) void {
|
||||
switch (self.value.unpack()) {
|
||||
.objectid => |id| {
|
||||
const object = self.env.objects.fetch(id);
|
||||
|
||||
coral.debug.assert(object.ref_count != 0);
|
||||
|
||||
object.ref_count -= 1;
|
||||
|
||||
if (object.ref_count == 0) {
|
||||
object.state.info.cleaner(object.state.userdata);
|
||||
// TODO: Free individual key-value pairs of fields
|
||||
object.state.fields.deinit();
|
||||
coral.io.deallocate(self.env.user_allocator, object.state.userdata);
|
||||
self.env.objects.remove(id);
|
||||
}
|
||||
},
|
||||
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Value = struct {
|
||||
data: u64,
|
||||
|
||||
pub const Unpacked = union (enum) {
|
||||
nil,
|
||||
false,
|
||||
true,
|
||||
number: Float,
|
||||
objectid: Objectid,
|
||||
};
|
||||
|
||||
fn pack_number(float: Float) Value {
|
||||
return @bitCast(Value, float);
|
||||
pub const RuntimeRef = opaque {
|
||||
fn fetch(self: *const RuntimeRef, env: *RuntimeEnv) RuntimeError!State.Variant {
|
||||
return env.bound_refs.lookup(@intFromPtr(self)) orelse env.raise(error.BadOperation, "stale ref");
|
||||
}
|
||||
|
||||
fn pack_objectid(id: Objectid) Value {
|
||||
return signature.objectid | id;
|
||||
fn fetch_number(self: *const RuntimeRef, env: *RuntimeEnv) RuntimeError!State.Float {
|
||||
return switch (try self.fetch(env)) {
|
||||
.nil => env.raise(error.BadOperation, "cannot convert nil to number"),
|
||||
.true => env.raise(error.BadOperation, "cannot convert true to number"),
|
||||
.false => env.raise(error.BadOperation, "cannot convert false to number"),
|
||||
.number => |number| number,
|
||||
.object => env.raise(error.BadOperation, "cannot convert object to number"),
|
||||
};
|
||||
}
|
||||
|
||||
pub const @"false" = @as(Value, nan | 0x0001000000000000);
|
||||
|
||||
const mask = .{
|
||||
.sign = @as(u64, 0x8000000000000000),
|
||||
.exponent = @as(u64, 0x7ff0000000000000),
|
||||
.quiet = @as(u64, 0x0008000000000000),
|
||||
.type = @as(u64, 0x0007000000000000),
|
||||
.signature = @as(u64, 0xffff000000000000),
|
||||
.object = @as(u64, 0x00000000ffffffff),
|
||||
};
|
||||
|
||||
pub const nan = @as(Value, mask.exponent | mask.quiet);
|
||||
|
||||
pub const nil = @as(Value, nan | 0x0003000000000000);
|
||||
|
||||
const signature = .{
|
||||
|
||||
};
|
||||
|
||||
pub const @"true" = @as(Value, nan | 0x0002000000000000);
|
||||
|
||||
pub fn unpack(self: Value) Unpacked {
|
||||
if ((~self.data & mask.exponent) != 0) {
|
||||
return .{.number = @bitCast(Float, self.data)};
|
||||
}
|
||||
|
||||
return switch ((self.data & mask.signature) != 0) {
|
||||
.signature_nan => .{.number = @bitCast(Float, self.data)},
|
||||
.signature_false => .false,
|
||||
.signature_true => .true,
|
||||
.signature_object => @intCast(Objectid, self.data & mask.object),
|
||||
else => return .nil,
|
||||
fn fetch_object(self: *const RuntimeRef, env: *RuntimeEnv) RuntimeError!*State.Object {
|
||||
return switch (try self.fetch(env)) {
|
||||
.nil => env.raise(error.BadOperation, "cannot convert nil to object"),
|
||||
.true => env.raise(error.BadOperation, "cannot convert true to object"),
|
||||
.false => env.raise(error.BadOperation, "cannot convert false to object"),
|
||||
.number => env.raise(error.BadOperation, "cannot convert number to object"),
|
||||
.object => |object| object,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
const string_info = ObjectInfo{
|
||||
|
||||
};
|
||||
|
|
|
@@ -4,49 +4,16 @@ const tokens = @import("./tokens.zig");
|
|||
|
||||
allocator: coral.io.Allocator,
|
||||
arena: coral.arena.Stacking,
|
||||
statements: StatementList,
|
||||
statements: Statement.List,
|
||||
error_message: []const u8,
|
||||
|
||||
pub const BinaryOperator = enum {
|
||||
addition,
|
||||
subtraction,
|
||||
multiplication,
|
||||
divsion,
|
||||
equals_comparison,
|
||||
greater_than_comparison,
|
||||
greater_equals_comparison,
|
||||
less_than_comparison,
|
||||
less_equals_comparison,
|
||||
|
||||
fn token(self: BinaryOperator) tokens.Token {
|
||||
return switch (self) {
|
||||
.addition => .symbol_plus,
|
||||
.subtraction => .symbol_minus,
|
||||
.multiplication => .symbol_asterisk,
|
||||
.divsion => .symbol_forward_slash,
|
||||
.equals_comparison => .symbol_double_equals,
|
||||
.greater_than_comparison => .symbol_greater_than,
|
||||
.greater_equals_comparison => .symbol_greater_equals,
|
||||
.less_than_comparison => .symbol_less_than,
|
||||
.less_equals_comparison => .symbol_less_equals,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Expression = union (enum) {
|
||||
nil_literal,
|
||||
true_literal,
|
||||
false_literal,
|
||||
integer_literal: []const u8,
|
||||
float_literal: []const u8,
|
||||
number_literal: []const u8,
|
||||
string_literal: []const u8,
|
||||
array_literal: coral.list.Stack(Expression),
|
||||
|
||||
table_literal: coral.list.Stack(struct {
|
||||
identifier: []const u8,
|
||||
expression: Expression,
|
||||
}),
|
||||
|
||||
table_literal: NamedList,
|
||||
grouped_expression: *Expression,
|
||||
|
||||
binary_operation: struct {
|
||||
|
@@ -59,13 +26,43 @@ pub const Expression = union (enum) {
|
|||
operator: UnaryOperator,
|
||||
expression: *Expression,
|
||||
},
|
||||
|
||||
pub const BinaryOperator = enum {
|
||||
addition,
|
||||
subtraction,
|
||||
multiplication,
|
||||
divsion,
|
||||
equals_comparison,
|
||||
greater_than_comparison,
|
||||
greater_equals_comparison,
|
||||
less_than_comparison,
|
||||
less_equals_comparison,
|
||||
|
||||
fn token(self: BinaryOperator) tokens.Token {
|
||||
return switch (self) {
|
||||
.addition => .symbol_plus,
|
||||
.subtraction => .symbol_minus,
|
||||
.multiplication => .symbol_asterisk,
|
||||
.divsion => .symbol_forward_slash,
|
||||
.equals_comparison => .symbol_double_equals,
|
||||
.greater_than_comparison => .symbol_greater_than,
|
||||
.greater_equals_comparison => .symbol_greater_equals,
|
||||
.less_than_comparison => .symbol_less_than,
|
||||
.less_equals_comparison => .symbol_less_equals,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const NamedList = coral.list.Stack(struct {
|
||||
identifier: []const u8,
|
||||
expression: Expression,
|
||||
});
|
||||
|
||||
pub const List = coral.list.Stack(Expression);
|
||||
};
|
||||
|
||||
const ExpressionParser = fn (self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression;
|
||||
|
||||
///
|
||||
///
|
||||
///
|
||||
pub const ParseError = error {
|
||||
OutOfMemory,
|
||||
BadSyntax,
|
||||
|
@@ -76,16 +73,16 @@ const Self = @This();
|
|||
pub const Statement = union (enum) {
|
||||
return_expression: Expression,
|
||||
return_nothing,
|
||||
};
|
||||
|
||||
const StatementList = coral.list.Stack(Statement);
|
||||
const List = coral.list.Stack(Statement);
|
||||
};
|
||||
|
||||
const UnaryOperator = enum {
|
||||
boolean_negation,
|
||||
numeric_negation,
|
||||
};
|
||||
|
||||
fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime operators: []const BinaryOperator) ExpressionParser {
|
||||
fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime operators: []const Expression.BinaryOperator) ExpressionParser {
|
||||
return struct {
|
||||
fn parse(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression {
|
||||
var expression = try parse_next(self, tokenizer);
|
||||
|
@@ -117,30 +114,30 @@ fn binary_operation_parser(comptime parse_next: ExpressionParser, comptime opera
|
|||
}.parse;
|
||||
}
|
||||
|
||||
fn check_syntax(self: *Self, condition: bool, error_message: []const u8) ParseError!void {
|
||||
fn check_syntax(self: *Self, condition: bool, message: []const u8) ParseError!void {
|
||||
if (condition) {
|
||||
return;
|
||||
}
|
||||
|
||||
return self.fail_syntax(error_message);
|
||||
return self.fail_syntax(message);
|
||||
}
|
||||
|
||||
pub fn deinit(self: *Self) void {
|
||||
self.arena.deinit();
|
||||
self.statements.deinit();
|
||||
}
|
||||
|
||||
fn fail_syntax(self: *Self, error_message: []const u8) ParseError {
|
||||
self.error_message = error_message;
|
||||
fn fail_syntax(self: *Self, message: []const u8) ParseError {
|
||||
self.error_message = message;
|
||||
|
||||
return error.BadSyntax;
|
||||
}
|
||||
|
||||
pub fn init(allocator: coral.io.Allocator) coral.io.AllocationError!Self {
|
||||
pub fn free(self: *Self) void {
|
||||
self.arena.free();
|
||||
self.statements.free();
|
||||
}
|
||||
|
||||
pub fn make(allocator: coral.io.Allocator) Self {
|
||||
return Self{
|
||||
.arena = try coral.arena.Stacking.init(allocator, 4096),
|
||||
.arena = coral.arena.Stacking.make(allocator, 4096),
|
||||
.allocator = allocator,
|
||||
.statements = .{.allocator = allocator},
|
||||
.statements = Statement.List.make(allocator),
|
||||
.error_message = "",
|
||||
};
|
||||
}
|
||||
|
@@ -150,16 +147,14 @@ pub fn list_statements(self: Self) []const Statement {
|
|||
}
|
||||
|
||||
pub fn parse(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!void {
|
||||
self.reset();
|
||||
self.free();
|
||||
|
||||
errdefer self.reset();
|
||||
|
||||
var has_not_returned_yet = true;
|
||||
var has_returned = false;
|
||||
|
||||
while (tokenizer.step(.{.include_newlines = false})) {
|
||||
switch (tokenizer.current_token) {
|
||||
.keyword_return => {
|
||||
try self.check_syntax(has_not_returned_yet, "cannot return more than once per function scope");
|
||||
try self.check_syntax(!has_returned, "multiple returns in function scope but expected only one");
|
||||
|
||||
try self.statements.push_one(get_statement: {
|
||||
if (tokenizer.step(.{.include_newlines = true})) {
|
||||
|
@@ -177,7 +172,7 @@ pub fn parse(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!void {
|
|||
break: get_statement .return_nothing;
|
||||
});
|
||||
|
||||
has_not_returned_yet = false;
|
||||
has_returned = true;
|
||||
},
|
||||
|
||||
else => return self.fail_syntax("invalid statement"),
|
||||
|
@@ -215,16 +210,10 @@ fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression
|
|||
return Expression{.grouped_expression = try coral.io.allocate_one(self.arena.as_allocator(), expression)};
|
||||
},
|
||||
|
||||
.integer => |value| {
|
||||
.number => |value| {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return Expression{.integer_literal = value};
|
||||
},
|
||||
|
||||
.real => |value| {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return Expression{.float_literal = value};
|
||||
return Expression{.number_literal = value};
|
||||
},
|
||||
|
||||
.string => |value| {
|
||||
|
@@ -233,48 +222,10 @@ fn parse_factor(self: *Self, tokenizer: *tokens.Tokenizer) ParseError!Expression
|
|||
return Expression{.string_literal = value};
|
||||
},
|
||||
|
||||
.symbol_bracket_left => {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end of array literal");
|
||||
|
||||
var expression = Expression{
|
||||
.array_literal = .{
|
||||
.allocator = self.arena.as_allocator(),
|
||||
},
|
||||
};
|
||||
|
||||
coral.debug.assert(expression == .array_literal);
|
||||
|
||||
const array_average_maximum = 32;
|
||||
|
||||
try expression.array_literal.grow(array_average_maximum);
|
||||
|
||||
while (true) {
|
||||
switch (tokenizer.current_token) {
|
||||
.symbol_bracket_right => {
|
||||
_ = tokenizer.step(.{.include_newlines = false});
|
||||
|
||||
return expression;
|
||||
},
|
||||
|
||||
else => {
|
||||
try self.check_syntax(
|
||||
tokenizer.step(.{.include_newlines = false}),
|
||||
"expected `]` or expression after `[`");
|
||||
|
||||
try expression.array_literal.push_one(try self.parse_expression(tokenizer));
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
.symbol_brace_left => {
|
||||
try self.check_syntax(tokenizer.step(.{.include_newlines = false}), "unexpected end of table literal");
|
||||
|
||||
var expression = Expression{
|
||||
.table_literal = .{
|
||||
.allocator = self.arena.as_allocator(),
|
||||
},
|
||||
};
|
||||
var expression = Expression{.table_literal = Expression.NamedList.make(self.arena.as_allocator())};
|
||||
|
||||
coral.debug.assert(expression == .table_literal);
|
||||
|
||||
|
@@ -356,8 +307,3 @@ const parse_term = binary_operation_parser(parse_factor, &.{
|
|||
.multiplication,
|
||||
.divsion,
|
||||
});
|
||||
|
||||
pub fn reset(self: *Self) void {
|
||||
self.statements.clear();
|
||||
self.arena.deinit();
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,116 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
allocator: coral.io.Allocator,
|
||||
interned: SymbolTable,
|
||||
globals: Object,
|
||||
values: DataStack,
|
||||
frames: CallStack,
|
||||
|
||||
pub const Float = f64;
|
||||
|
||||
const CallStack = coral.list.Stack(struct {
|
||||
callable: *Object,
|
||||
opcode_index: usize,
|
||||
stack_index: usize,
|
||||
});
|
||||
|
||||
const DataStack = coral.list.Stack(Variant);
|
||||
|
||||
pub const Object = struct {
|
||||
ref_count: usize,
|
||||
userdata: []coral.io.Byte,
|
||||
userinfo: *const anyopaque,
|
||||
};
|
||||
|
||||
pub const PopError = error {
|
||||
StackOverflow,
|
||||
};
|
||||
|
||||
const Self = @This();
|
||||
|
||||
const SymbolTable = coral.map.Table([]const coral.io.Byte, *Object, coral.map.string_table_traits);
|
||||
|
||||
pub const Variant = union (enum) {
|
||||
nil,
|
||||
true,
|
||||
false,
|
||||
number: Float,
|
||||
object: *Object,
|
||||
};
|
||||
|
||||
pub fn acquire_instance(_: *Self, object: *Object) *Object {
|
||||
// TODO: safety-check object belongs to state.
|
||||
object.ref_count += 1;
|
||||
|
||||
return object;
|
||||
}
|
||||
|
||||
pub fn acquire_interned(self: *Self, userdata: []const u8, userinfo: *const anyopaque) coral.io.AllocationError!*Object {
|
||||
// TODO: Include userinfo in matching lookup.
|
||||
if (self.interned.lookup(userdata)) |object| {
|
||||
return self.acquire_instance(object);
|
||||
} else {
|
||||
const data_object = try self.acquire_new(userdata, userinfo);
|
||||
|
||||
errdefer self.release(data_object);
|
||||
|
||||
coral.debug.assert(try self.interned.insert(data_object.userdata, data_object));
|
||||
|
||||
return data_object;
|
||||
}
|
||||
}
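// Assumed behaviour of the lookup-or-create logic above: acquiring the same byte string
// twice hands back one shared object with its reference count bumped instead of a copy.
//
// const a = try state.acquire_interned("title", &some_info);
// const b = try state.acquire_interned("title", &some_info);
// // `a` and `b` refer to the same interned object; release each acquisition when done.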
|
||||
|
||||
pub fn acquire_new(self: *Self, userdata: []const u8, userinfo: *const anyopaque) coral.io.AllocationError!*Object {
|
||||
const allocated_userdata = try coral.io.allocate_copy(self.allocator, userdata);
|
||||
|
||||
errdefer self.allocator.deallocate(allocated_userdata);
|
||||
|
||||
const allocated_object = try coral.io.allocate_one(self.allocator, Object{
|
||||
.ref_count = 1,
|
||||
.userdata = allocated_userdata,
|
||||
.userinfo = userinfo,
|
||||
});
|
||||
|
||||
errdefer self.allocator.deallocate(allocated_object);
|
||||
|
||||
return allocated_object;
|
||||
}
|
||||
|
||||
pub fn free(self: *Self) void {
|
||||
self.values.free();
|
||||
self.frames.free();
|
||||
self.interned.free();
|
||||
}
|
||||
|
||||
pub fn make(allocator: coral.io.Allocator) Self {
|
||||
return .{
|
||||
.values = DataStack.make(allocator),
|
||||
.frames = CallStack.make(allocator),
|
||||
.interned = SymbolTable.make(allocator),
|
||||
.allocator = allocator,
|
||||
|
||||
.globals = .{
|
||||
.ref_count = 0,
|
||||
.userdata = &.{},
|
||||
.userinfo = &.{},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn pop_value(self: *Self) PopError!Variant {
|
||||
return self.values.pop() orelse error.StackOverflow;
|
||||
}
|
||||
|
||||
pub fn push_value(self: *Self, value: Variant) coral.io.AllocationError!void {
|
||||
return self.values.push_one(value);
|
||||
}
|
||||
|
||||
pub fn release(self: *Self, object: *Object) void {
|
||||
coral.debug.assert(object.ref_count != 0);
|
||||
|
||||
object.ref_count -= 1;
|
||||
|
||||
if (object.ref_count == 0) {
|
||||
self.allocator.deallocate(object);
|
||||
}
|
||||
}
|
|
@@ -29,8 +29,7 @@ pub const Token = union(enum) {
|
|||
symbol_equals,
|
||||
symbol_double_equals,
|
||||
|
||||
integer: []const u8,
|
||||
real: []const u8,
|
||||
number: []const u8,
|
||||
string: []const u8,
|
||||
|
||||
keyword_nil,
|
||||
|
@@ -41,7 +40,7 @@ pub const Token = union(enum) {
|
|||
|
||||
pub fn text(self: Token) []const u8 {
|
||||
return switch (self) {
|
||||
.unknown => |unknown| @ptrCast([*]const u8, &unknown)[0 .. 1],
|
||||
.unknown => |unknown| @as([*]const u8, @ptrCast(&unknown))[0 .. 1],
|
||||
.newline => "newline",
|
||||
|
||||
.global => |identifier| identifier,
|
||||
|
@@ -69,8 +68,7 @@ pub const Token = union(enum) {
|
|||
.symbol_equals => "=",
|
||||
.symbol_double_equals => "==",
|
||||
|
||||
.integer => |literal| literal,
|
||||
.real => |literal| literal,
|
||||
.number => |literal| literal,
|
||||
.string => |literal| literal,
|
||||
|
||||
.keyword_nil => "nil",
|
||||
|
@@ -134,7 +132,7 @@ pub const Tokenizer = struct {
|
|||
else => break,
|
||||
};
|
||||
|
||||
self.current_token = .{.real = self.source[begin .. cursor]};
|
||||
self.current_token = .{.number = self.source[begin .. cursor]};
|
||||
|
||||
return true;
|
||||
},
|
||||
|
@@ -142,7 +140,7 @@ pub const Tokenizer = struct {
|
|||
else => break,
|
||||
};
|
||||
|
||||
self.current_token = .{.integer = self.source[begin .. cursor]};
|
||||
self.current_token = .{.number = self.source[begin .. cursor]};
|
||||
|
||||
return true;
|
||||
},
|
||||
|
|
|
@@ -1,6 +1,4 @@
|
|||
const builtin = @import("builtin");
|
||||
|
||||
const canvas = @import("./canvas.zig");
|
||||
const app = @import("./app.zig");
|
||||
|
||||
const coral = @import("coral");
|
||||
|
||||
|
@@ -8,163 +6,111 @@ const ext = @import("./ext.zig");
|
|||
|
||||
pub const file = @import("./file.zig");
|
||||
|
||||
pub const heap = @import("./heap.zig");
|
||||
const heap = @import("./heap.zig");
|
||||
|
||||
const kym = @import("./kym.zig");
|
||||
|
||||
const AppManifest = struct {
|
||||
title: [255:0]u8 = [_:0]u8{0} ** 255,
|
||||
width: u16 = 640,
|
||||
height: u16 = 480,
|
||||
|
||||
pub fn load_script(self: *AppManifest, env: *kym.RuntimeEnv, fs: file.System, file_path: []const u8) !void {
|
||||
var manifest = try env.execute_file(heap.allocator, fs, file.Path.from(&.{file_path}));
|
||||
|
||||
defer manifest.deinit();
|
||||
|
||||
{
|
||||
var title = try env.get(manifest.value, try env.intern("title"));
|
||||
|
||||
defer title.deinit();
|
||||
|
||||
const title_string = try env.string_cast(title.value);
|
||||
|
||||
try env.check(title_string.len <= self.title.len, "`title` cannot exceed 255 bytes in length");
|
||||
coral.io.copy(&self.title, title_string);
|
||||
}
|
||||
|
||||
const u16_max = coral.math.max_int(@typeInfo(u16).Int);
|
||||
|
||||
{
|
||||
const width = try env.get(manifest.value, try env.intern("width"));
|
||||
|
||||
errdefer width.deinit();
|
||||
|
||||
if (width.value.as_number()) |value| {
|
||||
if (value < u16_max) {
|
||||
self.width = @floatToInt(u16, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
const height = try env.get(manifest.value, try env.intern("height"));
|
||||
|
||||
errdefer height.deinit();
|
||||
|
||||
if (height.value.as_number()) |value| {
|
||||
if (value < u16_max) {
|
||||
self.height = @floatToInt(u16, value);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
pub const RuntimeError = error {
|
||||
OutOfMemory,
|
||||
InitFailure,
|
||||
BadManifest,
|
||||
};
|
||||
|
||||
fn stack_as_log_writer(self: *coral.list.ByteStack) coral.io.Writer {
|
||||
return coral.io.Writer.bind(coral.list.ByteStack, self, struct {
|
||||
fn write(stack: *coral.list.ByteStack, bytes: []const coral.io.Byte) ?usize {
|
||||
var line_written = @as(usize, 0);
|
||||
|
||||
for (bytes) |byte| {
|
||||
if (byte == '\n') {
|
||||
ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%.*s", stack.values.len, stack.values.ptr);
|
||||
stack.clear();
|
||||
|
||||
line_written = 0;
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
stack.push_one(byte) catch {
|
||||
coral.debug.assert(stack.drop(line_written));
|
||||
|
||||
return null;
|
||||
};
|
||||
|
||||
line_written += 1;
|
||||
}
|
||||
|
||||
return bytes.len;
|
||||
}
|
||||
}.write);
|
||||
fn last_sdl_error() [:0]const u8 {
|
||||
return coral.io.slice_sentineled(@as(u8, 0), @as([*:0]const u8, @ptrCast(ext.SDL_GetError())));
|
||||
}
|
||||
|
||||
pub fn run_app(base_file_system: file.System) void {
|
||||
defer heap.trace_leaks();
|
||||
pub fn run_app(file_access: file.Access) RuntimeError!void {
|
||||
var info_log = app.WritableLog.make(.info, heap.allocator);
|
||||
|
||||
var log_buffer = coral.list.ByteStack{.allocator = heap.allocator};
|
||||
defer info_log.free();
|
||||
|
||||
defer log_buffer.deinit();
|
||||
var fail_log = app.WritableLog.make(.fail, heap.allocator);
|
||||
|
||||
var script_env = kym.RuntimeEnv.init(heap.allocator, stack_as_log_writer(&log_buffer), .{
|
||||
.stack_max = 512,
|
||||
.calls_max = 512,
|
||||
}) catch {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "failed to initialize Kym vm\n");
|
||||
};
|
||||
|
||||
defer script_env.deinit();
|
||||
|
||||
const app_file_name = "app.ona";
|
||||
var app_manifest = AppManifest{};
|
||||
|
||||
app_manifest.load_script(&script_env, base_file_system, app_file_name) catch {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "failed to load %s\n", app_file_name);
|
||||
};
|
||||
defer fail_log.free();
|
||||
|
||||
if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
try fail_log.write(last_sdl_error());
|
||||
|
||||
return error.InitFailure;
|
||||
}
|
||||
|
||||
defer ext.SDL_Quit();
|
||||
|
||||
var script_env = kym.RuntimeEnv.make(heap.allocator, .{
|
||||
.out_writer = info_log.as_writer(),
|
||||
.err_writer = fail_log.as_writer(),
|
||||
}) catch {
|
||||
try fail_log.write("failed to initialize script runtime");
|
||||
|
||||
return error.InitFailure;
|
||||
};
|
||||
|
||||
defer script_env.free();
|
||||
|
||||
var manifest = app.Manifest{};
|
||||
|
||||
manifest.load(&script_env, file_access) catch {
|
||||
fail_log.write("failed to load / execute app.ona manifest") catch {};
|
||||
|
||||
return error.BadManifest;
|
||||
};
|
||||
|
||||
const window = create: {
|
||||
const pos = ext.SDL_WINDOWPOS_CENTERED;
|
||||
const flags = 0;
|
||||
|
||||
break: create ext.SDL_CreateWindow(&manifest.title, pos, pos, manifest.width, manifest.height, flags) orelse {
|
||||
fail_log.write(last_sdl_error()) catch {};
|
||||
|
||||
return error.InitFailure;
|
||||
};
|
||||
};
|
||||
|
||||
defer ext.SDL_DestroyWindow(window);
|
||||
|
||||
const renderer = create: {
|
||||
const defaultDriverIndex = -1;
|
||||
const flags = ext.SDL_RENDERER_ACCELERATED;
|
||||
|
||||
break: create ext.SDL_CreateRenderer(window, defaultDriverIndex, flags) orelse {
|
||||
fail_log.write(last_sdl_error()) catch {};
|
||||
|
||||
return error.InitFailure;
|
||||
};
|
||||
};
|
||||
|
||||
defer ext.SDL_DestroyRenderer(renderer);
|
||||
|
||||
{
|
||||
const base_prefix = ext.SDL_GetBasePath() orelse {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
};
|
||||
|
||||
defer ext.SDL_free(base_prefix);
|
||||
|
||||
const window_flags = 0;
|
||||
const window_pos = ext.SDL_WINDOWPOS_CENTERED;
|
||||
|
||||
const window = ext.SDL_CreateWindow(&app_manifest.title, window_pos, window_pos, app_manifest.width, app_manifest.height, window_flags) orelse {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
};
|
||||
|
||||
defer ext.SDL_DestroyWindow(window);
|
||||
|
||||
const renderer_flags = 0;
|
||||
|
||||
const renderer = ext.SDL_CreateRenderer(window, -1, renderer_flags) orelse {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
};
|
||||
|
||||
defer ext.SDL_DestroyRenderer(renderer);
|
||||
var previous_ticks = ext.SDL_GetTicks64();
|
||||
|
||||
while (true) {
|
||||
// TODO: Delta timing.
|
||||
var event = @as(ext.SDL_Event, undefined);
|
||||
{
|
||||
var event = @as(ext.SDL_Event, undefined);
|
||||
|
||||
while (ext.SDL_PollEvent(&event) != 0) {
|
||||
switch (event.type) {
|
||||
ext.SDL_QUIT => return,
|
||||
else => {},
|
||||
while (ext.SDL_PollEvent(&event) != 0) {
|
||||
switch (event.type) {
|
||||
ext.SDL_QUIT => return,
|
||||
else => {},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ext.SDL_SetRenderDrawColor(renderer, 0, 0, 0, 0) != 0) {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
{
|
||||
// Based on https://fabiensanglard.net/timer_and_framerate/index.php.
|
||||
const current_ticks = ext.SDL_GetTicks64();
|
||||
const milliseconds_per_second = 1000.0;
|
||||
const tick_frequency = @as(u64, @intFromFloat(milliseconds_per_second / manifest.tick_rate));
|
||||
|
||||
while (previous_ticks < current_ticks) {
|
||||
previous_ticks += tick_frequency;
|
||||
}
|
||||
}
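// Worked example of the tick arithmetic above: with a manifest tick_rate of 60, the
// tick_frequency works out to 1000.0 / 60 ≈ 16 milliseconds, so previous_ticks advances
// in ~16 ms steps until it catches up with the current SDL tick count.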
|
||||
|
||||
if (ext.SDL_RenderClear(renderer) != 0) {
|
||||
return ext.SDL_LogError(ext.SDL_LOG_CATEGORY_APPLICATION, "%s\n", ext.SDL_GetError());
|
||||
}
|
||||
|
||||
// TODO: Render here.
|
||||
|
||||
ext.SDL_RenderPresent(renderer);
|
||||
_ = ext.SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
|
||||
_ = ext.SDL_RenderClear(renderer);
|
||||
_ = ext.SDL_RenderPresent(renderer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,7 +1,5 @@
|
|||
const coral = @import("coral");
|
||||
|
||||
const ona = @import("ona");
|
||||
|
||||
pub fn main() !void {
|
||||
ona.run_app(.{.sandboxed_path = &ona.file.Path.cwd});
|
||||
pub fn main() ona.RuntimeError!void {
|
||||
try ona.run_app(.{.sandboxed_path = &ona.file.Path.cwd});
|
||||
}
|
||||
|
|
|
@@ -1,3 +0,0 @@
|
|||
const _coral = @import("coral");
|
||||
|
||||
const _ona = @import("ona");
|
Loading…
Reference in New Issue