Compare commits

...

3 Commits

8 changed files with 925 additions and 43 deletions

16
.vscode/launch.json vendored Normal file
View File

@ -0,0 +1,16 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Ona",
"type": "gdb",
"request": "launch",
"target": "${workspaceFolder}/zig-out/bin/ona",
"cwd": "${workspaceRoot}",
"valuesFormatting": "parseText",
}
]
}

View File

@ -1,6 +1,5 @@
{
"editor.rulers": [100],
"git.detectSubmodules": false,
"files.exclude":{
"**/.git": true,
@ -12,4 +11,6 @@
"**/zig-cache": true,
"**/zig-out": true,
},
"git.detectSubmodulesLimit": 0,
}

11
.vscode/tasks.json vendored
View File

@ -3,13 +3,22 @@
"tasks": [
{
"label": "Build Ona",
"label": "Build",
"type": "shell",
"command": "zig build",
"group": {
"kind": "build",
"isDefault": true
}
},
{
"label": "Test",
"type": "shell",
"command": "zig build test",
"group": {
"kind": "test",
"isDefault": true
}
}
]
}

View File

@ -71,7 +71,7 @@ pub fn build(builder: *std.build.Builder) void {
// Ona tests.
{
const ona_tests = builder.addTest("src/main.zig");
const ona_tests = builder.addTest("src/stack.zig");
ona_tests.setTarget(target);
ona_tests.setBuildMode(mode);

7
src/errors.zig Normal file
View File

@ -0,0 +1,7 @@
///
/// Returns `true` if `value` did not return `Error`, otherwise `false`.
///
pub fn isOk(comptime Error: type, value: Error!void) bool {
    value catch return false;

    return true;
}

578
src/io.zig Normal file
View File

@ -0,0 +1,578 @@
const stack = @import("./stack.zig");
const std = @import("std");
///
/// Fixed-capacity, UTF-8 encoded file-system path.
///
pub const Path = struct {
    // Number of bytes of `buffer` currently in use.
    length: u16,
    // Backing storage for the path bytes; bytes past `length` are unspecified.
    buffer: [max]u8,

    ///
    /// An empty [Path] of length zero.
    ///
    pub const empty = std.mem.zeroes(Path);

    ///
    /// Returns `true` if the used portion of `path` is byte-equal to `text`, otherwise `false`.
    ///
    pub fn equalsText(path: Path, text: []const u8) bool {
        return std.mem.eql(u8, path.buffer[0 .. path.length], text);
    }

    ///
    /// The maximum possible byte-length of a [Path].
    ///
    /// Note that paths are encoded using UTF-8, meaning that a character may be bigger than one
    /// byte. Because of this, it is not safe to assume that a path may hold `2048` individual
    /// characters.
    ///
    pub const max = 2048;
};
///
/// Universal resource identifier (URI) that operates atop the operating system to provide a
/// platform-agnostic interface for local and networked I/O access.
///
/// For more information, see [https://en.wikipedia.org/wiki/URI].
///
pub const Uri = struct {
    // Normalized re-encoding of the parsed URI text.
    buffer: [max]u8,
    // Byte-length of the scheme component at the start of `buffer`.
    scheme_len: u16,
    user_info_range: Range,
    host_range: Range,
    port_number: u16,
    path_range: Range,

    ///
    /// Errors that may occur during parsing of a URI from URI-compatible source encoding.
    ///
    /// [ParseError.TooLong] occurs when the provided source data is bigger than the max allowed
    /// data representation in [max].
    ///
    /// [ParseError.UnexpectedToken] occurs when the internal tokenization process encounters a
    /// URI component token in the wrong order.
    ///
    /// [ParseError.InvalidEncoding] occurs when the source encoding being parsed is not properly
    /// encoded in its own format (malformed UTF-8, for example).
    ///
    pub const ParseError = error {
        TooLong,
        UnexpectedToken,
        InvalidEncoding,
    };

    ///
    /// Byte offset and length of a sub-section of [Uri.buffer].
    ///
    const Range = struct {
        off: u16,
        len: u16,

        const none = std.mem.zeroes(Range);
    };

    ///
    /// Represents an individual component of a URI sequence.
    ///
    pub const Token = union(enum) {
        scheme: []const u8,
        user_info: []const u8,
        host: []const u8,
        port: []const u8,
        path: []const u8,
        query: []const u8,
        fragment: []const u8,
    };

    ///
    /// Tokenizes the data in [Tokenizer.utf8_sequence] into URI tokens.
    ///
    /// See [Token] for more information on the supported URI components.
    ///
    pub const Tokenizer = struct {
        cursor: usize = 0,
        utf8_sequence: []const u8,

        ///
        /// Extracts the next [Token] in sequence from `tokenizer` and returns it or `null` if
        /// there are no more tokens to be extracted.
        ///
        pub fn next(tokenizer: *Tokenizer) ?Token {
            while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                'A' ... 'Z', 'a' ... 'z' => {
                    const begin = tokenizer.cursor;

                    tokenizer.cursor += 1;

                    // Only a component starting at the very beginning of the sequence may be
                    // a scheme.
                    var is_scheme = (begin == 0);

                    while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                        switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                        '+', '.', '-', '0' ... '9', 'A' ... 'Z', 'a' ... 'z' =>
                            tokenizer.cursor += 1,

                        ':' => {
                            if (is_scheme) {
                                // Skip past the delimiting ':' on the next call.
                                defer tokenizer.cursor += 1;

                                // Slices are [start .. end]: fixed from the previous
                                // `[begin .. (cursor - begin)]`, which was wrong whenever
                                // `begin` was non-zero.
                                return Token{.scheme =
                                    tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                            }

                            tokenizer.cursor += 1;
                        },

                        '#', '?' => break,

                        else => {
                            tokenizer.cursor += 1;
                            is_scheme = false;
                        },
                    };

                    return Token{.path = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },

                '@' => {
                    tokenizer.cursor += 1;

                    const begin = tokenizer.cursor;

                    while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                        switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                        '/', ':' => break,
                        else => tokenizer.cursor += 1,
                    };

                    return Token{.host = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },

                ':' => {
                    tokenizer.cursor += 1;

                    const begin = tokenizer.cursor;

                    while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                        switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                        '/' => break,
                        else => tokenizer.cursor += 1,
                    };

                    return Token{.port = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },

                '/' => {
                    tokenizer.cursor += 1;

                    // Bounds check fixed: a trailing '/' previously indexed one past the end
                    // of the sequence.
                    if ((tokenizer.cursor < tokenizer.utf8_sequence.len) and
                        (tokenizer.utf8_sequence[tokenizer.cursor] == '/')) {
                        // "//" introduces an authority (user-info and / or host) component.
                        tokenizer.cursor += 1;

                        const begin = tokenizer.cursor;

                        while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                            switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                            // The '@' itself is left unconsumed so the next call dispatches
                            // into the host-after-'@' branch above.
                            '@' => return Token{.user_info =
                                tokenizer.utf8_sequence[begin .. tokenizer.cursor]},

                            ':', '/' => break,
                            else => tokenizer.cursor += 1,
                        };

                        return Token{.host =
                            tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                    } else {
                        // Rooted path: include the leading '/' in the returned path. The
                        // previous extra `cursor += 1` here skipped a character without
                        // inspecting it.
                        const begin = (tokenizer.cursor - 1);

                        while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                            switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                            '?', '#' => break,
                            else => tokenizer.cursor += 1,
                        };

                        return Token{.path =
                            tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                    }
                },

                '?' => {
                    tokenizer.cursor += 1;

                    const begin = tokenizer.cursor;

                    while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                        switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                        // Leave the '#' unconsumed for the next call. The previous
                        // `cursor -= 1` rewound *before* the query's final character,
                        // truncating it and leaking a spurious path token.
                        '#' => break,
                        else => tokenizer.cursor += 1,
                    };

                    return Token{.query = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },

                '#' => {
                    tokenizer.cursor += 1;

                    const begin = tokenizer.cursor;

                    // A fragment always runs to the end of the sequence.
                    tokenizer.cursor = tokenizer.utf8_sequence.len;

                    return Token{.fragment =
                        tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },

                else => {
                    const begin = tokenizer.cursor;

                    tokenizer.cursor += 1;

                    while (tokenizer.cursor < tokenizer.utf8_sequence.len)
                        switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
                        '#', '?' => break,
                        else => tokenizer.cursor += 1,
                    };

                    return Token{.path = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
                },
            };

            return null;
        }

        ///
        /// A more constrained variant of [next] that accepts a `expected_token` argument to
        /// validate the component type of a [Token] before it is returned.
        ///
        /// If the tag of the extracted [Token] is not identical to `expected_token`, it will be
        /// discarded and `null` is returned instead.
        ///
        pub fn nextExpect(tokenizer: *Tokenizer, expected_token: std.meta.Tag(Token)) ?Token {
            if (tokenizer.next()) |token| {
                if (token == expected_token) return token;
            }

            return null;
        }
    };

    ///
    /// A zeroed [Uri] with an empty scheme, authority, and path.
    ///
    pub const empty = std.mem.zeroes(Uri);

    ///
    /// The maximum possible byte-length of a [Uri].
    ///
    /// Note that a URI character may be encoded using multiple bytes, meaning that `2048` is not
    /// identical in meaning to `2048` URI characters.
    ///
    pub const max = 2048;

    ///
    /// Returns `true` if `uri_scheme` matches the scheme contained in `uri`, otherwise `false`.
    ///
    pub fn isScheme(uri: Uri, uri_scheme: []const u8) bool {
        return std.mem.eql(u8, uri.buffer[0 .. uri.scheme_len], uri_scheme);
    }

    ///
    /// Attempts to parse and return a [Uri] value out of `utf8_sequence`, otherwise returning
    /// [ParseError] if `utf8_sequence` is invalid in any way.
    ///
    /// [ParseError.InvalidEncoding] occurs if the data encoded in `utf8_sequence` cannot be
    /// validated as UTF-8 or it contains an invalid ASCII decimal number encoding for its URL port.
    ///
    /// See [ParseError] for more details on the other errors that may be returned.
    ///
    pub fn parse(utf8_sequence: []const u8) ParseError!Uri {
        if (!std.unicode.utf8ValidateSlice(utf8_sequence)) return error.InvalidEncoding;

        var uri = Uri.empty;

        if (utf8_sequence.len != 0) {
            if (utf8_sequence.len > max) return error.TooLong;

            var tokenizer = Tokenizer{.utf8_sequence = utf8_sequence};
            const scheme_token = tokenizer.nextExpect(.scheme) orelse
                return error.UnexpectedToken;

            var uri_buffer = stack.Unmanaged(u8){.buffer = &uri.buffer};
            const uri_writer = uri_buffer.asWriter();
            const assert = std.debug.assert;

            // These write operations will never fail: the re-encoded URI only contains bytes
            // already present in utf8_sequence, which is known to fit inside uri.buffer.
            assert(uri_writer.write(scheme_token.scheme) == scheme_token.scheme.len);
            assert(uri_writer.writeByte(':'));

            // Downcast is safe because utf8_sequence can't be greater than u16 max.
            uri.scheme_len = @intCast(u16, scheme_token.scheme.len);

            var last_token = scheme_token;

            while (tokenizer.next()) |scheme_specific_token| {
                switch (scheme_specific_token) {
                    .scheme => return error.UnexpectedToken,

                    .user_info => |user_info| {
                        if (last_token != .scheme) return error.UnexpectedToken;

                        const delimiter = "//";

                        assert(uri_writer.write(delimiter) == delimiter.len);

                        uri.user_info_range = .{
                            .off = @intCast(u16, uri_buffer.filled),
                            .len = @intCast(u16, user_info.len),
                        };

                        assert(uri_writer.write(user_info) == user_info.len);
                        assert(uri_writer.writeByte('@'));
                    },

                    .host => |host| {
                        switch (last_token) {
                            .scheme => {
                                const delimiter = "//";

                                assert(uri_writer.write(delimiter) == delimiter.len);
                            },

                            .user_info => {},
                            else => return error.UnexpectedToken,
                        }

                        // Previously host_range was never assigned at all, leaving it zeroed.
                        uri.host_range = .{
                            .off = @intCast(u16, uri_buffer.filled),
                            .len = @intCast(u16, host.len),
                        };

                        assert(uri_writer.write(host) == host.len);
                    },

                    .port => |port| {
                        if (last_token != .host) return error.UnexpectedToken;

                        const port_radix = 10;

                        uri.port_number = std.fmt.parseInt(u16, port, port_radix) catch
                            return error.InvalidEncoding;

                        assert(uri_writer.writeByte(':'));
                        assert(uri_writer.write(port) == port.len);
                    },

                    .path => |path| {
                        if ((last_token != .scheme) and (last_token != .host) and
                            (last_token != .port)) return error.UnexpectedToken;

                        uri.path_range = .{
                            .off = @intCast(u16, uri_buffer.filled),
                            .len = @intCast(u16, path.len),
                        };

                        assert(uri_writer.write(path) == path.len);
                    },

                    .query => |query| {
                        if ((last_token != .scheme) and (last_token != .host) and
                            (last_token != .port) and (last_token != .path))
                            return error.UnexpectedToken;

                        assert(uri_writer.writeByte('?'));
                        // Previously this overwrote path_range with the query's range,
                        // corrupting toPath / writePath.
                        // TODO: track the query range once Uri gains a field for it.
                        assert(uri_writer.write(query) == query.len);
                    },

                    .fragment => |fragment| {
                        // Fixed from a chain of `or`s, which was always true and therefore
                        // rejected every fragment.
                        if ((last_token != .scheme) and (last_token != .host) and
                            (last_token != .port) and (last_token != .path) and
                            (last_token != .query)) return error.UnexpectedToken;

                        assert(uri_writer.writeByte('#'));
                        // Previously this overwrote path_range with the fragment's range.
                        // TODO: track the fragment range once Uri gains a field for it.
                        assert(uri_writer.write(fragment) == fragment.len);
                    },
                }

                last_token = scheme_specific_token;
            }
        }

        return uri;
    }

    ///
    /// Creates and returns a [Path] value from the path component of `uri`.
    ///
    pub fn toPath(uri: Uri) Path {
        var path = Path{
            .length = uri.path_range.len,
            .buffer = std.mem.zeroes([Path.max]u8),
        };

        // Slice end is offset + length: fixed from `[off .. len]`, which was wrong whenever
        // the path did not start at offset zero.
        std.mem.copy(u8, path.buffer[0 ..], uri.buffer[uri.path_range.off ..
            (uri.path_range.off + uri.path_range.len)]);

        return path;
    }

    ///
    /// Writes the path component of `uri` to `path_writer`, returning `true` if all bytes used to
    /// encode the path were successfully written, otherwise `false` if it was partially completed
    /// or not at all.
    ///
    pub fn writePath(uri: Uri, path_writer: Writer) bool {
        // Same offset + length slicing fix as in toPath.
        return (path_writer.write(uri.buffer[uri.path_range.off ..
            (uri.path_range.off + uri.path_range.len)]) == uri.path_range.len);
    }
};
test "uri" {
    const testing = @import("std").testing;

    // A zeroed URI reports an empty scheme and an empty path.
    const empty_uri = Uri.empty;

    try testing.expect(empty_uri.isScheme(""));
    try testing.expect(empty_uri.toPath().equalsText(""));

    // A URI consisting of only a scheme and its ':' delimiter has an empty path.
    const scheme_only_uri = try Uri.parse("uri:");

    try testing.expect(scheme_only_uri.isScheme("uri"));
    try testing.expect(scheme_only_uri.toPath().equalsText(""));

    // Rooted (absolute) path component round-trips through parse and toPath.
    const absolute_file_path = "/path/to/file";
    const absolute_file_uri = try Uri.parse("file:" ++ absolute_file_path);

    try testing.expect(absolute_file_uri.isScheme("file"));
    try testing.expect(absolute_file_uri.toPath().equalsText(absolute_file_path));

    // Relative path component round-trips as well.
    const relative_file_path = "path/to/file";
    const relative_file_uri = try Uri.parse("file:" ++ relative_file_path);

    try testing.expect(relative_file_uri.isScheme("file"));
    try testing.expect(relative_file_uri.toPath().equalsText(relative_file_path));
}
///
/// Opaque interface to a "writable" resource, such as a block device, memory buffer, or network
/// socket.
///
pub const Writer = struct {
    // Type-erased pointer handed back to `operation` on every write.
    context: *anyopaque,
    // Writing operation; returns the number of bytes successfully consumed.
    operation: fn (*anyopaque, []const u8) usize,

    ///
    /// Wraps and returns a reference to `write_context` of type `WriteContext` and its associated
    /// `writeContext` writing operation in a [Writer].
    ///
    /// The returned [Writer] borrows `write_context`; the caller must keep it alive for as long
    /// as the [Writer] is in use.
    ///
    pub fn wrap(
        comptime WriteContext: type,
        write_context: *WriteContext,
        comptime writeContext: fn (*WriteContext, []const u8) usize
    ) Writer {
        return .{
            .context = write_context,

            .operation = struct {
                fn write(context: *anyopaque, buffer: []const u8) usize {
                    return writeContext(@ptrCast(*WriteContext,
                        @alignCast(@alignOf(WriteContext), context)), buffer);
                }
            }.write,
        };
    }

    ///
    /// Attempts to write `buffer` to `writer`, returning the number of bytes from `buffer` that
    /// were successfully written.
    ///
    pub fn write(writer: Writer, buffer: []const u8) usize {
        return writer.operation(writer.context, buffer);
    }

    ///
    /// Writes the singular `byte` to `writer`, returning `true` if it was successfully written,
    /// otherwise `false`.
    ///
    pub fn writeByte(writer: Writer, byte: u8) bool {
        return (writer.operation(writer.context,
            @ptrCast([*]const u8, &byte)[0 .. 1]) != 0);
    }

    ///
    /// Writes `value` as an ASCII / UTF-8 encoded integer to `writer`, returning `true` if the
    /// full sequence was successfully written, otherwise `false`.
    ///
    /// The `base` argument identifies which base system to encode `value` as, with `10` being
    /// decimal, `16` being hexadecimal, `8` being octal, so on and so forth. Supported bases
    /// range from `2` to `16` inclusive; digit values above `9` are encoded as lower-case
    /// letters.
    ///
    /// Note that the minimum value of a signed `value` type is unsupported, as its negation is
    /// not representable in that type.
    ///
    pub fn writeInt(writer: Writer, value: anytype, base: u8) bool {
        const Int = @TypeOf(value);
        const type_info = @typeInfo(Int);

        if (type_info != .Int) @compileError("value must be of type int");

        std.debug.assert((base >= 2) and (base <= 16));

        if (value == 0) return writer.writeByte('0');

        // Digit glyph table fixes bases above 10: the previous `+ '0'` mapping produced
        // garbage for hexadecimal, and the previous `base: u4` could not even hold 16.
        const digit_glyphs = "0123456789abcdef";
        const base_value = @intCast(Int, base);

        // 128 binary digits plus a sign character covers integer types up to 128 bits.
        var buffer = std.mem.zeroes([129]u8);
        var buffer_count = @as(usize, 0);
        var digits_offset = @as(usize, 0);
        var n1 = value;

        if ((type_info.Int.signedness == .signed) and (value < 0)) {
            // Negative value: emit the sign now and encode the magnitude below.
            n1 = -value;
            buffer[0] = '-';
            buffer_count += 1;
            digits_offset = 1;
        }

        while (n1 != 0) {
            buffer[buffer_count] = digit_glyphs[@intCast(usize, @mod(n1, base_value))];
            n1 = @divTrunc(n1, base_value);
            buffer_count += 1;
        }

        // Digits were produced least-significant first; reverse them in place while leaving
        // any leading sign character untouched. The previous swap loop reversed the whole
        // buffer, moving '-' to the end (e.g. "-123" became "123-").
        std.mem.reverse(u8, buffer[digits_offset .. buffer_count]);

        return (writer.write(buffer[0 .. buffer_count]) == buffer_count);
    }
};
// Canary context for null_writer: asserted to stay zero on every write.
var null_context = @as(usize, 0);

///
/// Writer that silently throws consumed data away and never fails.
///
/// This is commonly used for testing, or for redirecting otherwise unwanted output data that
/// cannot be sent somewhere for whatever reason.
///
pub const null_writer = Writer{
    .context = (&null_context),

    .operation = struct {
        fn write(context: *anyopaque, buffer: []const u8) usize {
            // Validate context canary value.
            std.debug.assert(@ptrCast(*usize, @alignCast(@alignOf(usize), context)).* == 0);

            // Report everything as written without storing any of it.
            return buffer.len;
        }
    }.write,
};

View File

@ -5,105 +5,173 @@ const c = @cImport({
@cInclude("lua/lauxlib.h");
});
const errors = @import("./errors.zig");
const io = @import("./io.zig");
const stack = @import("./stack.zig");
const std = @import("std");
const Request = struct {
next: ?*Request = null,
frame: anyframe,
message: union(enum) {
close: struct {
file: *c.SDL_RWops,
is_closed: *bool,
},
open_readable: struct {
uri: *const io.Uri,
file: ?*c.SDL_RWops,
},
},
};
fn luaAlloc(userdata: ?*anyopaque, ptr: ?*anyopaque, original_size: usize,
updated_size: usize) callconv(.C) ?*anyopaque {
// Implementation derived from
// https://github.com/natecraddock/ziglua/blob/master/src/ziglua.zig.
const alignment = @alignOf(std.c.max_align_t);
const Allocator = std.mem.Allocator;
const allocator = @ptrCast(*Allocator, @alignCast(@alignOf(Allocator), userdata.?));
if (@ptrCast(?[*]align(alignment) u8, @alignCast(alignment, ptr))) |prev_ptr| {
// Allocator is working with an existing pointer.
const prev_slice = prev_ptr[0 .. original_size];
if (updated_size == 0) {
// Updated size of `0` to free the existing memory block.
allocator.free(prev_slice);
return null;
}
// Resize the existing memory block.
return (allocator.reallocAdvanced(prev_slice, alignment,
updated_size, .exact) catch return null).ptr;
}
// No existing pointer, allocate a new block of memory.
return (allocator.alignedAlloc(u8, alignment, updated_size) catch return null).ptr;
}
///
/// Entry point.
///
pub fn main() anyerror!void {
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
if (c.SDL_Init(c.SDL_INIT_EVERYTHING) != 0) {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize SDL2 runtime");
return error.SystemFailure;
return error.InitFailure;
}
defer c.SDL_Quit();
var lua_allocator = gpa.allocator();
const lua_state = c.lua_newstate(luaAlloc, @ptrCast(*anyopaque, &lua_allocator));
const pref_path = create_pref_path: {
const path = c.SDL_GetPrefPath("ona", "ona") orelse {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path");
if (lua_state == null) {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION,
"Failed to initialize Lua virtual machine");
return error.InitFailure;
};
return error.SystemFailure;
}
break: create_pref_path path[0 .. std.mem.len(path)];
};
defer c.lua_close(lua_state);
defer c.SDL_free(pref_path.ptr);
const sdl_window = create_sdl_window: {
const window = create_window: {
const pos = c.SDL_WINDOWPOS_UNDEFINED;
var flags = @as(u32, 0);
break: create_sdl_window c.SDL_CreateWindow("Ona", pos, pos, 640, 480, flags);
break: create_window c.SDL_CreateWindow("Ona", pos, pos, 640, 480, flags) orelse {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION, "Failed to load SDL2 window");
return error.InitFailure;
};
};
if (sdl_window == null) {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_VIDEO, "Failed to create SDL2 window");
defer c.SDL_DestroyWindow(window);
return error.SystemFailure;
}
defer c.SDL_DestroyWindow(sdl_window);
const sdl_renderer = create_sdl_renderer: {
const renderer = create_renderer: {
var flags = @as(u32, 0);
break: create_sdl_renderer c.SDL_CreateRenderer(sdl_window, -1, flags);
break: create_renderer c.SDL_CreateRenderer(window, -1, flags) orelse {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION, "Failed to load SDL2 renderer");
return error.InitFailure;
};
};
if (sdl_renderer == null) {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_VIDEO, "Failed to create SDL2 renderer");
defer c.SDL_DestroyRenderer(renderer);
return error.SystemFailure;
}
var gpa = std.heap.GeneralPurposeAllocator(.{}){};
var lua_allocator = gpa.allocator();
defer c.SDL_DestroyRenderer(sdl_renderer);
const lua_state = c.lua_newstate(luaAlloc, @ptrCast(*anyopaque, &lua_allocator)) orelse {
c.SDL_LogCritical(c.SDL_LOG_CATEGORY_APPLICATION,
"Failed to initialize Lua virtual machine");
return error.InitFailure;
};
defer c.lua_close(lua_state);
var request_chain = @as(?*Request, null);
var is_running = true;
while (is_running) {
var sdl_event = std.mem.zeroes(c.SDL_Event);
var event = std.mem.zeroes(c.SDL_Event);
while (c.SDL_PollEvent(&sdl_event) != 0) {
switch (sdl_event.type) {
while (c.SDL_PollEvent(&event) != 0) {
switch (event.type) {
c.SDL_QUIT => is_running = false,
else => {},
}
}
_ = c.SDL_SetRenderDrawColor(sdl_renderer, 0, 0, 0, 255);
_ = c.SDL_RenderClear(sdl_renderer);
if (c.SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255) != 0) {
c.SDL_LogError(c.SDL_LOG_CATEGORY_VIDEO, c.SDL_GetError());
c.SDL_ClearError();
}
c.SDL_RenderPresent(sdl_renderer);
if (c.SDL_RenderClear(renderer) != 0) {
c.SDL_LogError(c.SDL_LOG_CATEGORY_VIDEO, c.SDL_GetError());
c.SDL_ClearError();
}
c.SDL_RenderPresent(renderer);
while (request_chain) |request_head| {
const request = request_head;
request_chain = request_head.next;
switch (request.message) {
.close => |*close| close.is_closed.* = (c.SDL_RWclose(close.file) == 0),
.open_readable => |*open_readable| {
if (open_readable.uri.isScheme("data")) {
var path = stack.Fixed(u8, 4096){};
// These can never fail as the sum of the potential bytes written will
// always be less than 4096.
path.pushAll("./") catch unreachable;
std.debug.assert(open_readable.uri.writePath(path.asWriter()));
open_readable.file = c.SDL_RWFromFile(&path.buffer, "r");
} else if (open_readable.uri.isScheme("user")) {
var path = stack.Fixed(u8, 4096){};
const isOk = errors.isOk;
// Cannot guarantee that the sum of potential bytes written will always be
// less than path max.
if (isOk(stack.FinitePushError, path.pushAll(pref_path)) and
open_readable.uri.writePath(path.asWriter())) {
open_readable.file = c.SDL_RWFromFile(&path.buffer, "r");
}
}
},
}
resume request.frame;
}
c.SDL_Delay(1);
}
}

203
src/stack.zig Normal file
View File

@ -0,0 +1,203 @@
const io = @import("./io.zig");
const std = @import("std");
///
/// Potential errors that may occur while trying to push one or more elements into a stack of a
/// known maximum size.
///
/// [FinitePushError.Overflow] is returned if the stack does not have sufficient capacity to hold a
/// given set of elements.
///
pub const FinitePushError = error {
    Overflow,
};

///
/// Returns a fixed-size stack collection capable of holding a maximum of `capacity` elements of
/// type `Element`.
///
pub fn Fixed(comptime Element: type, comptime capacity: usize) type {
    return struct {
        filled: usize = 0,
        buffer: [capacity]Element = undefined,

        const Self = @This();

        ///
        /// Wraps `self` and returns it in a [io.Writer] value.
        ///
        /// Writes are all-or-nothing: a write that would overflow the stack pushes no bytes and
        /// reports `0` bytes written.
        ///
        /// Note that this will raise a compilation error if [Element] is not `u8`.
        ///
        pub fn asWriter(self: *Self) io.Writer {
            if (Element != u8) @compileError("Cannot coerce fixed stack of type " ++
                @typeName(Element) ++ " into a Writer");

            return io.Writer.wrap(Self, self, struct {
                fn write(stack: *Self, buffer: []const u8) usize {
                    stack.pushAll(buffer) catch |err| switch (err) {
                        error.Overflow => return 0,
                    };

                    return buffer.len;
                }
            }.write);
        }

        ///
        /// Clears all elements from `self`.
        ///
        pub fn clear(self: *Self) void {
            self.filled = 0;
        }

        ///
        /// Counts and returns the number of pushed elements in `self`.
        ///
        pub fn count(self: Self) usize {
            return self.filled;
        }

        ///
        /// Attempts to pop the tail-end of `self`, returning the element value or `null` if the
        /// stack is empty.
        ///
        pub fn pop(self: *Self) ?Element {
            if (self.filled == 0) return null;

            self.filled -= 1;

            return self.buffer[self.filled];
        }

        ///
        /// Attempts to push `element` into `self`, returning [FinitePushError.Overflow] if the
        /// stack is full.
        ///
        pub fn push(self: *Self, element: Element) FinitePushError!void {
            if (self.filled == capacity) return error.Overflow;

            self.buffer[self.filled] = element;
            self.filled += 1;
        }

        ///
        /// Attempts to push all of `elements` into `self`, returning [FinitePushError.Overflow] if
        /// the stack does not have sufficient capacity to hold the new elements.
        ///
        pub fn pushAll(self: *Self, elements: []const Element) FinitePushError!void {
            const filled = (self.filled + elements.len);

            if (filled > capacity) return error.Overflow;

            // Fixed: previously hard-coded `[]const u8` / `copy(u8, ...)`, which failed to
            // compile for any instantiation where Element is not u8.
            std.mem.copy(Element, self.buffer[self.filled ..], elements);

            self.filled = filled;
        }
    };
}
///
/// Returns a stack collection that operates on a caller-provided buffer of `Element` values,
/// capable of holding at most `buffer.len` elements.
///
/// The stack borrows `buffer`; the caller retains ownership of its memory.
///
pub fn Unmanaged(comptime Element: type) type {
    return struct {
        filled: usize = 0,
        buffer: []Element,

        const Self = @This();

        ///
        /// Wraps `self` and returns it in a [io.Writer] value.
        ///
        /// Writes are all-or-nothing: a write that would overflow the stack pushes no bytes and
        /// reports `0` bytes written.
        ///
        /// Note that this will raise a compilation error if [Element] is not `u8`.
        ///
        pub fn asWriter(self: *Self) io.Writer {
            if (Element != u8) @compileError("Cannot coerce fixed stack of type " ++
                @typeName(Element) ++ " into a Writer");

            return io.Writer.wrap(Self, self, struct {
                fn write(stack: *Self, buffer: []const u8) usize {
                    stack.pushAll(buffer) catch |err| switch (err) {
                        error.Overflow => return 0,
                    };

                    return buffer.len;
                }
            }.write);
        }

        ///
        /// Clears all elements from `self`.
        ///
        pub fn clear(self: *Self) void {
            self.filled = 0;
        }

        ///
        /// Counts and returns the number of pushed elements in `self`.
        ///
        pub fn count(self: Self) usize {
            return self.filled;
        }

        ///
        /// Attempts to pop the tail-end of `self`, returning the element value or `null` if the
        /// stack is empty.
        ///
        pub fn pop(self: *Self) ?Element {
            if (self.filled == 0) return null;

            self.filled -= 1;

            return self.buffer[self.filled];
        }

        ///
        /// Attempts to push `element` into `self`, returning [FinitePushError.Overflow] if the
        /// stack is full.
        ///
        pub fn push(self: *Self, element: Element) FinitePushError!void {
            if (self.filled == self.buffer.len) return error.Overflow;

            self.buffer[self.filled] = element;
            self.filled += 1;
        }

        ///
        /// Attempts to push all of `elements` into `self`, returning [FinitePushError.Overflow] if
        /// the stack does not have sufficient capacity to hold the new elements.
        ///
        pub fn pushAll(self: *Self, elements: []const Element) FinitePushError!void {
            const filled = (self.filled + elements.len);

            if (filled > self.buffer.len) return error.Overflow;

            // Fixed: previously hard-coded `[]const u8` / `copy(u8, ...)`, which failed to
            // compile for any instantiation where Element is not u8.
            std.mem.copy(Element, self.buffer[self.filled ..], elements);

            self.filled = filled;
        }
    };
}
test "fixed stack" {
    const testing = @import("std").testing;
    const expectError = testing.expectError;
    const expectEqual = testing.expectEqual;

    var stack = Fixed(u8, 4){};

    // A fresh stack is empty and popping it yields nothing.
    try expectEqual(stack.count(), 0);
    try expectEqual(stack.pop(), null);

    // Single push / pop round-trip.
    try stack.push(69);
    try expectEqual(stack.count(), 1);
    try expectEqual(stack.pop(), 69);

    // Fill to exact capacity, then confirm any further push overflows.
    try stack.pushAll(&.{42, 10, 95, 0});
    try expectEqual(stack.count(), 4);
    try expectError(FinitePushError.Overflow, stack.push(1));
    try expectError(FinitePushError.Overflow, stack.pushAll(&.{1, 11, 11}));

    stack.clear();
    try expectEqual(stack.count(), 0);

    // Writer adapter: a full-capacity write succeeds; writing into the now-full stack
    // reports failure.
    const writer = stack.asWriter();

    try expectEqual(writer.write(&.{0, 0, 0, 0}), 4);
    try expectEqual(writer.writeByte(0), false);
}