From 01d878f9337abcee9750a001e666a519abc090df Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 3 Oct 2022 23:16:58 +0100 Subject: [PATCH 01/93] Remove hardcoded hash from VS Code launch config --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index a37980f..195c1f0 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -14,7 +14,7 @@ "name": "Test", "type": "gdb", "request": "launch", - "target": "${workspaceFolder}/zig-cache/o/b57ef32c79a05339fbe4a8eb648ff6df/test", + "target": "$(find zig-cache -name test) src/main.zig", "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", -- 2.34.1 From 217d539ff74d14be8322347979cd7205610fb504 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 3 Oct 2022 23:39:03 +0100 Subject: [PATCH 02/93] Fix calling SDL_DestroySemaphore on mutex --- src/sys.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sys.zig b/src/sys.zig index b880fef..f583974 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -613,7 +613,7 @@ pub fn runGraphics(comptime Errors: anytype, run: GraphicsRunner(Errors)) Errors defer { ext.SDL_DestroyThread(event_loop.file_system_thread); - ext.SDL_DestroySemaphore(event_loop.file_system_mutex); + ext.SDL_DestroyMutex(event_loop.file_system_mutex); ext.SDL_DestroySemaphore(event_loop.file_system_semaphore); } -- 2.34.1 From 6094dac5f14c2ec8649be44d504aad95a42a1122 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 3 Oct 2022 23:59:09 +0100 Subject: [PATCH 03/93] Temporary fix for SDL2 symbols not loading at runtime --- build.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/build.zig b/build.zig index 7eef4a8..2a81c07 100644 --- a/build.zig +++ b/build.zig @@ -13,6 +13,7 @@ pub fn build(builder: *std.build.Builder) void { ona_exe.install(); ona_exe.addIncludeDir("./ext"); ona_exe.linkSystemLibrary("SDL2"); + ona_exe.linkLibC(); const run_cmd = ona_exe.run(); -- 2.34.1 From 38211718e35f26296a696a0c17e063e6a84e3336 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 13:51:46 +0100 Subject: [PATCH 04/93] Hide away private event loop functions in implementation --- src/sys.zig | 393 +++++++++++++++++++++++++++++----------------------- 1 file changed, 223 insertions(+), 170 deletions(-) diff --git a/src/sys.zig b/src/sys.zig index f583974..8d41844 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -61,12 +61,29 @@ pub const EventLoop = opaque { /// Internal state of the event loop hidden from the API consumer. /// const Implementation = struct { - user_prefix: []const u8, + user_prefix: []u8, file_system_semaphore: *ext.SDL_sem, file_system_mutex: *ext.SDL_mutex, - file_system_thread: *ext.SDL_Thread, + file_system_thread: ?*ext.SDL_Thread, file_system_messages: ?*FileSystemMessage = null, + /// + /// + /// + const InitError = error { + OutOfSemaphores, + OutOfMutexes, + OutOfMemory, + }; + + /// + /// + /// + const StartError = error { + OutOfThreads, + AlreadyStarted, + }; + /// /// Casts `event_loop` to a [Implementation] reference. 
/// @@ -76,6 +93,181 @@ pub const EventLoop = opaque { fn cast(event_loop: *EventLoop) *Implementation { return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), event_loop)); } + + /// + /// + /// + fn deinit(implementation: *Implementation) void { + var message = FileSystemMessage{ + .frame = @frame(), + .request = .exit, + }; + + implementation.enqueueFileSystemMessage(&message); + + { + var status = @as(c_int, 0); + + // SDL2 defines waiting on a null thread reference as a no-op. + // https://wiki.libsdl.org/SDL_WaitThread + ext.SDL_WaitThread(implementation.file_system_thread, &status); + + if (status != 0) { + // TODO: Error check this. + } + } + + ext.SDL_free(implementation.user_prefix.ptr); + ext.SDL_DestroyMutex(implementation.file_system_mutex); + ext.SDL_DestroySemaphore(implementation.file_system_semaphore); + } + + /// + /// Enqueues `message` to the file system message processor of `implementation` to be + /// processed at a later, non-deterministic point. + /// + fn enqueueFileSystemMessage(implementation: *Implementation, + message: *FileSystemMessage) void { + + // TODO: Error check this. + _ = ext.SDL_LockMutex(implementation.file_system_mutex); + + if (implementation.file_system_messages) |messages| { + messages.next = message; + } else { + implementation.file_system_messages = message; + } + + // TODO: Error check these. + _ = ext.SDL_UnlockMutex(implementation.file_system_mutex); + _ = ext.SDL_SemPost(implementation.file_system_semaphore); + } + + /// + /// + /// + fn init() InitError!Implementation { + return Implementation{ + .user_prefix = create_pref_path: { + const path = ext.SDL_GetPrefPath("ona", "ona") orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to load user path"); + + return error.OutOfMemory; + }; + + break: create_pref_path path[0 .. std.mem.len(path)]; + }, + + .file_system_semaphore = ext.SDL_CreateSemaphore(0) + orelse return error.OutOfSemaphores, + + .file_system_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, + .file_system_thread = null, + }; + } + + /// + /// [FileSystemMessage] processing function used by a dedicated worker thread, where `data` + /// is a type-erased reference to a [EventLoop]. + /// + /// The processor returns `0` if it exited normally or any other value if an erroneous exit + /// occured. + /// + fn processFileSystemMessages(data: ?*anyopaque) callconv(.C) c_int { + const implementation = Implementation.cast(@ptrCast(*EventLoop, data orelse unreachable)); + + while (true) { + while (implementation.file_system_messages) |messages| { + switch (messages.request) { + .exit => return 0, + + .log => |*log_request| ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, + @enumToInt(log_request.kind), log_request.message.ptr), + + .open => |*open_request| { + switch (open_request.file_system_path.file_system) { + .data => { + // TODO: Implement + open_request.result = error.NotFound; + }, + + .user => { + var path_buffer = std.mem.zeroes([4096]u8); + var path = stack.Fixed(u8){.buffer = path_buffer[0 .. 
]}; + + path.pushAll(implementation.user_prefix) catch { + open_request.result = error.NotFound; + + continue; + }; + + if (!open_request.file_system_path.write(path.writer())) { + open_request.result = error.NotFound; + + continue; + } + + if (ext.SDL_RWFromFile(&path_buffer, switch (open_request.mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + })) |rw_ops| { + open_request.result = @ptrCast(*FileAccess, rw_ops); + } else { + open_request.result = error.NotFound; + } + }, + } + }, + + .close => |*close_request| { + // TODO: Use this result somehow. + _ = ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, @alignCast( + @alignOf(ext.SDL_RWops), close_request.file_access))); + }, + + .read_file => |read_request| { + // TODO: Implement. + _ = read_request; + }, + + .seek_file => |seek_request| { + // TODO: Implement. + _ = seek_request; + }, + + .tell_file => |tell_request| { + // TODO: Implement. + _ = tell_request; + }, + } + + resume messages.frame; + + implementation.file_system_messages = messages.next; + } + + // TODO: Error check this. + _ = ext.SDL_SemWait(implementation.file_system_semaphore); + } + } + + /// + /// + /// + fn start(implementation: *Implementation) StartError!void { + if (implementation.file_system_thread != null) return error.AlreadyStarted; + + implementation.file_system_thread = ext.SDL_CreateThread(processFileSystemMessages, + "File System Worker", implementation) orelse { + + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to create file-system work processor"); + + return error.OutOfThreads; + }; + } }; /// @@ -88,7 +280,7 @@ pub const EventLoop = opaque { /// [LogKind.warning] represents a log message which is a warning about a issue that does not /// break anything important but is not ideal. /// - pub const LogKind = enum(c_int) { + pub const LogKind = enum(u32) { info = ext.SDL_LOG_PRIORITY_INFO, debug = ext.SDL_LOG_PRIORITY_DEBUG, warning = ext.SDL_LOG_PRIORITY_WARN, @@ -146,28 +338,7 @@ pub const EventLoop = opaque { .request = .{.close = .{.file_access = file_access}}, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); - } - - /// - /// Enqueues `message` to the file system message processor to be processed at a later, non- - /// deterministic point. - /// - fn enqueueFileSystemMessage(event_loop: *EventLoop, message: *FileSystemMessage) void { - const implementation = Implementation.cast(event_loop); - - // TODO: Error check this. - _ = ext.SDL_LockMutex(implementation.file_system_mutex); - - if (implementation.file_system_messages) |messages| { - messages.next = message; - } else { - implementation.file_system_messages = message; - } - - // TODO: Error check these. 
- _ = ext.SDL_UnlockMutex(implementation.file_system_mutex); - _ = ext.SDL_SemPost(implementation.file_system_semaphore); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); } /// @@ -185,7 +356,7 @@ pub const EventLoop = opaque { }}, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); } /// @@ -210,97 +381,11 @@ pub const EventLoop = opaque { }}, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); return file_system_message.request.open.result; } - /// - /// [FileSystemMessage] processing function used by a dedicated worker thread, where `data` is - /// a type-erased reference to a [EventLoop]. - /// - /// The processor returns `0` if it exited normally or any other value if an erroneous exit - /// occured. - /// - fn processFileSystemMessages(data: ?*anyopaque) callconv(.C) c_int { - const implementation = Implementation.cast(@ptrCast(*EventLoop, data orelse unreachable)); - - while (true) { - while (implementation.file_system_messages) |messages| { - switch (messages.request) { - .exit => return 0, - - .log => |*log_request| ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, - @enumToInt(log_request.priority), log_request.message), - - .open => |*open_request| { - switch (open_request.path.file_system) { - .data => { - // TODO: Implement - open_request.result = error.NotFound; - }, - - .user => { - var path_buffer = std.mem.zeroes([4096]u8); - var path = stack.Fixed(u8){.buffer = path_buffer[0 .. ]}; - - path.pushAll(implementation.user_prefix) catch { - open_request.result = error.BadFileSystem; - - continue; - }; - - if (!open_request.path.write(path.writer())) { - open_request.result = error.NotFound; - - continue; - } - - if (ext.SDL_RWFromFile(&path_buffer, switch (open_request.mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - })) |rw_ops| { - open_request.result = @ptrCast(*FileAccess, rw_ops); - } else { - open_request.result = error.NotFound; - } - }, - } - }, - - .close => |*close_request| { - // TODO: Use this result somehow. - _ = ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, @alignCast( - @alignOf(ext.SDL_RWops), close_request.file_access))); - }, - - .read_file => |read_request| { - // TODO: Implement. - _ = read_request; - }, - - .seek_file => |seek_request| { - // TODO: Implement. - _ = seek_request; - }, - - .tell_file => |tell_request| { - // TODO: Implement. - _ = tell_request; - }, - } - - resume messages.frame; - - implementation.file_system_messages = messages.next; - } - - // TODO: Error check this. - _ = ext.SDL_SemWait(implementation.file_system_semaphore); - } - } - /// /// Attempts to read the contents of the file referenced by `file_access` at the current file /// cursor position into `buffer`. 
@@ -320,7 +405,7 @@ pub const EventLoop = opaque { }}, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); return file_system_message.request.read_file.result; } @@ -337,7 +422,7 @@ pub const EventLoop = opaque { .request = .{.tell_file = .{.file_access = file_access}}, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); return file_system_message.request.tell_file.result; } @@ -361,7 +446,7 @@ pub const EventLoop = opaque { }, }; - suspend event_loop.enqueueFileSystemMessage(&file_system_message); + suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); return file_system_message.request.seek_file.result; } @@ -546,18 +631,6 @@ pub fn runGraphics(comptime Errors: anytype, run: GraphicsRunner(Errors)) Errors defer ext.SDL_Quit(); - const pref_path = create_pref_path: { - const path = ext.SDL_GetPrefPath("ona", "ona") orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path"); - - return error.InitFailure; - }; - - break: create_pref_path path[0 .. std.mem.len(path)]; - }; - - defer ext.SDL_free(pref_path.ptr); - const window = create_window: { const pos = ext.SDL_WINDOWPOS_UNDEFINED; var flags = @as(u32, 0); @@ -583,39 +656,34 @@ pub fn runGraphics(comptime Errors: anytype, run: GraphicsRunner(Errors)) Errors defer ext.SDL_DestroyRenderer(renderer); - var event_loop = EventLoop.Implementation{ - .file_system_semaphore = ext.SDL_CreateSemaphore(0) orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work scheduler"); + var event_loop = EventLoop.Implementation.init() catch |err| { + switch (err) { + error.OutOfMemory => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to allocate necessary memory"), - return error.InitFailure; - }, + error.OutOfMutexes => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to create file-system work lock"), - .file_system_mutex = ext.SDL_CreateMutex() orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work lock"); - - return error.InitFailure; - }, - - .file_system_thread = unreachable, - .user_prefix = pref_path, - }; - - event_loop.file_system_thread = ext.SDL_CreateThread( - EventLoop.processFileSystemMessages, "File System Worker", &event_loop) orelse { - - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work processor"); + error.OutOfSemaphores => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to create file-system work scheduler"), + } return error.InitFailure; }; - defer { - ext.SDL_DestroyThread(event_loop.file_system_thread); - ext.SDL_DestroyMutex(event_loop.file_system_mutex); - ext.SDL_DestroySemaphore(event_loop.file_system_semaphore); - } + defer event_loop.deinit(); + + event_loop.start() catch |err| { + switch (err) { + // Not possible for it to have already been started. 
+ error.AlreadyStarted => unreachable, + + error.OutOfThreads => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to start file-system work processor"), + } + + return error.InitFailure; + }; var graphics_context = GraphicsContext.Implementation{ .event = .{ @@ -623,20 +691,5 @@ pub fn runGraphics(comptime Errors: anytype, run: GraphicsRunner(Errors)) Errors }, }; - var message = EventLoop.FileSystemMessage{ - .frame = @frame(), - .request = .exit, - }; - - @ptrCast(*EventLoop, event_loop).enqueueFileSystemMessage(&message); - - var status = @as(c_int, 0); - - ext.SDL_WaitThread(event_loop.file_system_thread, &status); - - if (status != 0) { - // TODO: Error check this. - } - return run(@ptrCast(*EventLoop, &event_loop), @ptrCast(*GraphicsContext, &graphics_context)); } -- 2.34.1 From 28e61f846a26a9018a8739beb55c371e2cde4ceb Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 13:54:17 +0100 Subject: [PATCH 05/93] Improve clarity of SDL2_WaitThread behavior comment --- src/sys.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/sys.zig b/src/sys.zig index 8d41844..ff5755f 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -108,8 +108,8 @@ pub const EventLoop = opaque { { var status = @as(c_int, 0); - // SDL2 defines waiting on a null thread reference as a no-op. - // https://wiki.libsdl.org/SDL_WaitThread + // SDL2 defines waiting on a null thread reference as a no-op. See + // https://wiki.libsdl.org/SDL_WaitThread for more information ext.SDL_WaitThread(implementation.file_system_thread, &status); if (status != 0) { -- 2.34.1 From d6fad7288d3c8ea02e36c518a6aae115822c25be Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 14:19:34 +0100 Subject: [PATCH 06/93] Fix Zig errors appearing as GCC errors in VS Code --- .vscode/tasks.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 016ab6a..61ebf8f 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -23,7 +23,7 @@ }, "problemMatcher": { - "source": "gcc", + "source": "zig", "owner": "cpptools", "fileLocation": [ -- 2.34.1 From 95f48b28c70bb52d1d660d1c3b98417bf0c6a015 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 14:20:32 +0100 Subject: [PATCH 07/93] Fix compilation error when calling runGraphics --- src/main.zig | 2 +- src/sys.zig | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main.zig b/src/main.zig index 4108ff8..4b83a2b 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,7 +11,7 @@ const sys = @import("./sys.zig"); /// Entry point. 
/// pub fn main() anyerror!void { - return sys.runGraphics(anyerror, run); + return nosuspend sys.runGraphics(anyerror, run); } test { diff --git a/src/sys.zig b/src/sys.zig index ff5755f..7864d70 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -616,13 +616,13 @@ pub const GraphicsContext = opaque { /// /// pub fn GraphicsRunner(comptime Errors: type) type { - return fn (*EventLoop, *GraphicsContext) Errors!void; + return fn (*EventLoop, *GraphicsContext) callconv(.Async) Errors!void; } /// /// /// -pub fn runGraphics(comptime Errors: anytype, run: GraphicsRunner(Errors)) Errors!void { +pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors)) Errors!void { if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize runtime"); -- 2.34.1 From fbd79b5b41d923d05c781b4028fd63c2e6631d71 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 22:10:28 +0100 Subject: [PATCH 08/93] Fix panic when calling runGraphics in main --- src/main.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.zig b/src/main.zig index 4b83a2b..1cd1926 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,7 +11,7 @@ const sys = @import("./sys.zig"); /// Entry point. /// pub fn main() anyerror!void { - return nosuspend sys.runGraphics(anyerror, run); + return nosuspend await async sys.runGraphics(anyerror, run); } test { -- 2.34.1 From d2f4c0afe17df1c412e97ff27241ecf894c4ced3 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 4 Oct 2022 23:15:59 +0100 Subject: [PATCH 09/93] Fix paths being created with trailing "/" always --- src/sys.zig | 41 +++++++++++++++++++++++++++-------------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/src/sys.zig b/src/sys.zig index 7864d70..cd0cadd 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -550,26 +550,39 @@ pub const FileSystem = enum { .length = 0, }; - for (sequences) |sequence| if (sequence.len != 0) { - var components = mem.Spliterator(u8){ - .source = sequence, - .delimiter = "/", - }; + if (sequences.len != 0) { + const last_sequence_index = sequences.len - 1; - while (components.next()) |component| if (component.len != 0) { - for (component) |byte| { + for (sequences) |sequence, index| if (sequence.len != 0) { + var components = mem.Spliterator(u8){ + .source = sequence, + .delimiter = "/", + }; + + while (components.next()) |component| if (component.len != 0) { + for (component) |byte| { + if (path.length == Path.max) return error.TooLong; + + path.buffer[path.length] = byte; + path.length += 1; + } + + if (components.hasNext()) { + if (path.length == Path.max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + + if (index < last_sequence_index) { if (path.length == Path.max) return error.TooLong; - path.buffer[path.length] = byte; + path.buffer[path.length] = '/'; path.length += 1; } - - if (path.length == Path.max) return error.TooLong; - - path.buffer[path.length] = '/'; - path.length += 1; }; - }; + } return path; } -- 2.34.1 From 979c2a73f3b1413a0e3158def571c0b764625e96 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 6 Oct 2022 10:24:39 +0100 Subject: [PATCH 10/93] Remove global data in file system API --- src/main.zig | 17 ++- src/sys.zig | 337 ++++++++++++++++++++++++++++----------------------- 2 files changed, 196 insertions(+), 158 deletions(-) diff --git a/src/main.zig b/src/main.zig index 1cd1926..df78f01 100644 --- a/src/main.zig +++ b/src/main.zig @@ -21,30 +21,29 @@ test { _ = sys; } -fn 
run(event_loop: *sys.EventLoop, graphics: *sys.GraphicsContext) anyerror!void { +fn run(ev: *sys.EventLoop, fs: *const sys.FileSystem, gr: *sys.GraphicsContext) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); { - const file_access = try event_loop.open(.readonly, - try sys.FileSystem.data.joinedPath(&.{"data", "ona.lua"})); + const file_access = try ev.open(.readonly, try fs.data.joinedPath(&.{"ona.lua"})); - defer event_loop.close(file_access); + defer ev.close(file_access); - const file_size = try file_access.size(event_loop); + const file_size = try file_access.size(ev); const allocator = gpa.allocator(); const buffer = try allocator.alloc(u8, file_size); defer allocator.free(buffer); - if ((try event_loop.readFile(file_access, buffer)) != file_size) + if ((try ev.readFile(file_access, buffer)) != file_size) return error.ScriptLoadFailure; - event_loop.log(.debug, buffer); + ev.log(.debug, buffer); } - while (graphics.poll()) |_| { - graphics.present(); + while (gr.poll()) |_| { + gr.present(); } } diff --git a/src/sys.zig b/src/sys.zig index cd0cadd..973a81c 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -61,7 +61,6 @@ pub const EventLoop = opaque { /// Internal state of the event loop hidden from the API consumer. /// const Implementation = struct { - user_prefix: []u8, file_system_semaphore: *ext.SDL_sem, file_system_mutex: *ext.SDL_mutex, file_system_thread: ?*ext.SDL_Thread, @@ -71,6 +70,8 @@ pub const EventLoop = opaque { /// /// const InitError = error { + DataFileNotFound, + DataFileInvalid, OutOfSemaphores, OutOfMutexes, OutOfMemory, @@ -147,22 +148,19 @@ pub const EventLoop = opaque { /// /// fn init() InitError!Implementation { + const data_file_access = @ptrCast(*FileAccess, + ext.SDL_RWFromFile("./data.tar", "r+") orelse return error.DataFileNotFound); + return Implementation{ - .user_prefix = create_pref_path: { - const path = ext.SDL_GetPrefPath("ona", "ona") orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to load user path"); - - return error.OutOfMemory; - }; - - break: create_pref_path path[0 .. std.mem.len(path)]; + .data_archive = tar.Archive.init(data_file_access) catch |err| switch (err) { + error.Invalid, error.Inaccessible => return error.DataFileInvalid, }, .file_system_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .file_system_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, + .data_file = data_file_access, .file_system_thread = null, }; } @@ -181,51 +179,12 @@ pub const EventLoop = opaque { while (implementation.file_system_messages) |messages| { switch (messages.request) { .exit => return 0, + .log => |*log_request| .log(log_request.kind, log_request.message), - .log => |*log_request| ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, - @enumToInt(log_request.kind), log_request.message.ptr), + .open => |*open_request| open_request.result = + .open(open_request.mode, open_request.file_system_path), - .open => |*open_request| { - switch (open_request.file_system_path.file_system) { - .data => { - // TODO: Implement - open_request.result = error.NotFound; - }, - - .user => { - var path_buffer = std.mem.zeroes([4096]u8); - var path = stack.Fixed(u8){.buffer = path_buffer[0 .. 
]}; - - path.pushAll(implementation.user_prefix) catch { - open_request.result = error.NotFound; - - continue; - }; - - if (!open_request.file_system_path.write(path.writer())) { - open_request.result = error.NotFound; - - continue; - } - - if (ext.SDL_RWFromFile(&path_buffer, switch (open_request.mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - })) |rw_ops| { - open_request.result = @ptrCast(*FileAccess, rw_ops); - } else { - open_request.result = error.NotFound; - } - }, - } - }, - - .close => |*close_request| { - // TODO: Use this result somehow. - _ = ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, @alignCast( - @alignOf(ext.SDL_RWops), close_request.file_access))); - }, + .close => |*close_request| .close(close_request.file_access), .read_file => |read_request| { // TODO: Implement. @@ -270,63 +229,6 @@ pub const EventLoop = opaque { } }; - /// - /// [LogKind.info] represents a log message which is purely informative and does not indicate - /// any kind of issue. - /// - /// [LogKind.debug] represents a log message which is purely for debugging purposes and will - /// only occurs in debug builds. - /// - /// [LogKind.warning] represents a log message which is a warning about a issue that does not - /// break anything important but is not ideal. - /// - pub const LogKind = enum(u32) { - info = ext.SDL_LOG_PRIORITY_INFO, - debug = ext.SDL_LOG_PRIORITY_DEBUG, - warning = ext.SDL_LOG_PRIORITY_WARN, - }; - - /// - /// [OpenError.NotFound] is a catch-all for when a file could not be located to be opened. This - /// may be as simple as it doesn't exist or the because the underlying file-system will not / - /// cannot give access to it at this time. - /// - pub const OpenError = error { - NotFound, - }; - - /// - /// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, - /// disallowing write access. - /// - /// [OpenMode.overwrite] indicates that an empty file has been created or an existing file has - /// been completely overwritten into. - /// - /// [OpenMode.append] indicates that an existing file that has been opened for reading from and - /// writing to on the end of existing data. - /// - pub const OpenMode = enum { - readonly, - overwrite, - append, - }; - - /// - /// [SeekOrigin.head] indicates that a seek operation will seek from the offset origin of the - /// file beginning, or "head". - /// - /// [SeekOrigin.tail] indicates that a seek operation will seek from the offset origin of the - /// file end, or "tail". - /// - /// [SeekOrigin.cursor] indicates that a seek operation will seek from the current position of - /// the file cursor. - /// - pub const SeekOrigin = enum { - head, - tail, - cursor, - }; - /// /// Closes access to the file referenced by `file_access` via `event_loop`. /// @@ -488,15 +390,15 @@ pub const FileError = error { /// Platform-agnostic mechanism for working with an abstraction of the underlying file-system(s) /// available to the application in a sandboxed environment. /// -pub const FileSystem = enum { - data, - user, +pub const FileSystem = struct { + data: Root, + user: Root, /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. /// pub const Path = struct { - file_system: FileSystem, + root: *const Root, length: u16, buffer: [max]u8, @@ -521,7 +423,7 @@ pub const FileSystem = enum { /// byte. Because of this, it is not safe to asume that a path may hold [max] individual /// characters. 
/// - pub const max = 1000; + pub const max = 512; /// /// @@ -540,52 +442,58 @@ pub const FileSystem = enum { }; /// - /// Creates and returns a [Path] value in the file system to the location specified by the - /// joining of the `sequences` path values. /// - pub fn joinedPath(file_system: FileSystem, sequences: []const []const u8) PathError!Path { - var path = Path{ - .file_system = file_system, - .buffer = std.mem.zeroes([Path.max]u8), - .length = 0, - }; + /// + pub const Root = struct { + prefix: []const u8, - if (sequences.len != 0) { - const last_sequence_index = sequences.len - 1; + /// + /// + /// + pub fn joinedPath(root: Root, sequences: []const []const u8) PathError!Path { + var path = Path{ + .root = root, + .buffer = std.mem.zeroes([Path.max]u8), + .length = 0, + }; - for (sequences) |sequence, index| if (sequence.len != 0) { - var components = mem.Spliterator(u8){ - .source = sequence, - .delimiter = "/", - }; + if (sequences.len != 0) { + const last_sequence_index = sequences.len - 1; - while (components.next()) |component| if (component.len != 0) { - for (component) |byte| { - if (path.length == Path.max) return error.TooLong; + for (sequences) |sequence, index| if (sequence.len != 0) { + var components = mem.Spliterator(u8){ + .source = sequence, + .delimiter = "/", + }; - path.buffer[path.length] = byte; - path.length += 1; - } + while (components.next()) |component| if (component.len != 0) { + for (component) |byte| { + if (path.length == Path.max) return error.TooLong; - if (components.hasNext()) { + path.buffer[path.length] = byte; + path.length += 1; + } + + if (components.hasNext()) { + if (path.length == Path.max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + + if (index < last_sequence_index) { if (path.length == Path.max) return error.TooLong; path.buffer[path.length] = '/'; path.length += 1; } }; + } - if (index < last_sequence_index) { - if (path.length == Path.max) return error.TooLong; - - path.buffer[path.length] = '/'; - path.length += 1; - } - }; + return path; } - - return path; - } + }; }; /// @@ -629,13 +537,125 @@ pub const GraphicsContext = opaque { /// /// pub fn GraphicsRunner(comptime Errors: type) type { - return fn (*EventLoop, *GraphicsContext) callconv(.Async) Errors!void; + return fn (*EventLoop, *FileSystem, *GraphicsContext) callconv(.Async) Errors!void; +} + +/// +/// [LogKind.info] represents a log message which is purely informative and does not indicate +/// any kind of issue. +/// +/// [LogKind.debug] represents a log message which is purely for debugging purposes and will +/// only occurs in debug builds. +/// +/// [LogKind.warning] represents a log message which is a warning about a issue that does not +/// break anything important but is not ideal. +/// +pub const LogKind = enum(u32) { + info = ext.SDL_LOG_PRIORITY_INFO, + debug = ext.SDL_LOG_PRIORITY_DEBUG, + warning = ext.SDL_LOG_PRIORITY_WARN, +}; + +/// +/// [OpenError.NotFound] is a catch-all for when a file could not be located to be opened. This +/// may be as simple as it doesn't exist or the because the underlying file-system will not / +/// cannot give access to it at this time. +/// +pub const OpenError = error { + NotFound, +}; + +/// +/// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, +/// disallowing write access. +/// +/// [OpenMode.overwrite] indicates that an empty file has been created or an existing file has +/// been completely overwritten into. 
+/// +/// [OpenMode.append] indicates that an existing file that has been opened for reading from and +/// writing to on the end of existing data. +/// +pub const OpenMode = enum { + readonly, + overwrite, + append, +}; + +/// +/// +/// +pub const RunError = error { + InitFailure, + AlreadyRunning, +}; + +/// +/// [SeekOrigin.head] indicates that a seek operation will seek from the offset origin of the +/// file beginning, or "head". +/// +/// [SeekOrigin.tail] indicates that a seek operation will seek from the offset origin of the +/// file end, or "tail". +/// +/// [SeekOrigin.cursor] indicates that a seek operation will seek from the current position of +/// the file cursor. +/// +pub const SeekOrigin = enum { + head, + tail, + cursor, +}; + +/// +/// +/// +pub fn close(file_access: *FileAccess) void { + if (!ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, + @alignCast(@alignOf(ext.SDL_RWops), file_access)))) { + + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to close file - may have been already closed"); + } } /// /// /// -pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors)) Errors!void { +pub fn log(kind: LogKind, message: []const u8) void { + ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, + @enumToInt(kind), "%.*s", message.len, message.ptr); +} + +/// +/// +/// +pub fn open(mode: OpenMode, file_system_path: FileSystem.Path) OpenError!*FileAccess { + switch (file_system_path.file_system) { + .data => { + // TODO: Implement + return error.NotFound; + }, + + .user => { + var path_buffer = std.mem.zeroes([4096]u8); + var path = stack.Fixed(u8){.buffer = path_buffer[0 .. ]}; + + path.pushAll("/home/kayomn/.local/share") catch return error.NotFound; + + if (file_system_path.write(path.writer())) return error.NotFound; + + return @ptrCast(*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + })) orelse error.NotFound; + }, + } +} + +/// +/// +/// +pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors)) (RunError || Errors)!void { if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize runtime"); @@ -669,6 +689,25 @@ pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors defer ext.SDL_DestroyRenderer(renderer); + var file_system = FileSystem{ + .user = .{.prefix = create_pref_path: { + const path = ext.SDL_GetPrefPath("ona", "ona") orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to load user path"); + + return error.InitFailure; + }; + + break: create_pref_path path[0 .. 
std.mem.len(path)]; + }}, + + .data = .{.prefix = "./"}, + }; + + defer { + ext.SDL_free(file_system.user.prefix); + } + var event_loop = EventLoop.Implementation.init() catch |err| { switch (err) { error.OutOfMemory => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, -- 2.34.1 From 1289e9634b1dc8d913d1fc81ab68c9d190ad85d4 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 6 Oct 2022 23:22:07 +0100 Subject: [PATCH 11/93] Rework file system API to support different backends --- src/sys.zig | 73 +++++++++++++++++++++++------------------------------ 1 file changed, 32 insertions(+), 41 deletions(-) diff --git a/src/sys.zig b/src/sys.zig index 973a81c..06cd249 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -70,8 +70,6 @@ pub const EventLoop = opaque { /// /// const InitError = error { - DataFileNotFound, - DataFileInvalid, OutOfSemaphores, OutOfMutexes, OutOfMemory, @@ -118,7 +116,6 @@ pub const EventLoop = opaque { } } - ext.SDL_free(implementation.user_prefix.ptr); ext.SDL_DestroyMutex(implementation.file_system_mutex); ext.SDL_DestroySemaphore(implementation.file_system_semaphore); } @@ -148,19 +145,11 @@ pub const EventLoop = opaque { /// /// fn init() InitError!Implementation { - const data_file_access = @ptrCast(*FileAccess, - ext.SDL_RWFromFile("./data.tar", "r+") orelse return error.DataFileNotFound); - return Implementation{ - .data_archive = tar.Archive.init(data_file_access) catch |err| switch (err) { - error.Invalid, error.Inaccessible => return error.DataFileInvalid, - }, - .file_system_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .file_system_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, - .data_file = data_file_access, .file_system_thread = null, }; } @@ -177,14 +166,16 @@ pub const EventLoop = opaque { while (true) { while (implementation.file_system_messages) |messages| { + const root = @import("./sys.zig"); + switch (messages.request) { .exit => return 0, - .log => |*log_request| .log(log_request.kind, log_request.message), + .log => |*log_request| root.log(log_request.kind, log_request.message), .open => |*open_request| open_request.result = - .open(open_request.mode, open_request.file_system_path), + root.open(open_request.mode, open_request.file_system_path.*), - .close => |*close_request| .close(close_request.file_access), + .close => |*close_request| root.close(close_request.file_access), .read_file => |read_request| { // TODO: Implement. 
@@ -444,13 +435,17 @@ pub const FileSystem = struct { /// /// /// - pub const Root = struct { - prefix: []const u8, + pub const Root = union(enum) { + native: struct { + prefix: []const u8, + }, + + tar: struct {}, /// /// /// - pub fn joinedPath(root: Root, sequences: []const []const u8) PathError!Path { + pub fn joinedPath(root: *const Root, sequences: []const []const u8) PathError!Path { var path = Path{ .root = root, .buffer = std.mem.zeroes([Path.max]u8), @@ -609,8 +604,8 @@ pub const SeekOrigin = enum { /// /// pub fn close(file_access: *FileAccess) void { - if (!ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, - @alignCast(@alignOf(ext.SDL_RWops), file_access)))) { + if (ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, + @alignCast(@alignOf(ext.SDL_RWops), file_access))) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to close file - may have been already closed"); @@ -629,21 +624,21 @@ pub fn log(kind: LogKind, message: []const u8) void { /// /// pub fn open(mode: OpenMode, file_system_path: FileSystem.Path) OpenError!*FileAccess { - switch (file_system_path.file_system) { - .data => { - // TODO: Implement + switch (file_system_path.root.*) { + .tar => { + // TODO: Implement. return error.NotFound; }, - .user => { + .native => |native| { var path_buffer = std.mem.zeroes([4096]u8); var path = stack.Fixed(u8){.buffer = path_buffer[0 .. ]}; - path.pushAll("/home/kayomn/.local/share") catch return error.NotFound; + path.pushAll(native.prefix) catch return error.NotFound; if (file_system_path.write(path.writer())) return error.NotFound; - return @ptrCast(*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { + return @ptrCast(?*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { .readonly => "rb", .overwrite => "wb", .append => "ab", @@ -689,24 +684,19 @@ pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors defer ext.SDL_DestroyRenderer(renderer); - var file_system = FileSystem{ - .user = .{.prefix = create_pref_path: { - const path = ext.SDL_GetPrefPath("ona", "ona") orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to load user path"); + const user_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to load user path"); - return error.InitFailure; - }; - - break: create_pref_path path[0 .. std.mem.len(path)]; - }}, - - .data = .{.prefix = "./"}, + return error.InitFailure; }; - defer { - ext.SDL_free(file_system.user.prefix); - } + defer ext.SDL_free(user_prefix); + + var file_system = FileSystem{ + .user = .{.native = .{.prefix = user_prefix[0 .. 
std.mem.len(user_prefix)]}}, + .data = .{.tar = .{}}, + }; var event_loop = EventLoop.Implementation.init() catch |err| { switch (err) { @@ -743,5 +733,6 @@ pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors }, }; - return run(@ptrCast(*EventLoop, &event_loop), @ptrCast(*GraphicsContext, &graphics_context)); + return run(@ptrCast(*EventLoop, &event_loop), &file_system, + @ptrCast(*GraphicsContext, &graphics_context)); } -- 2.34.1 From c42885bf616fb9f133478e107b8d2ca03789df05 Mon Sep 17 00:00:00 2001 From: kayomn Date: Fri, 7 Oct 2022 21:25:49 +0100 Subject: [PATCH 12/93] Add stubs for Tar-based file system logic --- src/sys.zig | 62 ++++++++++++++++++++++++++++------------------------- src/tar.zig | 29 +++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 29 deletions(-) create mode 100644 src/tar.zig diff --git a/src/sys.zig b/src/sys.zig index 06cd249..1de06ec 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -6,14 +6,14 @@ const io = @import("./io.zig"); const mem = @import("./mem.zig"); const stack = @import("./stack.zig"); const std = @import("std"); +const tar = @import("./tar.zig"); /// /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. /// -pub const EventLoop = opaque { +pub const App = opaque { /// - /// Linked list of messages chained together to be processed by the internal file system message - /// processor of an [EventLoop]. + /// Linked list of tasks chained together to be processed by the work processor. /// const FileSystemMessage = struct { next: ?*FileSystemMessage = null, @@ -34,7 +34,7 @@ pub const EventLoop = opaque { open: struct { mode: OpenMode, file_system_path: *const FileSystem.Path, - result: OpenError!*FileAccess = error.NotFound, + result: OpenError!*FileAccess = error.FileNotFound, }, read_file: struct { @@ -436,11 +436,8 @@ pub const FileSystem = struct { /// /// pub const Root = union(enum) { - native: struct { - prefix: []const u8, - }, - - tar: struct {}, + native: []const u8, + archive: *tar.Archive, /// /// @@ -552,12 +549,12 @@ pub const LogKind = enum(u32) { }; /// -/// [OpenError.NotFound] is a catch-all for when a file could not be located to be opened. This +/// [OpenError.FileNotFound] is a catch-all for when a file could not be located to be opened. This /// may be as simple as it doesn't exist or the because the underlying file-system will not / /// cannot give access to it at this time. /// pub const OpenError = error { - NotFound, + FileNotFound, }; /// @@ -625,24 +622,26 @@ pub fn log(kind: LogKind, message: []const u8) void { /// pub fn open(mode: OpenMode, file_system_path: FileSystem.Path) OpenError!*FileAccess { switch (file_system_path.root.*) { - .tar => { + .archive => |archive| { // TODO: Implement. - return error.NotFound; + _ = archive; + + return error.FileNotFound; }, .native => |native| { var path_buffer = std.mem.zeroes([4096]u8); var path = stack.Fixed(u8){.buffer = path_buffer[0 .. 
]}; - path.pushAll(native.prefix) catch return error.NotFound; + path.pushAll(native) catch return error.FileNotFound; - if (file_system_path.write(path.writer())) return error.NotFound; + if (file_system_path.write(path.writer())) return error.FileNotFound; return @ptrCast(?*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { .readonly => "rb", .overwrite => "wb", .append => "ab", - })) orelse error.NotFound; + })) orelse error.FileNotFound; }, } } @@ -650,7 +649,9 @@ pub fn open(mode: OpenMode, file_system_path: FileSystem.Path) OpenError!*FileAc /// /// /// -pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors)) (RunError || Errors)!void { +pub fn runGraphics(comptime Errors: anytype, allocator: std.mem.Allocator, + comptime run: GraphicsRunner(Errors)) (RunError || Errors)!void { + if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize runtime"); @@ -693,22 +694,25 @@ pub fn runGraphics(comptime Errors: anytype, comptime run: GraphicsRunner(Errors defer ext.SDL_free(user_prefix); + var data_archive = tar.Archive.init(allocator); + + data_archive.load("./data.tar") catch |err| switch (err) { + error.FileNotFound => { + + }, + }; + var file_system = FileSystem{ - .user = .{.native = .{.prefix = user_prefix[0 .. std.mem.len(user_prefix)]}}, - .data = .{.tar = .{}}, + .user = .{.native = user_prefix[0 .. std.mem.len(user_prefix)]}, + .data = .{.archive = &data_archive}, }; var event_loop = EventLoop.Implementation.init() catch |err| { - switch (err) { - error.OutOfMemory => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to allocate necessary memory"), - - error.OutOfMutexes => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work lock"), - - error.OutOfSemaphores => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work scheduler"), - } + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { + error.OutOfMemory => "Failed to allocate necessary memory", + error.OutOfMutexes => "Failed to create file-system work lock", + error.OutOfSemaphores => "Failed to create file-system work scheduler", + }); return error.InitFailure; }; diff --git a/src/tar.zig b/src/tar.zig new file mode 100644 index 0000000..7adb58e --- /dev/null +++ b/src/tar.zig @@ -0,0 +1,29 @@ +const std = @import("std"); + +/// +/// +/// +pub const Archive = struct { + allocator: std.mem.Allocator, + + pub const LoadError = error { + FileNotFound, + }; + + /// + /// + /// + pub fn init(allocator: std.mem.Allocator) Archive { + return Archive{ + .allocator = allocator, + }; + } + + /// + /// + /// + pub fn load(archive: *Archive, file_path: []const u8) LoadError!void { + _ = file_path; + _ = archive; + } +}; -- 2.34.1 From 9ae6e8b4a7bb6b21d8ee7710fc8b57f2a101b0e3 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 9 Oct 2022 23:10:13 +0100 Subject: [PATCH 13/93] Implement first pass of Oar archive reading mechanism --- src/main.zig | 16 +- src/math.zig | 6 + src/oar.zig | 14 + src/sys.zig | 1100 +++++++++++++++++++++++++++++++------------------- src/tar.zig | 29 -- 5 files changed, 714 insertions(+), 451 deletions(-) create mode 100644 src/math.zig create mode 100644 src/oar.zig delete mode 100644 src/tar.zig diff --git a/src/main.zig b/src/main.zig index df78f01..a3bcbf8 100644 --- a/src/main.zig +++ b/src/main.zig @@ -21,29 +21,29 @@ test { _ = sys; } -fn run(ev: *sys.EventLoop, fs: *const sys.FileSystem, gr: 
*sys.GraphicsContext) anyerror!void { +fn run(app: *sys.App, graphics: *sys.GraphicsContext) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); { - const file_access = try ev.open(.readonly, try fs.data.joinedPath(&.{"ona.lua"})); + const file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(app, .readonly); - defer ev.close(file_access); + defer file_access.close(app); - const file_size = try file_access.size(ev); + const file_size = try file_access.queryLength(app); const allocator = gpa.allocator(); const buffer = try allocator.alloc(u8, file_size); defer allocator.free(buffer); - if ((try ev.readFile(file_access, buffer)) != file_size) + if ((try file_access.read(app, buffer)) != file_size) return error.ScriptLoadFailure; - ev.log(.debug, buffer); + sys.Log.debug.write(app, buffer); } - while (gr.poll()) |_| { - gr.present(); + while (graphics.poll()) |_| { + graphics.present(); } } diff --git a/src/math.zig b/src/math.zig new file mode 100644 index 0000000..66c8cc9 --- /dev/null +++ b/src/math.zig @@ -0,0 +1,6 @@ +/// +/// Rounds the `Number` `value` up to the nearest `multiple`. +/// +pub fn roundUp(comptime Number: type, value: Number, multiple: Number) Number { + return value + @mod(@mod(multiple - value, multiple), multiple); +} diff --git a/src/oar.zig b/src/oar.zig new file mode 100644 index 0000000..7660c8c --- /dev/null +++ b/src/oar.zig @@ -0,0 +1,14 @@ +const std = @import("std"); +const sys = @import("./sys.zig"); + +/// +/// An entry block of an Oar archive file. +/// +/// Typically, following this block in memory is the file data it holds the meta-information for. +/// +pub const Entry = extern struct { + name_length: u8, + name_buffer: [255]u8 = std.mem.zeroes([255]u8), + file_size: u64, + padding: [248]u8, +}; diff --git a/src/sys.zig b/src/sys.zig index 1de06ec..e0c60b7 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -3,56 +3,29 @@ const ext = @cImport({ }); const io = @import("./io.zig"); +const math = @import("./math.zig"); const mem = @import("./mem.zig"); +const oar = @import("./oar.zig"); const stack = @import("./stack.zig"); const std = @import("std"); -const tar = @import("./tar.zig"); /// /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. /// pub const App = opaque { /// - /// Linked list of tasks chained together to be processed by the work processor. + /// Linked list of asynchronous messages chained together to be processed by the work processor. /// - const FileSystemMessage = struct { - next: ?*FileSystemMessage = null, + const Message = struct { + next: ?*Message = null, frame: anyframe, - request: union(enum) { - exit, + kind: union(enum) { + quit, - close: struct { - file_access: *FileAccess, - }, - - log: struct { - message: []const u8, - kind: LogKind, - }, - - open: struct { - mode: OpenMode, - file_system_path: *const FileSystem.Path, - result: OpenError!*FileAccess = error.FileNotFound, - }, - - read_file: struct { - file_access: *FileAccess, - buffer: []const u8, - result: FileError!usize = error.Inaccessible, - }, - - seek_file: struct { - file_access: *FileAccess, - origin: SeekOrigin, - offset: usize, - result: FileError!void = error.Inaccessible, - }, - - tell_file: struct { - file_access: *FileAccess, - result: FileError!usize = error.Inaccessible, + task: struct { + data: *anyopaque, + action: fn (*anyopaque) void, }, }, }; @@ -61,13 +34,22 @@ pub const App = opaque { /// Internal state of the event loop hidden from the API consumer. 
/// const Implementation = struct { - file_system_semaphore: *ext.SDL_sem, - file_system_mutex: *ext.SDL_mutex, - file_system_thread: ?*ext.SDL_Thread, - file_system_messages: ?*FileSystemMessage = null, + data_file_system: FileSystem, + user_file_system: FileSystem, + message_semaphore: *ext.SDL_sem, + message_mutex: *ext.SDL_mutex, + message_thread: ?*ext.SDL_Thread, + messages: ?*Message = null, /// + /// [StartError.OutOfSemaphores] indicates that the process has no more semaphores available + /// to it for use, meaning an [Implementation] may not be initialized at this time. /// + /// [StartError.OutOfMutexes] indicates that the process has no more mutexes available to it + /// for use, meaning an [Implementation] may not be initialized at this time. + /// + /// [StartError.OutOfMemory] indicates that the process has no more memory available to it + /// for use, meaning an [Implementation] may not be initialized at this time. /// const InitError = error { OutOfSemaphores, @@ -76,7 +58,12 @@ pub const App = opaque { }; /// + /// [StartError.OutOfThreads] indicates that the process has no more threads available to it + /// to use, meaning that no asynchronous work may be started on an [Implementation] at this + /// time. /// + /// [StartError.AlreadyStarted] is occurs when a request to start work processing happens on + /// an [Implementation] that is already processing work. /// const StartError = error { OutOfThreads, @@ -84,73 +71,56 @@ pub const App = opaque { }; /// - /// Casts `event_loop` to a [Implementation] reference. + /// Casts `app` to a [Implementation] reference. /// - /// *Note* that if `event_loop` does not have the same alignment as [Implementation], - /// safety-checked undefined behavior will occur. + /// *Note* that if `app` does not have the same alignment as [Implementation], safety- + /// checked undefined behavior will occur. /// - fn cast(event_loop: *EventLoop) *Implementation { - return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), event_loop)); + fn cast(app: *App) *Implementation { + return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), app)); } /// - /// + /// Deinitializes the `implementation`, requesting any running asynchronous workload + /// processes quit and waits for them to do so before freeing any resources. /// fn deinit(implementation: *Implementation) void { - var message = FileSystemMessage{ + var message = Message{ .frame = @frame(), - .request = .exit, + .kind = .quit, }; - implementation.enqueueFileSystemMessage(&message); + @ptrCast(*App, implementation).schedule(&message); { var status = @as(c_int, 0); // SDL2 defines waiting on a null thread reference as a no-op. See // https://wiki.libsdl.org/SDL_WaitThread for more information - ext.SDL_WaitThread(implementation.file_system_thread, &status); + ext.SDL_WaitThread(implementation.message_thread, &status); if (status != 0) { // TODO: Error check this. } } - ext.SDL_DestroyMutex(implementation.file_system_mutex); - ext.SDL_DestroySemaphore(implementation.file_system_semaphore); + ext.SDL_DestroyMutex(implementation.message_mutex); + ext.SDL_DestroySemaphore(implementation.message_semaphore); } /// - /// Enqueues `message` to the file system message processor of `implementation` to be - /// processed at a later, non-deterministic point. + /// Initializes a new [Implemenation] with `data_archive_path` as the read-only data archive + /// to read from and `user_path_prefix` as the native writable user data directory. 
/// - fn enqueueFileSystemMessage(implementation: *Implementation, - message: *FileSystemMessage) void { - - // TODO: Error check this. - _ = ext.SDL_LockMutex(implementation.file_system_mutex); - - if (implementation.file_system_messages) |messages| { - messages.next = message; - } else { - implementation.file_system_messages = message; - } - - // TODO: Error check these. - _ = ext.SDL_UnlockMutex(implementation.file_system_mutex); - _ = ext.SDL_SemPost(implementation.file_system_semaphore); - } - + /// Returns the created [Implementation] value on success or [InitError] on failure. /// - /// - /// - fn init() InitError!Implementation { + fn init(data_archive_path: []const u8, user_path_prefix: []const u8) InitError!Implementation { return Implementation{ - .file_system_semaphore = ext.SDL_CreateSemaphore(0) - orelse return error.OutOfSemaphores, - - .file_system_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, - .file_system_thread = null, + .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, + .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, + .data_file_system = .{.archive = data_archive_path}, + .user_file_system = .{.native = user_path_prefix}, + .message_thread = null, }; } @@ -161,57 +131,40 @@ pub const App = opaque { /// The processor returns `0` if it exited normally or any other value if an erroneous exit /// occured. /// - fn processFileSystemMessages(data: ?*anyopaque) callconv(.C) c_int { - const implementation = Implementation.cast(@ptrCast(*EventLoop, data orelse unreachable)); + fn processTasks(userdata: ?*anyopaque) callconv(.C) c_int { + const implementation = Implementation.cast(@ptrCast(*App, userdata orelse unreachable)); while (true) { - while (implementation.file_system_messages) |messages| { - const root = @import("./sys.zig"); + _ = ext.SDL_LockMutex(implementation.message_mutex); - switch (messages.request) { - .exit => return 0, - .log => |*log_request| root.log(log_request.kind, log_request.message), + defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); - .open => |*open_request| open_request.result = - root.open(open_request.mode, open_request.file_system_path.*), - - .close => |*close_request| root.close(close_request.file_access), - - .read_file => |read_request| { - // TODO: Implement. - _ = read_request; - }, - - .seek_file => |seek_request| { - // TODO: Implement. - _ = seek_request; - }, - - .tell_file => |tell_request| { - // TODO: Implement. - _ = tell_request; - }, + while (implementation.messages) |messages| { + switch (messages.kind) { + .quit => return 0, + .task => |task| task.action(task.data), } resume messages.frame; - implementation.file_system_messages = messages.next; + implementation.messages = messages.next; } // TODO: Error check this. - _ = ext.SDL_SemWait(implementation.file_system_semaphore); + _ = ext.SDL_SemWait(implementation.message_semaphore); } } /// + /// Attempts to start the asynchronous worker thread of `implementation` if it hasn't been + /// already. /// + /// [StartError] is returned on failure. 
/// fn start(implementation: *Implementation) StartError!void { - if (implementation.file_system_thread != null) return error.AlreadyStarted; - - implementation.file_system_thread = ext.SDL_CreateThread(processFileSystemMessages, - "File System Worker", implementation) orelse { + if (implementation.message_thread != null) return error.AlreadyStarted; + implementation.message_thread = ext.SDL_CreateThread(processTasks, "File System Worker", implementation) orelse { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create file-system work processor"); @@ -221,127 +174,39 @@ pub const App = opaque { }; /// - /// Closes access to the file referenced by `file_access` via `event_loop`. + /// Returns a reference to the currently loaded data file-system. /// - /// *Note* that nothing happens to `file_access` if it is already closed. - /// - pub fn close(event_loop: *EventLoop, file_access: *FileAccess) void { - var file_system_message = FileSystemMessage{ - .frame = @frame(), - .request = .{.close = .{.file_access = file_access}}, - }; - - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); + pub fn data(app: *App) *const FileSystem { + return &Implementation.cast(app).data_file_system; } /// - /// Writes `message` to the application log with `kind` via `event_loop`. + /// Enqueues `message` to the message processor of `app` to be processed at a later, non- + /// deterministic point. /// - /// *Note* that `message` is not guaranteed to be partly, wholely, or at all written. - /// - pub fn log(event_loop: *EventLoop, kind: LogKind, message: []const u8) void { - var file_system_message = FileSystemMessage{ - .frame = @frame(), + pub fn schedule(app: *App, message: *Message) void { + const implementation = Implementation.cast(app); - .request = .{.log = .{ - .message = message, - .kind = kind, - }}, - }; + // TODO: Error check these. + _ = ext.SDL_LockMutex(implementation.message_mutex); - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); + defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); + + if (implementation.messages) |messages| { + messages.next = message; + } else { + implementation.messages = message; + } + + // TODO: Error check this. + _ = ext.SDL_SemPost(implementation.message_semaphore); } /// - /// Attempts to open access to a file referenced at `file_system_path` using `mode` as the way - /// to open it via `event_loop`. + /// Returns a reference to the currently loaded user file-system. /// - /// A [FileAccess] pointer is returned referencing the opened file or a [OpenError] if the file - /// could not be opened. - /// - /// *Note* that all files are opened in "binary-mode", or Unix-mode. There are no conversions - /// applied when data is accessed from a file. - /// - pub fn open(event_loop: *EventLoop, mode: OpenMode, - file_system_path: FileSystem.Path) OpenError!*FileAccess { - - var file_system_message = FileSystemMessage{ - .frame = @frame(), - - .request = .{.open = .{ - .mode = mode, - .file_system_path = &file_system_path, - }}, - }; - - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); - - return file_system_message.request.open.result; - } - - /// - /// Attempts to read the contents of the file referenced by `file_access` at the current file - /// cursor position into `buffer`. - /// - /// The number of bytes that could be read / fitted into `buffer` is returned or a [FileError] - /// if the file failed to be read. 
- /// - pub fn readFile(event_loop: *EventLoop, file_access: *FileAccess, - buffer: []const u8) FileError!usize { - - var file_system_message = FileSystemMessage{ - .frame = @frame(), - - .request = .{.read_file = .{ - .file_access = file_access, - .buffer = buffer, - }}, - }; - - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); - - return file_system_message.request.read_file.result; - } - - /// - /// Attempts to tell the current file cursor position for the file referenced by `file_access`. - /// - /// Returns the number of bytes into the file that the cursor is relative to its beginning or a - /// [FileError] if the file failed to be queried. - /// - pub fn queryFile(event_loop: *EventLoop, file_access: *FileAccess) FileError!usize { - var file_system_message = FileSystemMessage{ - .frame = @frame(), - .request = .{.tell_file = .{.file_access = file_access}}, - }; - - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); - - return file_system_message.request.tell_file.result; - } - - /// - /// Attempts to seek the file cursor through the file referenced by `file_access` from `origin` - /// to `offset` via `event_loop`, returning a [FileError] if the file failed to be sought. - /// - pub fn seekFile(event_loop: *EventLoop, file_access: *FileAccess, - origin: SeekOrigin, offset: usize) FileError!void { - - var file_system_message = FileSystemMessage{ - .frame = @frame(), - - .request = .{ - .seek_file = .{ - .file_access = file_access, - .origin = origin, - .offset = offset, - }, - }, - }; - - suspend Implementation.cast(event_loop).enqueueFileSystemMessage(&file_system_message); - - return file_system_message.request.seek_file.result; + pub fn user(app: *App) *const FileSystem { + return &Implementation.cast(app).user_file_system; } }; @@ -350,49 +215,358 @@ pub const App = opaque { /// pub const FileAccess = opaque { /// - /// Scans the number of bytes in the file referenced by `file_access` via `event_loop`, returing - /// its byte size or a [FileError] if it failed. + /// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer + /// pointing to a file or the file becomming invalid for whatever reason. /// - pub fn size(file_access: *FileAccess, event_loop: *EventLoop) FileError!usize { - // Save cursor to return to it later. - const origin_cursor = try event_loop.queryFile(file_access); + pub const Error = error { + FileInaccessible, + }; - try event_loop.seekFile(file_access, .tail, 0); - - const ending_cursor = try event_loop.queryFile(file_access); - - // Return to original cursor. - try event_loop.seekFile(file_access, .head, origin_cursor); - - return ending_cursor; + /// + /// Returns `file_access` casted to a [ext.SDL_RWops]. + /// + fn asRwOps(file_access: *FileAccess) *ext.SDL_RWops { + return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), file_access)); } -}; -/// -/// With files typically being backed by a block device, they can produce a variety of errors - -/// from physical to virtual errors - these are all encapsulated by the API as general -/// [Error.Inaccessible] errors. -/// -pub const FileError = error { - Inaccessible, + /// + /// Close the file referenced by `file_access`, invalidating the reference to it and releasing + /// any associated resources. + /// + /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the + /// wasted effort. 
+ /// + pub fn close(file_access: *FileAccess, app: *App) void { + const Task = struct { + file_access: *FileAccess, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + if (ext.SDL_RWclose(task.file_access.asRwOps()) != 0) + ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, + "Closed an invalid file reference"); + } + }; + + var task = Task{.file_access = file_access}; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + } + + /// + /// Attempts to query the current cursor position for the file referenced by `file_access`. + /// + /// Returns the number of bytes into the file that the cursor is relative to its beginning or a + /// [Error] on failure. + /// + pub fn queryCursor(file_access: *FileAccess, app: *App) Error!u64 { + const Task = struct { + file_access: *FileAccess, + result: Error!u64, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + ext.SDL_ClearError(); + + const sought = ext.SDL_RWtell(task.file_access.asRwOps()); + + if (sought < 0) { + task.result = error.FileInaccessible; + + return; + } + + task.result = @intCast(u64, sought); + } + }; + + var task = Task{ + .file_access = file_access, + .result = error.FileInaccessible, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; + } + + /// + /// Attempts to query the current length for the file referenced by `file_access`. + /// + /// Returns the current length of the file at the time of the operation or a [Error] if the file + /// failed to be queried. + /// + pub fn queryLength(file_access: *FileAccess, app: *App) Error!u64 { + const Task = struct { + file_access: *FileAccess, + result: Error!usize, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + ext.SDL_ClearError(); + + const sought = ext.SDL_RWsize(task.file_access.asRwOps()); + + if (sought < 0) { + task.result = error.FileInaccessible; + + return; + } + + task.result = @intCast(u64, sought); + } + }; + + var task = Task{ + .file_access = file_access, + .result = error.FileInaccessible, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; + } + + /// + /// Attempts to read `file_access` from the its current position into `buffer`, while using + /// `app` as the execution context. + /// + /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. 
+ /// + pub fn read(file_access: *FileAccess, app: *App, buffer: []u8) Error!usize { + const Task = struct { + file_access: *FileAccess, + buffer: []u8, + result: Error!usize, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + ext.SDL_ClearError(); + + const buffer_read = ext.SDL_RWread(task.file_access.asRwOps(), + task.buffer.ptr, @sizeOf(u8), task.buffer.len); + + if ((buffer_read == 0) and (ext.SDL_GetError() != null)) { + task.result = error.FileInaccessible; + + return; + } + + task.result = buffer_read; + } + }; + + var task = Task{ + .file_access = file_access, + .buffer = buffer, + .result = error.FileInaccessible, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; + } + + /// + /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes while using + /// `app` as the execution context. + /// + /// Returns [Error] on failure. + /// + pub fn seek(file_access: *FileAccess, app: *App, cursor: u64) Error!void { + const Task = struct { + file_access: *FileAccess, + cursor: u64, + result: Error!void, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + if (task.cursor >= std.math.maxInt(i64)) { + task.result = error.OutOfRange; + + return; + } + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(task.file_access.asRwOps(), + @intCast(i64, task.cursor), ext.RW_SEEK_SET) < 0) { + + task.result = error.FileInaccessible; + + return; + } + + task.result = {}; + } + }; + + var task = Task{ + .file_access = file_access, + .cursor = cursor, + .result = error.FileInaccessible, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; + } + + /// + /// Attempts to seek `file_access` to the end of the file while using `app` as the execution + /// context. + /// + /// Returns [Error] on failure. + /// + pub fn seekToEnd(file_access: *FileAccess, app: *App) Error!void { + const Task = struct { + file_access: *FileAccess, + result: Error!void, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(task.file_access.asRwOps(), 0, ext.RW_SEEK_END) < 0) { + task.result = error.FileInaccessible; + + return; + } + + task.result = {}; + } + }; + + var task = Task{ + .file_access = file_access, + .result = error.FileInaccessible, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; + } }; /// /// Platform-agnostic mechanism for working with an abstraction of the underlying file-system(s) /// available to the application in a sandboxed environment. /// -pub const FileSystem = struct { - data: Root, - user: Root, +pub const FileSystem = union(enum) { + native: []const u8, + archive: []const u8, /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. 
/// pub const Path = struct { - root: *const Root, - length: u16, + file_system: *const FileSystem, + length: u8, buffer: [max]u8, + /// + /// With files typically being backed by a block device, they can produce a variety of + /// errors - from physical to virtual errors - these are all encapsulated by the API as + /// general [OpenError.FileNotFound] errors. + /// + /// When a given [FileSystem] does not support a specified [OpenMode], + /// [OpenError.ModeUnsupported] is used to inform the consuming code that another [OpenMode] + /// should be tried or, if no mode other is suitable, that the resource is effectively + /// unavailable. + /// + pub const OpenError = error { + FileNotFound, + ModeUnsupported, + }; + + /// + /// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, + /// disallowing write access. + /// + /// [OpenMode.overwrite] indicates that an empty file has been created or an existing file + /// has been completely overwritten into. + /// + /// [OpenMode.append] indicates that an existing file that has been opened for reading from + /// and writing to on the end of existing data. + /// + pub const OpenMode = enum { + readonly, + overwrite, + append, + }; + /// /// Returns `true` if the length of `path` is empty, otherwise `false`. /// @@ -404,7 +578,8 @@ pub const FileSystem = struct { /// Returns `true` if `this` is equal to `that`, otherwise `false`. /// pub fn equals(this: Path, that: Path) bool { - return std.mem.eql(u8, this.buffer[0 .. this.length], that.buffer[0 .. that.length]); + return (this.file_system == that.file_system) and + std.mem.eql(u8, this.buffer[0 .. this.length], that.buffer[0 .. that.length]); } /// @@ -414,13 +589,171 @@ pub const FileSystem = struct { /// byte. Because of this, it is not safe to asume that a path may hold [max] individual /// characters. /// - pub const max = 512; + pub const max = 255; /// + /// Attempts to open the file identified by `path` with `mode` as the mode for opening the + /// file and `app` as the execution context. /// + /// Returns a [FileAccess] reference that provides access to the file referenced by `path` + /// or a [OpenError] if it failed. /// - pub fn write(path: Path, writer: io.Writer) bool { - return (writer.write(path.buffer[0 .. path.length]) == path.length); + pub fn open(path: Path, app: *App, mode: OpenMode) OpenError!*FileAccess { + const Task = struct { + path: *const FileSystem.Path, + app: *App, + mode: OpenMode, + result: OpenError!*FileAccess, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + switch (task.path.file_system.*) { + .archive => |archive| { + if (archive.len == 0) { + task.result = error.FileNotFound; + + return; + } + + if (task.mode != .readonly) { + task.result = error.ModeUnsupported; + + return; + } + + var path_buffer = std.mem.zeroes([4096]u8); + + if (archive.len >= path_buffer.len) { + task.result = error.FileNotFound; + + return; + } + + std.mem.copy(u8, path_buffer[0 ..], archive); + + ext.SDL_ClearError(); + + const rw_ops = ext.SDL_RWFromFile(&path_buffer, "rb") orelse { + task.result = error.FileNotFound; + + return; + }; + + while (true) { + var entry = std.mem.zeroes(oar.Entry); + const entry_buffer = std.mem.asBytes(&entry); + + ext.SDL_ClearError(); + + if (ext.SDL_RWread(rw_ops, entry_buffer, @sizeOf(u8), + entry_buffer.len) != entry_buffer.len) { + + task.result = error.FileNotFound; + + return; + } + + if (std.mem.eql(u8, entry.name_buffer[0 .. 
entry.name_length], + task.path.buffer[0 .. task.path.length])) { + + task.result = @ptrCast(*FileAccess, rw_ops); + + return; + } + + { + var to_read = math.roundUp(u64, + entry.file_size, entry_buffer.len); + + while (to_read != 0) { + const read = @intCast(i64, std.math.min( + to_read, std.math.maxInt(i64))); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rw_ops, read, ext.RW_SEEK_CUR) < 0) { + task.result = error.FileNotFound; + + return; + } + + // Cannot be less than zero because it is derived from + // `read`. + to_read -= @intCast(u64, read); + } + } + } + }, + + .native => |native| { + if (native.len == 0) { + task.result = error.FileNotFound; + + return; + } + + var path_buffer = std.mem.zeroes([4096]u8); + const seperator = '/'; + + const seperator_length = + @boolToInt(native[native.len - 1] != seperator); + + if ((native.len + seperator_length + + task.path.length) >= path_buffer.len) { + + task.result = error.FileNotFound; + + return; + } + + std.mem.copy(u8, path_buffer[0 ..], native); + + if (seperator_length != 0) + path_buffer[native.len] = seperator; + + std.mem.copy(u8, path_buffer[native.len .. path_buffer.len], + task.path.buffer[0 .. task.path.length]); + + ext.SDL_ClearError(); + + task.result = @ptrCast(*FileAccess, ext.SDL_RWFromFile( + &path_buffer, switch (task.mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + }) orelse { + + task.result = error.FileNotFound; + + return; + }); + }, + } + } + }; + + var task = Task{ + .mode = mode, + .path = &path, + .app = app, + .result = error.FileNotFound, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }}, + }; + + suspend app.schedule(&message); + + return task.result; } }; @@ -433,59 +766,57 @@ pub const FileSystem = struct { }; /// + /// Attempts to create a [Path] with `file_system` as the file-system root and the path + /// components in `sequences` as a fully qualified path from the root. /// + /// A [Path] value is returned containing the fully qualified path from the file-system root or + /// a [PathError] if it could not be created. 
/// - pub const Root = union(enum) { - native: []const u8, - archive: *tar.Archive, + pub fn joinedPath(file_system: *const FileSystem, + sequences: []const []const u8) PathError!Path { - /// - /// - /// - pub fn joinedPath(root: *const Root, sequences: []const []const u8) PathError!Path { - var path = Path{ - .root = root, - .buffer = std.mem.zeroes([Path.max]u8), - .length = 0, - }; + var path = Path{ + .file_system = file_system, + .buffer = std.mem.zeroes([Path.max]u8), + .length = 0, + }; - if (sequences.len != 0) { - const last_sequence_index = sequences.len - 1; + if (sequences.len != 0) { + const last_sequence_index = sequences.len - 1; - for (sequences) |sequence, index| if (sequence.len != 0) { - var components = mem.Spliterator(u8){ - .source = sequence, - .delimiter = "/", - }; + for (sequences) |sequence, index| if (sequence.len != 0) { + var components = mem.Spliterator(u8){ + .source = sequence, + .delimiter = "/", + }; - while (components.next()) |component| if (component.len != 0) { - for (component) |byte| { - if (path.length == Path.max) return error.TooLong; + while (components.next()) |component| if (component.len != 0) { + for (component) |byte| { + if (path.length == Path.max) return error.TooLong; - path.buffer[path.length] = byte; - path.length += 1; - } + path.buffer[path.length] = byte; + path.length += 1; + } - if (components.hasNext()) { - if (path.length == Path.max) return error.TooLong; - - path.buffer[path.length] = '/'; - path.length += 1; - } - }; - - if (index < last_sequence_index) { + if (components.hasNext()) { if (path.length == Path.max) return error.TooLong; path.buffer[path.length] = '/'; path.length += 1; } }; - } - return path; + if (index < last_sequence_index) { + if (path.length == Path.max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; } - }; + + return path; + } }; /// @@ -503,6 +834,9 @@ pub const GraphicsContext = opaque { const Keys = [256]bool; }; + /// + /// + /// const Implementation = struct { event: Event, }; @@ -526,55 +860,68 @@ pub const GraphicsContext = opaque { }; /// -/// +/// Returns a graphics runner that uses `Errors` as its error set. /// pub fn GraphicsRunner(comptime Errors: type) type { - return fn (*EventLoop, *FileSystem, *GraphicsContext) callconv(.Async) Errors!void; + return fn (*App, *GraphicsContext) callconv(.Async) Errors!void; } /// -/// [LogKind.info] represents a log message which is purely informative and does not indicate -/// any kind of issue. +/// [Log.info] represents a log message which is purely informative and does not indicate any kind +/// of issue. /// -/// [LogKind.debug] represents a log message which is purely for debugging purposes and will -/// only occurs in debug builds. +/// [Log.debug] represents a log message which is purely for debugging purposes and will only occurs +/// in debug builds. /// -/// [LogKind.warning] represents a log message which is a warning about a issue that does not -/// break anything important but is not ideal. +/// [Log.warning] represents a log message which is a warning about a issue that does not break +/// anything important but is not ideal. /// -pub const LogKind = enum(u32) { +pub const Log = enum(u32) { info = ext.SDL_LOG_PRIORITY_INFO, debug = ext.SDL_LOG_PRIORITY_DEBUG, warning = ext.SDL_LOG_PRIORITY_WARN, + + /// + /// Writes `utf8_message` as the log kind identified by `log` with `app` as the execution + /// context. 
+ /// + pub fn write(log: Log, app: *App, utf8_message: []const u8) void { + const Task = struct { + log: Log, + utf8_message: []const u8, + + const Task = @This(); + + fn process(data: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + + ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, @enumToInt(task.log), + "%.*s", task.utf8_message.len, task.utf8_message.ptr); + } + }; + + var task = Task{ + .log = log, + .utf8_message = utf8_message, + }; + + var message = App.Message{ + .frame = @frame(), + + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + }} + }; + + suspend app.schedule(&message); + } }; /// -/// [OpenError.FileNotFound] is a catch-all for when a file could not be located to be opened. This -/// may be as simple as it doesn't exist or the because the underlying file-system will not / -/// cannot give access to it at this time. -/// -pub const OpenError = error { - FileNotFound, -}; - -/// -/// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, -/// disallowing write access. -/// -/// [OpenMode.overwrite] indicates that an empty file has been created or an existing file has -/// been completely overwritten into. -/// -/// [OpenMode.append] indicates that an existing file that has been opened for reading from and -/// writing to on the end of existing data. -/// -pub const OpenMode = enum { - readonly, - overwrite, - append, -}; - -/// +/// [RunError.InitFailure] occurs if a necessary resource fails to be acquired or allocated. /// +/// [RunError.AlreadyRunning] occurs if a runner has already been started. /// pub const RunError = error { InitFailure, @@ -582,75 +929,14 @@ pub const RunError = error { }; /// -/// [SeekOrigin.head] indicates that a seek operation will seek from the offset origin of the -/// file beginning, or "head". +/// Runs a graphical application referenced by `run` with `error` as its error set and `allocator` +/// as the underlying memory allocation strategy for its graphical runtime. /// -/// [SeekOrigin.tail] indicates that a seek operation will seek from the offset origin of the -/// file end, or "tail". +/// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if +/// the underlying runtime fails and is logged. /// -/// [SeekOrigin.cursor] indicates that a seek operation will seek from the current position of -/// the file cursor. -/// -pub const SeekOrigin = enum { - head, - tail, - cursor, -}; - -/// -/// -/// -pub fn close(file_access: *FileAccess) void { - if (ext.SDL_RWclose(@ptrCast(*ext.SDL_RWops, - @alignCast(@alignOf(ext.SDL_RWops), file_access))) != 0) { - - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to close file - may have been already closed"); - } -} - -/// -/// -/// -pub fn log(kind: LogKind, message: []const u8) void { - ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, - @enumToInt(kind), "%.*s", message.len, message.ptr); -} - -/// -/// -/// -pub fn open(mode: OpenMode, file_system_path: FileSystem.Path) OpenError!*FileAccess { - switch (file_system_path.root.*) { - .archive => |archive| { - // TODO: Implement. - _ = archive; - - return error.FileNotFound; - }, - - .native => |native| { - var path_buffer = std.mem.zeroes([4096]u8); - var path = stack.Fixed(u8){.buffer = path_buffer[0 .. 
]}; - - path.pushAll(native) catch return error.FileNotFound; - - if (file_system_path.write(path.writer())) return error.FileNotFound; - - return @ptrCast(?*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - })) orelse error.FileNotFound; - }, - } -} - -/// -/// -/// -pub fn runGraphics(comptime Errors: anytype, allocator: std.mem.Allocator, - comptime run: GraphicsRunner(Errors)) (RunError || Errors)!void { +pub fn runGraphics(comptime Error: anytype, + comptime run: GraphicsRunner(Error)) (RunError || Error)!void { if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize runtime"); @@ -685,29 +971,18 @@ pub fn runGraphics(comptime Errors: anytype, allocator: std.mem.Allocator, defer ext.SDL_DestroyRenderer(renderer); - const user_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse { + const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path"); return error.InitFailure; }; - defer ext.SDL_free(user_prefix); + defer ext.SDL_free(user_path_prefix); - var data_archive = tar.Archive.init(allocator); + var app = App.Implementation.init("./data.oar", user_path_prefix + [0 .. std.mem.len(user_path_prefix)]) catch |err| { - data_archive.load("./data.tar") catch |err| switch (err) { - error.FileNotFound => { - - }, - }; - - var file_system = FileSystem{ - .user = .{.native = user_prefix[0 .. std.mem.len(user_prefix)]}, - .data = .{.archive = &data_archive}, - }; - - var event_loop = EventLoop.Implementation.init() catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { error.OutOfMemory => "Failed to allocate necessary memory", error.OutOfMutexes => "Failed to create file-system work lock", @@ -717,16 +992,14 @@ pub fn runGraphics(comptime Errors: anytype, allocator: std.mem.Allocator, return error.InitFailure; }; - defer event_loop.deinit(); + defer app.deinit(); - event_loop.start() catch |err| { - switch (err) { + app.start() catch |err| { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { // Not possible for it to have already been started. 
error.AlreadyStarted => unreachable, - - error.OutOfThreads => ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to start file-system work processor"), - } + error.OutOfThreads => "Failed to start file-system work processor", + }); return error.InitFailure; }; @@ -737,6 +1010,5 @@ pub fn runGraphics(comptime Errors: anytype, allocator: std.mem.Allocator, }, }; - return run(@ptrCast(*EventLoop, &event_loop), &file_system, - @ptrCast(*GraphicsContext, &graphics_context)); + return run(@ptrCast(*App, &app), @ptrCast(*GraphicsContext, &graphics_context)); } diff --git a/src/tar.zig b/src/tar.zig deleted file mode 100644 index 7adb58e..0000000 --- a/src/tar.zig +++ /dev/null @@ -1,29 +0,0 @@ -const std = @import("std"); - -/// -/// -/// -pub const Archive = struct { - allocator: std.mem.Allocator, - - pub const LoadError = error { - FileNotFound, - }; - - /// - /// - /// - pub fn init(allocator: std.mem.Allocator) Archive { - return Archive{ - .allocator = allocator, - }; - } - - /// - /// - /// - pub fn load(archive: *Archive, file_path: []const u8) LoadError!void { - _ = file_path; - _ = archive; - } -}; -- 2.34.1 From 84664b59628a016e761f99eccada1f6a31b0c631 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 10 Oct 2022 10:15:45 +0100 Subject: [PATCH 14/93] Rename "App" to "AppContext" --- src/main.zig | 2 +- src/sys.zig | 107 ++++++++++++++++++++++++++------------------------- 2 files changed, 55 insertions(+), 54 deletions(-) diff --git a/src/main.zig b/src/main.zig index a3bcbf8..f894182 100644 --- a/src/main.zig +++ b/src/main.zig @@ -21,7 +21,7 @@ test { _ = sys; } -fn run(app: *sys.App, graphics: *sys.GraphicsContext) anyerror!void { +fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); diff --git a/src/sys.zig b/src/sys.zig index e0c60b7..a31e086 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -12,7 +12,7 @@ const std = @import("std"); /// /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. /// -pub const App = opaque { +pub const AppContext = opaque { /// /// Linked list of asynchronous messages chained together to be processed by the work processor. /// @@ -71,13 +71,13 @@ pub const App = opaque { }; /// - /// Casts `app` to a [Implementation] reference. + /// Casts `app_context` to a [Implementation] reference. /// - /// *Note* that if `app` does not have the same alignment as [Implementation], safety- + /// *Note* that if `app_context` does not have the same alignment as [Implementation], safety- /// checked undefined behavior will occur. /// - fn cast(app: *App) *Implementation { - return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), app)); + fn cast(app_context: *AppContext) *Implementation { + return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), app_context)); } /// @@ -90,7 +90,7 @@ pub const App = opaque { .kind = .quit, }; - @ptrCast(*App, implementation).schedule(&message); + @ptrCast(*AppContext, implementation).schedule(&message); { var status = @as(c_int, 0); @@ -132,7 +132,7 @@ pub const App = opaque { /// occured. 
/// fn processTasks(userdata: ?*anyopaque) callconv(.C) c_int { - const implementation = Implementation.cast(@ptrCast(*App, userdata orelse unreachable)); + const implementation = Implementation.cast(@ptrCast(*AppContext, userdata orelse unreachable)); while (true) { _ = ext.SDL_LockMutex(implementation.message_mutex); @@ -164,7 +164,9 @@ pub const App = opaque { fn start(implementation: *Implementation) StartError!void { if (implementation.message_thread != null) return error.AlreadyStarted; - implementation.message_thread = ext.SDL_CreateThread(processTasks, "File System Worker", implementation) orelse { + implementation.message_thread = ext.SDL_CreateThread(processTasks, + "File System Worker", implementation) orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create file-system work processor"); @@ -176,16 +178,16 @@ pub const App = opaque { /// /// Returns a reference to the currently loaded data file-system. /// - pub fn data(app: *App) *const FileSystem { - return &Implementation.cast(app).data_file_system; + pub fn data(app_context: *AppContext) *const FileSystem { + return &Implementation.cast(app_context).data_file_system; } /// - /// Enqueues `message` to the message processor of `app` to be processed at a later, non- + /// Enqueues `message` to the message processor of `app_context` to be processed at a later, non- /// deterministic point. /// - pub fn schedule(app: *App, message: *Message) void { - const implementation = Implementation.cast(app); + pub fn schedule(app_context: *AppContext, message: *Message) void { + const implementation = Implementation.cast(app_context); // TODO: Error check these. _ = ext.SDL_LockMutex(implementation.message_mutex); @@ -205,8 +207,8 @@ pub const App = opaque { /// /// Returns a reference to the currently loaded user file-system. /// - pub fn user(app: *App) *const FileSystem { - return &Implementation.cast(app).user_file_system; + pub fn user(app_context: *AppContext) *const FileSystem { + return &Implementation.cast(app_context).user_file_system; } }; @@ -236,7 +238,7 @@ pub const FileAccess = opaque { /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the /// wasted effort. /// - pub fn close(file_access: *FileAccess, app: *App) void { + pub fn close(file_access: *FileAccess, app_context: *AppContext) void { const Task = struct { file_access: *FileAccess, @@ -253,7 +255,7 @@ pub const FileAccess = opaque { var task = Task{.file_access = file_access}; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -262,7 +264,7 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); } /// @@ -271,7 +273,7 @@ pub const FileAccess = opaque { /// Returns the number of bytes into the file that the cursor is relative to its beginning or a /// [Error] on failure. 
/// - pub fn queryCursor(file_access: *FileAccess, app: *App) Error!u64 { + pub fn queryCursor(file_access: *FileAccess, app_context: *AppContext) Error!u64 { const Task = struct { file_access: *FileAccess, result: Error!u64, @@ -300,7 +302,7 @@ pub const FileAccess = opaque { .result = error.FileInaccessible, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -309,7 +311,7 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } @@ -320,7 +322,7 @@ pub const FileAccess = opaque { /// Returns the current length of the file at the time of the operation or a [Error] if the file /// failed to be queried. /// - pub fn queryLength(file_access: *FileAccess, app: *App) Error!u64 { + pub fn queryLength(file_access: *FileAccess, app_context: *AppContext) Error!u64 { const Task = struct { file_access: *FileAccess, result: Error!usize, @@ -349,7 +351,7 @@ pub const FileAccess = opaque { .result = error.FileInaccessible, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -358,18 +360,18 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } /// /// Attempts to read `file_access` from the its current position into `buffer`, while using - /// `app` as the execution context. + /// `app_context` as the execution context. /// /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. /// - pub fn read(file_access: *FileAccess, app: *App, buffer: []u8) Error!usize { + pub fn read(file_access: *FileAccess, app_context: *AppContext, buffer: []u8) Error!usize { const Task = struct { file_access: *FileAccess, buffer: []u8, @@ -401,7 +403,7 @@ pub const FileAccess = opaque { .result = error.FileInaccessible, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -410,18 +412,18 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } /// /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes while using - /// `app` as the execution context. + /// `app_context` as the execution context. /// /// Returns [Error] on failure. /// - pub fn seek(file_access: *FileAccess, app: *App, cursor: u64) Error!void { + pub fn seek(file_access: *FileAccess, app_context: *AppContext, cursor: u64) Error!void { const Task = struct { file_access: *FileAccess, cursor: u64, @@ -458,7 +460,7 @@ pub const FileAccess = opaque { .result = error.FileInaccessible, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -467,18 +469,18 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } /// - /// Attempts to seek `file_access` to the end of the file while using `app` as the execution + /// Attempts to seek `file_access` to the end of the file while using `app_context` as the execution /// context. /// /// Returns [Error] on failure. 
/// - pub fn seekToEnd(file_access: *FileAccess, app: *App) Error!void { + pub fn seekToEnd(file_access: *FileAccess, app_context: *AppContext) Error!void { const Task = struct { file_access: *FileAccess, result: Error!void, @@ -505,7 +507,7 @@ pub const FileAccess = opaque { .result = error.FileInaccessible, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -514,7 +516,7 @@ pub const FileAccess = opaque { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } @@ -593,15 +595,15 @@ pub const FileSystem = union(enum) { /// /// Attempts to open the file identified by `path` with `mode` as the mode for opening the - /// file and `app` as the execution context. + /// file and `app_context` as the execution context. /// /// Returns a [FileAccess] reference that provides access to the file referenced by `path` /// or a [OpenError] if it failed. /// - pub fn open(path: Path, app: *App, mode: OpenMode) OpenError!*FileAccess { + pub fn open(path: Path, app_context: *AppContext, mode: OpenMode) OpenError!*FileAccess { const Task = struct { path: *const FileSystem.Path, - app: *App, + app_context: *AppContext, mode: OpenMode, result: OpenError!*FileAccess, @@ -738,11 +740,11 @@ pub const FileSystem = union(enum) { var task = Task{ .mode = mode, .path = &path, - .app = app, + .app_context = app_context, .result = error.FileNotFound, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -751,7 +753,7 @@ pub const FileSystem = union(enum) { }}, }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); return task.result; } @@ -863,7 +865,7 @@ pub const GraphicsContext = opaque { /// Returns a graphics runner that uses `Errors` as its error set. /// pub fn GraphicsRunner(comptime Errors: type) type { - return fn (*App, *GraphicsContext) callconv(.Async) Errors!void; + return fn (*AppContext, *GraphicsContext) callconv(.Async) Errors!void; } /// @@ -882,10 +884,10 @@ pub const Log = enum(u32) { warning = ext.SDL_LOG_PRIORITY_WARN, /// - /// Writes `utf8_message` as the log kind identified by `log` with `app` as the execution + /// Writes `utf8_message` as the log kind identified by `log` with `app_context` as the execution /// context. /// - pub fn write(log: Log, app: *App, utf8_message: []const u8) void { + pub fn write(log: Log, app_context: *AppContext, utf8_message: []const u8) void { const Task = struct { log: Log, utf8_message: []const u8, @@ -905,7 +907,7 @@ pub const Log = enum(u32) { .utf8_message = utf8_message, }; - var message = App.Message{ + var message = AppContext.Message{ .frame = @frame(), .kind = .{.task = .{ @@ -914,7 +916,7 @@ pub const Log = enum(u32) { }} }; - suspend app.schedule(&message); + suspend app_context.schedule(&message); } }; @@ -972,15 +974,14 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to load user path"); + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path"); return error.InitFailure; }; defer ext.SDL_free(user_path_prefix); - var app = App.Implementation.init("./data.oar", user_path_prefix + var app_context = AppContext.Implementation.init("./data.oar", user_path_prefix [0 .. 
std.mem.len(user_path_prefix)]) catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { @@ -992,9 +993,9 @@ pub fn runGraphics(comptime Error: anytype, return error.InitFailure; }; - defer app.deinit(); + defer app_context.deinit(); - app.start() catch |err| { + app_context.start() catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { // Not possible for it to have already been started. error.AlreadyStarted => unreachable, @@ -1010,5 +1011,5 @@ pub fn runGraphics(comptime Error: anytype, }, }; - return run(@ptrCast(*App, &app), @ptrCast(*GraphicsContext, &graphics_context)); + return run(@ptrCast(*AppContext, &app_context), @ptrCast(*GraphicsContext, &graphics_context)); } -- 2.34.1 From 52f4657872fac378f07e1cf0fbcf7f1fda3c5f9f Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 10 Oct 2022 17:46:25 +0100 Subject: [PATCH 15/93] Replace duplicated async interface with generic function --- src/main.zig | 10 +- src/meta.zig | 10 + src/sys.zig | 590 ++++++++++++++------------------------------------- 3 files changed, 170 insertions(+), 440 deletions(-) create mode 100644 src/meta.zig diff --git a/src/main.zig b/src/main.zig index f894182..05ac72f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -27,20 +27,20 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { defer _ = gpa.deinit(); { - const file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(app, .readonly); + const file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(.readonly); - defer file_access.close(app); + defer file_access.close(); - const file_size = try file_access.queryLength(app); + const file_size = try file_access.queryLength(); const allocator = gpa.allocator(); const buffer = try allocator.alloc(u8, file_size); defer allocator.free(buffer); - if ((try file_access.read(app, buffer)) != file_size) + if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; - sys.Log.debug.write(app, buffer); + sys.Log.debug.write(buffer); } while (graphics.poll()) |_| { diff --git a/src/meta.zig b/src/meta.zig new file mode 100644 index 0000000..9fa2c48 --- /dev/null +++ b/src/meta.zig @@ -0,0 +1,10 @@ +/// +/// Returns the return type of the function type `Fn`. +/// +pub fn FnReturn(comptime Fn: type) type { + const type_info = @typeInfo(Fn); + + if (type_info != .Fn) @compileError("`Fn` must be a function type"); + + return type_info.Fn.return_type orelse void; +} diff --git a/src/sys.zig b/src/sys.zig index a31e086..edaf8db 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -5,6 +5,7 @@ const ext = @cImport({ const io = @import("./io.zig"); const math = @import("./math.zig"); const mem = @import("./mem.zig"); +const meta = @import("./meta.zig"); const oar = @import("./oar.zig"); const stack = @import("./stack.zig"); const std = @import("std"); @@ -18,7 +19,6 @@ pub const AppContext = opaque { /// const Message = struct { next: ?*Message = null, - frame: anyframe, kind: union(enum) { quit, @@ -26,6 +26,7 @@ pub const AppContext = opaque { task: struct { data: *anyopaque, action: fn (*anyopaque) void, + frame: anyframe, }, }, }; @@ -85,12 +86,9 @@ pub const AppContext = opaque { /// processes quit and waits for them to do so before freeing any resources. 
/// fn deinit(implementation: *Implementation) void { - var message = Message{ - .frame = @frame(), - .kind = .quit, - }; + var message = Message{.kind = .quit}; - @ptrCast(*AppContext, implementation).schedule(&message); + implementation.enqueue(&message); { var status = @as(c_int, 0); @@ -108,13 +106,37 @@ pub const AppContext = opaque { ext.SDL_DestroySemaphore(implementation.message_semaphore); } + /// + /// Enqueues `message` to the message processor of `implementation` to be processed at a + /// later, non-deterministic point in time. + /// + fn enqueue(implementation: *Implementation, message: *Message) void { + { + // TODO: Error check these. + _ = ext.SDL_LockMutex(implementation.message_mutex); + + defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); + + if (implementation.messages) |messages| { + messages.next = message; + } else { + implementation.messages = message; + } + } + + // TODO: Error check this. + _ = ext.SDL_SemPost(implementation.message_semaphore); + } + /// /// Initializes a new [Implemenation] with `data_archive_path` as the read-only data archive /// to read from and `user_path_prefix` as the native writable user data directory. /// /// Returns the created [Implementation] value on success or [InitError] on failure. /// - fn init(data_archive_path: []const u8, user_path_prefix: []const u8) InitError!Implementation { + fn init(data_archive_path: []const u8, + user_path_prefix: []const u8) InitError!Implementation { + return Implementation{ .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, @@ -132,26 +154,31 @@ pub const AppContext = opaque { /// occured. /// fn processTasks(userdata: ?*anyopaque) callconv(.C) c_int { - const implementation = Implementation.cast(@ptrCast(*AppContext, userdata orelse unreachable)); + const implementation = Implementation.cast( + @ptrCast(*AppContext, userdata orelse unreachable)); while (true) { + // TODO: Error check these. + _ = ext.SDL_SemWait(implementation.message_semaphore); _ = ext.SDL_LockMutex(implementation.message_mutex); defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); while (implementation.messages) |messages| { switch (messages.kind) { - .quit => return 0, - .task => |task| task.action(task.data), - } + .quit => { + return 0; + }, - resume messages.frame; + .task => |task| { + task.action(task.data); + + resume task.frame; + }, + } implementation.messages = messages.next; } - - // TODO: Error check this. - _ = ext.SDL_SemWait(implementation.message_semaphore); } } @@ -183,25 +210,37 @@ pub const AppContext = opaque { } /// - /// Enqueues `message` to the message processor of `app_context` to be processed at a later, non- - /// deterministic point. /// - pub fn schedule(app_context: *AppContext, message: *Message) void { - const implementation = Implementation.cast(app_context); + /// + pub fn schedule(app_context: *AppContext, procedure: anytype, arguments: anytype) meta.FnReturn(@TypeOf(procedure)) { + const Task = struct { + procedure: @TypeOf(procedure), + arguments: *@TypeOf(arguments), + result: meta.FnReturn(@TypeOf(procedure)), - // TODO: Error check these. 
- _ = ext.SDL_LockMutex(implementation.message_mutex); + const Task = @This(); - defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); + fn process(userdata: *anyopaque) void { + const task = @ptrCast(*Task, @alignCast(@alignOf(Task), userdata)); - if (implementation.messages) |messages| { - messages.next = message; - } else { - implementation.messages = message; - } + task.result = @call(.{}, task.procedure, task.arguments.*); + } + }; - // TODO: Error check this. - _ = ext.SDL_SemPost(implementation.message_semaphore); + var task = Task{ + .procedure = procedure, + .arguments = &arguments, + }; + + var message = AppContext.Message{ + .kind = .{.task = .{ + .data = &task, + .action = Task.process, + .frame = @frame(), + }}, + }; + + suspend Implementation.cast(app_context).enqueue(&message); } /// @@ -232,39 +271,15 @@ pub const FileAccess = opaque { } /// - /// Close the file referenced by `file_access`, invalidating the reference to it and releasing - /// any associated resources. + /// Close the file referenced by `file_access` on the main thread, invalidating the reference to + /// it and releasing any associated resources. /// /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the /// wasted effort. /// - pub fn close(file_access: *FileAccess, app_context: *AppContext) void { - const Task = struct { - file_access: *FileAccess, - - const Task = @This(); - - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); - - if (ext.SDL_RWclose(task.file_access.asRwOps()) != 0) - ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, - "Closed an invalid file reference"); - } - }; - - var task = Task{.file_access = file_access}; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); + pub fn close(file_access: *FileAccess) void { + if (ext.SDL_RWclose(file_access.asRwOps()) != 0) + ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, "Closed an invalid file reference"); } /// @@ -273,47 +288,14 @@ pub const FileAccess = opaque { /// Returns the number of bytes into the file that the cursor is relative to its beginning or a /// [Error] on failure. /// - pub fn queryCursor(file_access: *FileAccess, app_context: *AppContext) Error!u64 { - const Task = struct { - file_access: *FileAccess, - result: Error!u64, + pub fn queryCursor(file_access: *FileAccess) Error!u64 { + ext.SDL_ClearError(); - const Task = @This(); + const sought = ext.SDL_RWtell(file_access.asRwOps()); - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + if (sought < 0) return error.FileInaccessible; - ext.SDL_ClearError(); - - const sought = ext.SDL_RWtell(task.file_access.asRwOps()); - - if (sought < 0) { - task.result = error.FileInaccessible; - - return; - } - - task.result = @intCast(u64, sought); - } - }; - - var task = Task{ - .file_access = file_access, - .result = error.FileInaccessible, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); - - return task.result; + return @intCast(u64, sought); } /// @@ -322,156 +304,51 @@ pub const FileAccess = opaque { /// Returns the current length of the file at the time of the operation or a [Error] if the file /// failed to be queried. 
/// - pub fn queryLength(file_access: *FileAccess, app_context: *AppContext) Error!u64 { - const Task = struct { - file_access: *FileAccess, - result: Error!usize, + pub fn queryLength(file_access: *FileAccess) Error!u64 { + ext.SDL_ClearError(); - const Task = @This(); + const sought = ext.SDL_RWsize(file_access.asRwOps()); - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + if (sought < 0) return error.FileInaccessible; - ext.SDL_ClearError(); - - const sought = ext.SDL_RWsize(task.file_access.asRwOps()); - - if (sought < 0) { - task.result = error.FileInaccessible; - - return; - } - - task.result = @intCast(u64, sought); - } - }; - - var task = Task{ - .file_access = file_access, - .result = error.FileInaccessible, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); - - return task.result; + return @intCast(u64, sought); } /// - /// Attempts to read `file_access` from the its current position into `buffer`, while using - /// `app_context` as the execution context. + /// Attempts to read `file_access` from the its current position into `buffer`. /// /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. /// - pub fn read(file_access: *FileAccess, app_context: *AppContext, buffer: []u8) Error!usize { - const Task = struct { - file_access: *FileAccess, - buffer: []u8, - result: Error!usize, + pub fn read(file_access: *FileAccess, buffer: []u8) Error!usize { + ext.SDL_ClearError(); - const Task = @This(); + const buffer_read = + ext.SDL_RWread(file_access.asRwOps(), buffer.ptr, @sizeOf(u8), buffer.len); - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + if ((buffer_read == 0) and (ext.SDL_GetError() != null)) return error.FileInaccessible; - ext.SDL_ClearError(); - - const buffer_read = ext.SDL_RWread(task.file_access.asRwOps(), - task.buffer.ptr, @sizeOf(u8), task.buffer.len); - - if ((buffer_read == 0) and (ext.SDL_GetError() != null)) { - task.result = error.FileInaccessible; - - return; - } - - task.result = buffer_read; - } - }; - - var task = Task{ - .file_access = file_access, - .buffer = buffer, - .result = error.FileInaccessible, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); - - return task.result; + return buffer_read; } /// - /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes while using - /// `app_context` as the execution context. + /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes. /// /// Returns [Error] on failure. 
/// - pub fn seek(file_access: *FileAccess, app_context: *AppContext, cursor: u64) Error!void { - const Task = struct { - file_access: *FileAccess, - cursor: u64, - result: Error!void, + pub fn seek(file_access: *FileAccess, cursor: u64) Error!void { + var to_seek = cursor; - const Task = @This(); + while (to_seek != 0) { + const sought = @intCast(i64, std.math.min(to_seek, std.math.maxInt(i64))); - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + ext.SDL_ClearError(); - if (task.cursor >= std.math.maxInt(i64)) { - task.result = error.OutOfRange; + if (ext.SDL_RWseek(file_access.asRwOps(), sought, ext.RW_SEEK_CUR) < 0) + return error.FileInaccessible; - return; - } - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(task.file_access.asRwOps(), - @intCast(i64, task.cursor), ext.RW_SEEK_SET) < 0) { - - task.result = error.FileInaccessible; - - return; - } - - task.result = {}; - } - }; - - var task = Task{ - .file_access = file_access, - .cursor = cursor, - .result = error.FileInaccessible, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); - - return task.result; + // Cannot be less than zero because it is derived from `read`. + to_seek -= @intCast(u64, sought); + } } /// @@ -480,45 +357,11 @@ pub const FileAccess = opaque { /// /// Returns [Error] on failure. /// - pub fn seekToEnd(file_access: *FileAccess, app_context: *AppContext) Error!void { - const Task = struct { - file_access: *FileAccess, - result: Error!void, + pub fn seekToEnd(file_access: *FileAccess) Error!void { + ext.SDL_ClearError(); - const Task = @This(); - - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(task.file_access.asRwOps(), 0, ext.RW_SEEK_END) < 0) { - task.result = error.FileInaccessible; - - return; - } - - task.result = {}; - } - }; - - var task = Task{ - .file_access = file_access, - .result = error.FileInaccessible, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; - - suspend app_context.schedule(&message); - - return task.result; + if (ext.SDL_RWseek(file_access.asRwOps(), 0, ext.RW_SEEK_END) < 0) + return error.FileInaccessible; } }; @@ -595,167 +438,72 @@ pub const FileSystem = union(enum) { /// /// Attempts to open the file identified by `path` with `mode` as the mode for opening the - /// file and `app_context` as the execution context. + /// file. /// /// Returns a [FileAccess] reference that provides access to the file referenced by `path` /// or a [OpenError] if it failed. 
/// - pub fn open(path: Path, app_context: *AppContext, mode: OpenMode) OpenError!*FileAccess { - const Task = struct { - path: *const FileSystem.Path, - app_context: *AppContext, - mode: OpenMode, - result: OpenError!*FileAccess, + pub fn open(path: Path, mode: OpenMode) OpenError!*FileAccess { + switch (path.file_system.*) { + .archive => |archive| { + if (archive.len == 0) return error.FileNotFound; - const Task = @This(); + if (mode != .readonly) return error.ModeUnsupported; - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); + var path_buffer = std.mem.zeroes([4096]u8); - switch (task.path.file_system.*) { - .archive => |archive| { - if (archive.len == 0) { - task.result = error.FileNotFound; + if (archive.len >= path_buffer.len) return error.FileNotFound; - return; - } + std.mem.copy(u8, path_buffer[0 ..], archive); - if (task.mode != .readonly) { - task.result = error.ModeUnsupported; + const file_access = @ptrCast(*FileAccess, ext.SDL_RWFromFile( + &path_buffer, "rb") orelse return error.FileNotFound); - return; - } + while (true) { + var entry = std.mem.zeroes(oar.Entry); + const entry_buffer = std.mem.asBytes(&entry); - var path_buffer = std.mem.zeroes([4096]u8); + if ((file_access.read(entry_buffer) catch return + error.FileNotFound) != entry_buffer.len) return error.FileNotFound; - if (archive.len >= path_buffer.len) { - task.result = error.FileNotFound; + if (std.mem.eql(u8, entry.name_buffer[0 .. entry. + name_length], path.buffer[0 .. path.length])) { - return; - } + return file_access; + } - std.mem.copy(u8, path_buffer[0 ..], archive); - - ext.SDL_ClearError(); - - const rw_ops = ext.SDL_RWFromFile(&path_buffer, "rb") orelse { - task.result = error.FileNotFound; - - return; - }; - - while (true) { - var entry = std.mem.zeroes(oar.Entry); - const entry_buffer = std.mem.asBytes(&entry); - - ext.SDL_ClearError(); - - if (ext.SDL_RWread(rw_ops, entry_buffer, @sizeOf(u8), - entry_buffer.len) != entry_buffer.len) { - - task.result = error.FileNotFound; - - return; - } - - if (std.mem.eql(u8, entry.name_buffer[0 .. entry.name_length], - task.path.buffer[0 .. task.path.length])) { - - task.result = @ptrCast(*FileAccess, rw_ops); - - return; - } - - { - var to_read = math.roundUp(u64, - entry.file_size, entry_buffer.len); - - while (to_read != 0) { - const read = @intCast(i64, std.math.min( - to_read, std.math.maxInt(i64))); - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(rw_ops, read, ext.RW_SEEK_CUR) < 0) { - task.result = error.FileNotFound; - - return; - } - - // Cannot be less than zero because it is derived from - // `read`. - to_read -= @intCast(u64, read); - } - } - } - }, - - .native => |native| { - if (native.len == 0) { - task.result = error.FileNotFound; - - return; - } - - var path_buffer = std.mem.zeroes([4096]u8); - const seperator = '/'; - - const seperator_length = - @boolToInt(native[native.len - 1] != seperator); - - if ((native.len + seperator_length + - task.path.length) >= path_buffer.len) { - - task.result = error.FileNotFound; - - return; - } - - std.mem.copy(u8, path_buffer[0 ..], native); - - if (seperator_length != 0) - path_buffer[native.len] = seperator; - - std.mem.copy(u8, path_buffer[native.len .. path_buffer.len], - task.path.buffer[0 .. 
task.path.length]); - - ext.SDL_ClearError(); - - task.result = @ptrCast(*FileAccess, ext.SDL_RWFromFile( - &path_buffer, switch (task.mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - }) orelse { - - task.result = error.FileNotFound; - - return; - }); - }, + file_access.seek(math.roundUp(u64, entry.file_size, + entry_buffer.len)) catch return error.FileNotFound; } - } - }; + }, - var task = Task{ - .mode = mode, - .path = &path, - .app_context = app_context, - .result = error.FileNotFound, - }; + .native => |native| { + if (native.len == 0) return error.FileNotFound; - var message = AppContext.Message{ - .frame = @frame(), + var path_buffer = std.mem.zeroes([4096]u8); + const seperator = '/'; + const seperator_length = @boolToInt(native[native.len - 1] != seperator); - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }}, - }; + if ((native.len + seperator_length + path.length) >= path_buffer.len) + return error.FileNotFound; - suspend app_context.schedule(&message); + std.mem.copy(u8, path_buffer[0 ..], native); - return task.result; + if (seperator_length != 0) + path_buffer[native.len] = seperator; + + std.mem.copy(u8, path_buffer[native.len .. path_buffer.len], + path.buffer[0 .. path.length]); + + ext.SDL_ClearError(); + + return @ptrCast(*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + }) orelse return error.FileNotFound); + }, + } } }; @@ -884,39 +632,11 @@ pub const Log = enum(u32) { warning = ext.SDL_LOG_PRIORITY_WARN, /// - /// Writes `utf8_message` as the log kind identified by `log` with `app_context` as the execution - /// context. + /// Writes `utf8_message` as the log kind identified by `log`. /// - pub fn write(log: Log, app_context: *AppContext, utf8_message: []const u8) void { - const Task = struct { - log: Log, - utf8_message: []const u8, - - const Task = @This(); - - fn process(data: *anyopaque) void { - const task = @ptrCast(*Task, @alignCast(@alignOf(Task), data)); - - ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, @enumToInt(task.log), - "%.*s", task.utf8_message.len, task.utf8_message.ptr); - } - }; - - var task = Task{ - .log = log, - .utf8_message = utf8_message, - }; - - var message = AppContext.Message{ - .frame = @frame(), - - .kind = .{.task = .{ - .data = &task, - .action = Task.process, - }} - }; - - suspend app_context.schedule(&message); + pub fn write(log: Log, utf8_message: []const u8) void { + ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, + @enumToInt(log), "%.*s", utf8_message.len, utf8_message.ptr); } }; -- 2.34.1 From e108486c17c33af715e593fdd12c57fe77cb674b Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 11 Oct 2022 01:03:02 +0100 Subject: [PATCH 16/93] Add data validation to Oar entries --- src/oar.zig | 36 +++++++++++++++++++++++++++++++++++- src/sys.zig | 20 +++++++------------- 2 files changed, 42 insertions(+), 14 deletions(-) diff --git a/src/oar.zig b/src/oar.zig index 7660c8c..b85304a 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -7,8 +7,42 @@ const sys = @import("./sys.zig"); /// Typically, following this block in memory is the file data it holds the meta-information for. /// pub const Entry = extern struct { + signature: [3]u8, + revision: u8, name_length: u8, name_buffer: [255]u8 = std.mem.zeroes([255]u8), file_size: u64, - padding: [248]u8, + padding: [244]u8, + + /// + /// Returns `true` if `entry` correctly identifies itself as a valid Oar entry, otherwise + /// `false`. 
+ /// + pub fn isValid(entry: Entry) bool { + return std.mem.eql(u8, &entry.signature, "oar"); + } + + /// + /// Attempts to read an [Entry] from `file_access` at its current cursor position. + /// + /// Returns the read [Entry] value or `null` if the end of file is reached before completing the + /// read. + /// + pub fn read(file_access: *sys.FileAccess) sys.FileAccess.Error!?Entry { + var entry = std.mem.zeroes(Entry); + const origin = try file_access.queryCursor(); + + if ((try file_access.read(std.mem.asBytes(&entry))) != @sizeOf(Entry)) { + try file_access.seek(origin); + + return null; + } + + return entry; + } + + /// + /// Magic identifier used to validate [Entry] data. + /// + const signature_magic = "oar"; }; diff --git a/src/sys.zig b/src/sys.zig index edaf8db..fa7b1a3 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -459,22 +459,16 @@ pub const FileSystem = union(enum) { const file_access = @ptrCast(*FileAccess, ext.SDL_RWFromFile( &path_buffer, "rb") orelse return error.FileNotFound); - while (true) { - var entry = std.mem.zeroes(oar.Entry); - const entry_buffer = std.mem.asBytes(&entry); + while (oar.Entry.read(file_access) catch return error.FileNotFound) |entry| { + if (!entry.isValid()) break; - if ((file_access.read(entry_buffer) catch return - error.FileNotFound) != entry_buffer.len) return error.FileNotFound; + if (std.mem.eql(u8, entry.name_buffer[0 .. entry.name_length], + path.buffer[0 .. path.length])) return file_access; - if (std.mem.eql(u8, entry.name_buffer[0 .. entry. - name_length], path.buffer[0 .. path.length])) { - - return file_access; - } - - file_access.seek(math.roundUp(u64, entry.file_size, - entry_buffer.len)) catch return error.FileNotFound; + file_access.seek(entry.file_size) catch break; } + + return error.FileNotFound; }, .native => |native| { -- 2.34.1 From 6ecf6779e5571d009f7bddbcf40abbb693c5efe5 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 11 Oct 2022 01:14:47 +0100 Subject: [PATCH 17/93] Remove unused math module logic --- src/math.zig | 6 ------ src/sys.zig | 1 - 2 files changed, 7 deletions(-) delete mode 100644 src/math.zig diff --git a/src/math.zig b/src/math.zig deleted file mode 100644 index 66c8cc9..0000000 --- a/src/math.zig +++ /dev/null @@ -1,6 +0,0 @@ -/// -/// Rounds the `Number` `value` up to the nearest `multiple`. -/// -pub fn roundUp(comptime Number: type, value: Number, multiple: Number) Number { - return value + @mod(@mod(multiple - value, multiple), multiple); -} diff --git a/src/sys.zig b/src/sys.zig index fa7b1a3..6d6fb4f 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -3,7 +3,6 @@ const ext = @cImport({ }); const io = @import("./io.zig"); -const math = @import("./math.zig"); const mem = @import("./mem.zig"); const meta = @import("./meta.zig"); const oar = @import("./oar.zig"); -- 2.34.1 From 287a054d228b3b12d8b7721605e7b5cbb5af3891 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 11 Oct 2022 01:15:19 +0100 Subject: [PATCH 18/93] Tidy up Oar Entry default data --- src/oar.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/oar.zig b/src/oar.zig index b85304a..be4f3d4 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -7,19 +7,19 @@ const sys = @import("./sys.zig"); /// Typically, following this block in memory is the file data it holds the meta-information for. 
/// pub const Entry = extern struct { - signature: [3]u8, + signature: [3]u8 = signature_magic, revision: u8, - name_length: u8, + name_length: u8 = 0, name_buffer: [255]u8 = std.mem.zeroes([255]u8), file_size: u64, - padding: [244]u8, + padding: [244]u8 = std.mem.zeroes([244]u8), /// /// Returns `true` if `entry` correctly identifies itself as a valid Oar entry, otherwise /// `false`. /// pub fn isValid(entry: Entry) bool { - return std.mem.eql(u8, &entry.signature, "oar"); + return std.mem.eql(u8, &entry.signature, signature_magic[0 ..]); } /// @@ -44,5 +44,5 @@ pub const Entry = extern struct { /// /// Magic identifier used to validate [Entry] data. /// - const signature_magic = "oar"; + const signature_magic = [3]u8{'o', 'a', 'r'}; }; -- 2.34.1 From 961e79200d803e8ba9e4ec953a203b3f08e22102 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 13 Oct 2022 14:37:38 +0100 Subject: [PATCH 19/93] Implement Oar file reading logic in archive file system --- src/main.zig | 2 +- src/oar.zig | 38 +++--- src/sys.zig | 334 ++++++++++++++++++++++++++++++++++++--------------- 3 files changed, 259 insertions(+), 115 deletions(-) diff --git a/src/main.zig b/src/main.zig index 05ac72f..2f85626 100644 --- a/src/main.zig +++ b/src/main.zig @@ -27,7 +27,7 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { defer _ = gpa.deinit(); { - const file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(.readonly); + var file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(.readonly); defer file_access.close(); diff --git a/src/oar.zig b/src/oar.zig index be4f3d4..6a72116 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -9,36 +9,34 @@ const sys = @import("./sys.zig"); pub const Entry = extern struct { signature: [3]u8 = signature_magic, revision: u8, - name_length: u8 = 0, name_buffer: [255]u8 = std.mem.zeroes([255]u8), + name_length: u8 = 0, file_size: u64, - padding: [244]u8 = std.mem.zeroes([244]u8), + file_offset: u64, + padding: [232]u8 = std.mem.zeroes([232]u8), - /// - /// Returns `true` if `entry` correctly identifies itself as a valid Oar entry, otherwise - /// `false`. - /// - pub fn isValid(entry: Entry) bool { - return std.mem.eql(u8, &entry.signature, signature_magic[0 ..]); + comptime { + const entry_size = @sizeOf(Entry); + + if (entry_size != 512) + @compileError("Entry is " ++ std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); } /// - /// Attempts to read an [Entry] from `file_access` at its current cursor position. /// - /// Returns the read [Entry] value or `null` if the end of file is reached before completing the - /// read. /// - pub fn read(file_access: *sys.FileAccess) sys.FileAccess.Error!?Entry { - var entry = std.mem.zeroes(Entry); - const origin = try file_access.queryCursor(); + pub const FindError = sys.FileAccess.Error || error { + EntryNotFound, + }; - if ((try file_access.read(std.mem.asBytes(&entry))) != @sizeOf(Entry)) { - try file_access.seek(origin); + /// + /// + /// + pub fn find(file_access: sys.FileAccess, entry_name: []const u8) FindError!Entry { + _ = file_access; + _ = entry_name; - return null; - } - - return entry; + return error.EntryNotFound; } /// diff --git a/src/sys.zig b/src/sys.zig index 6d6fb4f..4bb595e 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -128,19 +128,19 @@ pub const AppContext = opaque { } /// - /// Initializes a new [Implemenation] with `data_archive_path` as the read-only data archive - /// to read from and `user_path_prefix` as the native writable user data directory. 
+ /// Initializes a new [Implemenation] with `data_archive_file_access` as the data archive to + /// read from and `user_path_prefix` as the native writable user data directory. /// /// Returns the created [Implementation] value on success or [InitError] on failure. /// - fn init(data_archive_path: []const u8, + fn init(data_archive_file_access: FileAccess, user_path_prefix: []const u8) InitError!Implementation { return Implementation{ .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, - .data_file_system = .{.archive = data_archive_path}, - .user_file_system = .{.native = user_path_prefix}, + .data_file_system = .{.archive = .{.file_access = data_archive_file_access}}, + .user_file_system = .{.native = .{.path_prefix = user_path_prefix}}, .message_thread = null, }; } @@ -204,7 +204,7 @@ pub const AppContext = opaque { /// /// Returns a reference to the currently loaded data file-system. /// - pub fn data(app_context: *AppContext) *const FileSystem { + pub fn data(app_context: *AppContext) *FileSystem { return &Implementation.cast(app_context).data_file_system; } @@ -245,7 +245,7 @@ pub const AppContext = opaque { /// /// Returns a reference to the currently loaded user file-system. /// - pub fn user(app_context: *AppContext) *const FileSystem { + pub fn user(app_context: *AppContext) *FileSystem { return &Implementation.cast(app_context).user_file_system; } }; @@ -253,7 +253,22 @@ pub const AppContext = opaque { /// /// File-system agnostic abstraction for manipulating a file. /// -pub const FileAccess = opaque { +pub const FileAccess = struct { + context: *anyopaque, + implementation: *const Implementation, + + /// + /// Provides a set of implementation-specific behaviors to a [FileAccess] instance. + /// + pub const Implementation = struct { + close: fn (*anyopaque) void, + queryCursor: fn (*anyopaque) Error!u64, + queryLength: fn (*anyopaque) Error!u64, + read: fn (*anyopaque, []u8) Error!usize, + seek: fn (*anyopaque, u64) Error!void, + seekToEnd: fn (*anyopaque) Error!void, + }; + /// /// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer /// pointing to a file or the file becomming invalid for whatever reason. @@ -262,13 +277,6 @@ pub const FileAccess = opaque { FileInaccessible, }; - /// - /// Returns `file_access` casted to a [ext.SDL_RWops]. - /// - fn asRwOps(file_access: *FileAccess) *ext.SDL_RWops { - return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), file_access)); - } - /// /// Close the file referenced by `file_access` on the main thread, invalidating the reference to /// it and releasing any associated resources. @@ -277,8 +285,7 @@ pub const FileAccess = opaque { /// wasted effort. /// pub fn close(file_access: *FileAccess) void { - if (ext.SDL_RWclose(file_access.asRwOps()) != 0) - ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, "Closed an invalid file reference"); + return file_access.implementation.close(file_access.context); } /// @@ -288,13 +295,7 @@ pub const FileAccess = opaque { /// [Error] on failure. /// pub fn queryCursor(file_access: *FileAccess) Error!u64 { - ext.SDL_ClearError(); - - const sought = ext.SDL_RWtell(file_access.asRwOps()); - - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); + return file_access.implementation.queryCursor(file_access.context); } /// @@ -304,13 +305,7 @@ pub const FileAccess = opaque { /// failed to be queried. 
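// Editor's note: the FileAccess methods in this struct all forward to the matching function
// pointer in `implementation`, passing the type-erased `context`; the archive and native
// branches of FileSystem.open() each install their own Implementation table, which is what
// lets a single FileAccess value wrap either an in-archive entry or an SDL_RWops handle.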
/// pub fn queryLength(file_access: *FileAccess) Error!u64 { - ext.SDL_ClearError(); - - const sought = ext.SDL_RWsize(file_access.asRwOps()); - - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); + return file_access.implementation.queryLength(file_access.context); } /// @@ -319,14 +314,7 @@ pub const FileAccess = opaque { /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. /// pub fn read(file_access: *FileAccess, buffer: []u8) Error!usize { - ext.SDL_ClearError(); - - const buffer_read = - ext.SDL_RWread(file_access.asRwOps(), buffer.ptr, @sizeOf(u8), buffer.len); - - if ((buffer_read == 0) and (ext.SDL_GetError() != null)) return error.FileInaccessible; - - return buffer_read; + return file_access.implementation.read(file_access.context, buffer); } /// @@ -335,19 +323,7 @@ pub const FileAccess = opaque { /// Returns [Error] on failure. /// pub fn seek(file_access: *FileAccess, cursor: u64) Error!void { - var to_seek = cursor; - - while (to_seek != 0) { - const sought = @intCast(i64, std.math.min(to_seek, std.math.maxInt(i64))); - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(file_access.asRwOps(), sought, ext.RW_SEEK_CUR) < 0) - return error.FileInaccessible; - - // Cannot be less than zero because it is derived from `read`. - to_seek -= @intCast(u64, sought); - } + return file_access.implementation.seek(file_access.context, cursor); } /// @@ -357,10 +333,7 @@ pub const FileAccess = opaque { /// Returns [Error] on failure. /// pub fn seekToEnd(file_access: *FileAccess) Error!void { - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(file_access.asRwOps(), 0, ext.RW_SEEK_END) < 0) - return error.FileInaccessible; + return file_access.implementation.seekToEnd(file_access.context); } }; @@ -369,14 +342,33 @@ pub const FileAccess = opaque { /// available to the application in a sandboxed environment. /// pub const FileSystem = union(enum) { - native: []const u8, - archive: []const u8, + native: struct { + path_prefix: []const u8, + }, + + archive: struct { + file_access: FileAccess, + + entry_table: [max_open_entries]ArchiveEntry = + std.mem.zeroes([max_open_entries]ArchiveEntry), + + const max_open_entries = 16; + }, + + /// + /// Handles the state of an opened archive entry. + /// + const ArchiveEntry = struct { + using: ?*FileAccess, + header: oar.Entry, + cursor: u64, + }; /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. /// pub const Path = struct { - file_system: *const FileSystem, + file_system: *FileSystem, length: u8, buffer: [max]u8, @@ -390,9 +382,13 @@ pub const FileSystem = union(enum) { /// should be tried or, if no mode other is suitable, that the resource is effectively /// unavailable. /// + /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] + /// is used to communicate this. + /// pub const OpenError = error { FileNotFound, ModeUnsupported, + OutOfFiles, }; /// @@ -442,59 +438,205 @@ pub const FileSystem = union(enum) { /// Returns a [FileAccess] reference that provides access to the file referenced by `path` /// or a [OpenError] if it failed. 
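// Editor's sketch (not part of the patch): the call pattern this API is used with elsewhere
// in the series; "ona.lua" is simply the file that src/main.zig opens.
//
//     var file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(.readonly);
//
//     defer file_access.close();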
/// - pub fn open(path: Path, mode: OpenMode) OpenError!*FileAccess { + pub fn open(path: Path, mode: OpenMode) OpenError!FileAccess { switch (path.file_system.*) { - .archive => |archive| { - if (archive.len == 0) return error.FileNotFound; - + .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; - var path_buffer = std.mem.zeroes([4096]u8); + for (archive.entry_table) |_, index| { + if (archive.entry_table[index].using == null) { + archive.entry_table[index] = .{ + .header = oar.Entry.find(archive.file_access, path. + buffer[0 .. path.length]) catch return error.FileNotFound, - if (archive.len >= path_buffer.len) return error.FileNotFound; + .using = &archive.file_access, + .cursor = 0, + }; - std.mem.copy(u8, path_buffer[0 ..], archive); + const Implementation = struct { + fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { + return @ptrCast(*ArchiveEntry, @alignCast( + @alignOf(ArchiveEntry), context)); + } - const file_access = @ptrCast(*FileAccess, ext.SDL_RWFromFile( - &path_buffer, "rb") orelse return error.FileNotFound); + fn close(context: *anyopaque) void { + archiveEntryCast(context).using = null; + } - while (oar.Entry.read(file_access) catch return error.FileNotFound) |entry| { - if (!entry.isValid()) break; + fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { + const archive_entry = archiveEntryCast(context); - if (std.mem.eql(u8, entry.name_buffer[0 .. entry.name_length], - path.buffer[0 .. path.length])) return file_access; + if (archive_entry.using == null) return error.FileInaccessible; - file_access.seek(entry.file_size) catch break; + return archive_entry.cursor; + } + + fn queryLength(context: *anyopaque) FileAccess.Error!u64 { + const archive_entry = archiveEntryCast(context); + + if (archive_entry.using == null) return error.FileInaccessible; + + return archive_entry.header.file_size; + } + + fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { + const archive_entry = archiveEntryCast(context); + + const file_access = archive_entry.using orelse + return error.FileInaccessible; + + if (archive_entry.cursor >= archive_entry.header.file_size) + return error.FileInaccessible; + + try file_access.seek(archive_entry.header.file_offset); + + return file_access.read(buffer[0 .. 
std.math.min( + buffer.len, archive_entry.header.file_size)]); + } + + fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { + const archive_entry = archiveEntryCast(context); + + if (archive_entry.using == null) return error.FileInaccessible; + + archive_entry.cursor = cursor; + } + + fn seekToEnd(context: *anyopaque) FileAccess.Error!void { + const archive_entry = archiveEntryCast(context); + + if (archive_entry.using == null) return error.FileInaccessible; + + archive_entry.cursor = archive_entry.header.file_size; + } + }; + + return FileAccess{ + .context = &archive.entry_table[index], + + .implementation = &.{ + .close = Implementation.close, + .queryCursor = Implementation.queryCursor, + .queryLength = Implementation.queryLength, + .read = Implementation.read, + .seek = Implementation.seek, + .seekToEnd = Implementation.seekToEnd, + }, + }; + } } - return error.FileNotFound; + return error.OutOfFiles; }, .native => |native| { - if (native.len == 0) return error.FileNotFound; + if (native.path_prefix.len == 0) return error.FileNotFound; var path_buffer = std.mem.zeroes([4096]u8); const seperator = '/'; - const seperator_length = @boolToInt(native[native.len - 1] != seperator); - if ((native.len + seperator_length + path.length) >= path_buffer.len) - return error.FileNotFound; + const seperator_length = @boolToInt(native.path_prefix[ + native.path_prefix.len - 1] != seperator); - std.mem.copy(u8, path_buffer[0 ..], native); + if ((native.path_prefix.len + seperator_length + path.length) >= + path_buffer.len) return error.FileNotFound; + + std.mem.copy(u8, path_buffer[0 ..], native.path_prefix); if (seperator_length != 0) - path_buffer[native.len] = seperator; + path_buffer[native.path_prefix.len] = seperator; - std.mem.copy(u8, path_buffer[native.len .. path_buffer.len], - path.buffer[0 .. path.length]); + std.mem.copy(u8, path_buffer[native.path_prefix.len .. + path_buffer.len], path.buffer[0 .. 
path.length]); ext.SDL_ClearError(); - return @ptrCast(*FileAccess, ext.SDL_RWFromFile(&path_buffer, switch (mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - }) orelse return error.FileNotFound); + const Implementation = struct { + fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { + return @ptrCast(*ext.SDL_RWops, @alignCast( + @alignOf(ext.SDL_RWops), context)); + } + + fn close(context: *anyopaque) void { + ext.SDL_ClearError(); + + if (ext.SDL_RWclose(rwOpsCast(context)) != 0) + ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, ext.SDL_GetError()); + } + + fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { + ext.SDL_ClearError(); + + const sought = ext.SDL_RWtell(rwOpsCast(context)); + + if (sought < 0) return error.FileInaccessible; + + return @intCast(u64, sought); + } + + fn queryLength(context: *anyopaque) FileAccess.Error!u64 { + ext.SDL_ClearError(); + + const sought = ext.SDL_RWsize(rwOpsCast(context)); + + if (sought < 0) return error.FileInaccessible; + + return @intCast(u64, sought); + } + + fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { + ext.SDL_ClearError(); + + const buffer_read = ext.SDL_RWread(rwOpsCast( + context), buffer.ptr, @sizeOf(u8), buffer.len); + + if ((buffer_read == 0) and (ext.SDL_GetError() != null)) + return error.FileInaccessible; + + return buffer_read; + } + + fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { + var to_seek = cursor; + + while (to_seek != 0) { + const math = std.math; + const sought = @intCast(i64, math.min(to_seek, math.maxInt(i64))); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), sought, ext.RW_SEEK_CUR) < 0) + return error.FileInaccessible; + + // Cannot be less than zero because it is derived from `read`. + to_seek -= @intCast(u64, sought); + } + } + + fn seekToEnd(context: *anyopaque) FileAccess.Error!void { + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), 0, ext.RW_SEEK_END) < 0) + return error.FileInaccessible; + } + }; + + return FileAccess{ + .context = ext.SDL_RWFromFile(&path_buffer, switch (mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + }) orelse return error.FileNotFound, + + .implementation = &.{ + .close = Implementation.close, + .queryCursor = Implementation.queryCursor, + .queryLength = Implementation.queryLength, + .read = Implementation.read, + .seek = Implementation.seek, + .seekToEnd = Implementation.seekToEnd, + }, + }; }, } } @@ -515,9 +657,7 @@ pub const FileSystem = union(enum) { /// A [Path] value is returned containing the fully qualified path from the file-system root or /// a [PathError] if it could not be created. /// - pub fn joinedPath(file_system: *const FileSystem, - sequences: []const []const u8) PathError!Path { - + pub fn joinedPath(file_system: *FileSystem, sequences: []const []const u8) PathError!Path { var path = Path{ .file_system = file_system, .buffer = std.mem.zeroes([Path.max]u8), @@ -644,8 +784,7 @@ pub const RunError = error { }; /// -/// Runs a graphical application referenced by `run` with `error` as its error set and `allocator` -/// as the underlying memory allocation strategy for its graphical runtime. +/// Runs a graphical application referenced by `run` with `error` as its error set. /// /// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if /// the underlying runtime fails and is logged. 
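// Editor's sketch (not part of the patch): how this runner is driven from the entry point,
// mirroring src/main.zig; `run` matches the GraphicsRunner callback signature.
//
//     fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void {
//         // application logic goes here
//     }
//
//     pub fn main() anyerror!void {
//         return nosuspend await async sys.runGraphics(anyerror, run);
//     }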
@@ -694,8 +833,15 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_free(user_path_prefix); - var app_context = AppContext.Implementation.init("./data.oar", user_path_prefix - [0 .. std.mem.len(user_path_prefix)]) catch |err| { + var cwd_file_system = FileSystem{.native =.{.path_prefix = "./"}}; + + var data_archive_file_access = try (try cwd_file_system. + joinedPath(&.{"./data.oar"})).open(.readonly); + + defer data_archive_file_access.close(); + + var app_context = AppContext.Implementation.init(data_archive_file_access, + user_path_prefix[0 .. std.mem.len(user_path_prefix)]) catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { error.OutOfMemory => "Failed to allocate necessary memory", -- 2.34.1 From 4e5883f384173f7da5f82deab526340451434acf Mon Sep 17 00:00:00 2001 From: kayomn Date: Fri, 14 Oct 2022 01:13:26 +0100 Subject: [PATCH 20/93] Add stubs for archive entry caching --- src/oar.zig | 2 +- src/sys.zig | 26 +++++++++++++---- src/table.zig | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 6 deletions(-) create mode 100644 src/table.zig diff --git a/src/oar.zig b/src/oar.zig index 6a72116..f1e3b0f 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -23,7 +23,7 @@ pub const Entry = extern struct { } /// - /// + /// [FindError.EntryNotFound] happens when an entry could not be found. /// pub const FindError = sys.FileAccess.Error || error { EntryNotFound, diff --git a/src/sys.zig b/src/sys.zig index 4bb595e..f427eeb 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -8,6 +8,7 @@ const meta = @import("./meta.zig"); const oar = @import("./oar.zig"); const stack = @import("./stack.zig"); const std = @import("std"); +const table = @import("./table.zig"); /// /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. @@ -211,7 +212,9 @@ pub const AppContext = opaque { /// /// /// - pub fn schedule(app_context: *AppContext, procedure: anytype, arguments: anytype) meta.FnReturn(@TypeOf(procedure)) { + pub fn schedule(app_context: *AppContext, procedure: anytype, + arguments: anytype) meta.FnReturn(@TypeOf(procedure)) { + const Task = struct { procedure: @TypeOf(procedure), arguments: *@TypeOf(arguments), @@ -348,6 +351,7 @@ pub const FileSystem = union(enum) { archive: struct { file_access: FileAccess, + index_cache: *table.Dynamic([]const u8, ArchiveEntry, table.string_context), entry_table: [max_open_entries]ArchiveEntry = std.mem.zeroes([max_open_entries]ArchiveEntry), @@ -445,10 +449,21 @@ pub const FileSystem = union(enum) { for (archive.entry_table) |_, index| { if (archive.entry_table[index].using == null) { - archive.entry_table[index] = .{ - .header = oar.Entry.find(archive.file_access, path. - buffer[0 .. path.length]) catch return error.FileNotFound, + const archive_path = path.buffer[0 .. path.length]; + const entry_header = archive.index_cache.lookup(archive_path) orelse { + const header = oar.Entry.find(archive.file_access, + archive_path) catch return error.FileNotFound; + + archive.index_cache.insert(archive_path, header) catch { + // If caching fails... oh well... 
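					// Editor's note: the cache is purely an optimisation; if the insert
					// fails, the header that was just located is still used for this open(),
					// and the next lookup of the same path simply misses the cache and
					// searches the archive again.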
+ }; + + break header; + }; + + archive.entry_table[index] = .{ + .header = entry_header, .using = &archive.file_access, .cursor = 0, }; @@ -533,7 +548,6 @@ pub const FileSystem = union(enum) { if (native.path_prefix.len == 0) return error.FileNotFound; var path_buffer = std.mem.zeroes([4096]u8); - const seperator = '/'; const seperator_length = @boolToInt(native.path_prefix[ native.path_prefix.len - 1] != seperator); @@ -640,6 +654,8 @@ pub const FileSystem = union(enum) { }, } } + + pub const seperator = '/'; }; /// diff --git a/src/table.zig b/src/table.zig new file mode 100644 index 0000000..cec8dba --- /dev/null +++ b/src/table.zig @@ -0,0 +1,78 @@ +const std = @import("std"); + +pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type { + return struct { + buckets_used: usize, + bucket_map: []?Bucket, + + /// + /// + /// + const Bucket = struct { + key: Key, + value: Value, + next: ?usize, + }; + + /// + /// + /// + pub const InsertError = error { + + }; + + /// + /// + /// + const Self = @This(); + + /// + /// + /// + pub fn insert(self: Self, key: Key, value: Value) InsertError!void { + _ = value; + _ = key; + _ = self; + } + + /// + /// + /// + pub fn lookup(self: Self, key: Key) ?*Value { + var bucket = &(self.bucket_map[@mod(key_context.hash( + key), self.bucket_map.len)] orelse return null); + + while (bucket) { + if (key_context.equals(bucket.key, key)) return &bucket.value; + + bucket = bucket.next; + } + + return null; + } + }; +} + +pub fn KeyContext(comptime Key: type) type { + return struct { + hash: fn (Key) usize, + equals: fn (Key, Key) bool, + }; +} + +fn equalsString(this_string: []const u8, that_string: []const u8) bool { + return std.mem.eql(u8, this_string, that_string); +} + +fn hashString(string: []const u8) usize { + var hash = @as(usize, 5381); + + for (string) |byte| hash = ((hash << 5) + hash) + byte; + + return hash; +} + +pub const string_context = KeyContext([]const u8){ + .hash = hashString, + .equals = equalsString, +}; -- 2.34.1 From 53a369952e1ebc4948495aca05655757f241178f Mon Sep 17 00:00:00 2001 From: kayomn Date: Fri, 14 Oct 2022 17:10:27 +0100 Subject: [PATCH 21/93] Implement hash table lookup logic --- src/table.zig | 66 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 49 insertions(+), 17 deletions(-) diff --git a/src/table.zig b/src/table.zig index cec8dba..81d7f03 100644 --- a/src/table.zig +++ b/src/table.zig @@ -2,23 +2,20 @@ const std = @import("std"); pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type { return struct { + load_maximum: f32, buckets_used: usize, - bucket_map: []?Bucket, + buckets: []Bucket, /// /// /// const Bucket = struct { - key: Key, - value: Value, - next: ?usize, - }; - - /// - /// - /// - pub const InsertError = error { + maybe_entry: ?struct { + key: Key, + value: Value, + }, + maybe_next_index: ?usize, }; /// @@ -29,8 +26,7 @@ pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: K /// /// /// - pub fn insert(self: Self, key: Key, value: Value) InsertError!void { - _ = value; + pub fn delete(self: Self, key: Key) bool { _ = key; _ = self; } @@ -38,14 +34,43 @@ pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: K /// /// /// + pub fn insert(self: Self, key: Key, value: Value) InsertError!void { + if ((@intToFloat(f32, self.buckets_used) / @intToFloat( + f32, self.buckets.len)) >= self.load_maximum) try self.rehash(); + + var hash = 
@mod(key_context.hash(key), self.buckets.len); + + while (true) { + const bucket = &(self.buckets[hash]); + + const entry = &(bucket.maybe_entry orelse { + bucket.maybe_entry = .{ + .key = key, + .value = value + }; + }); + + if (key_context.equals(entry.key, key)) return error.KeyExists; + + hash = @mod(hashHash(hash), self.buckets.len); + } + } + + /// + /// Searches for a value indexed with `key` in `self`, returning it or `null` if no matching + /// entry was found. + /// pub fn lookup(self: Self, key: Key) ?*Value { - var bucket = &(self.bucket_map[@mod(key_context.hash( - key), self.bucket_map.len)] orelse return null); + var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]); - while (bucket) { - if (key_context.equals(bucket.key, key)) return &bucket.value; + if (bucket.maybe_entry) |*entry| + if (key_context.equals(entry.key, key)) return &entry.value; - bucket = bucket.next; + while (bucket.maybe_next_index) |index| { + bucket = &(self.buckets[index]); + + if (bucket.maybe_entry) |*entry| + if (key_context.equals(entry.key, key)) return &entry.value; } return null; @@ -53,6 +78,13 @@ pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: K }; } +/// +/// +/// +pub const InsertError = std.mem.Allocator || error { + KeyExists, +}; + pub fn KeyContext(comptime Key: type) type { return struct { hash: fn (Key) usize, -- 2.34.1 From b8517d3b2258bde2ae215a1ef9dd6190b47856b8 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 01:08:03 +0100 Subject: [PATCH 22/93] Tidy up naming conventions in stack module --- src/stack.zig | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/stack.zig b/src/stack.zig index 3f8284a..c4b5963 100755 --- a/src/stack.zig +++ b/src/stack.zig @@ -1,11 +1,17 @@ const io = @import("./io.zig"); const std = @import("std"); +/// +/// Returns a fixed-size stack type of `Element`s. +/// pub fn Fixed(comptime Element: type) type { return struct { filled: usize = 0, buffer: []Element, + /// + /// Stack type. + /// const Self = @This(); /// @@ -57,8 +63,8 @@ pub fn Fixed(comptime Element: type) type { /// /// Attempts to push `element` into `self`, returning a [FixedPushError] if it failed. /// - pub fn push(self: *Self, element: Element) FixedPushError!void { - if (self.filled == self.buffer.len) return error.Overflow; + pub fn push(self: *Self, element: Element) PushError!void { + if (self.filled == self.buffer.len) return error.OutOfMemory; self.buffer[self.filled] = element; self.filled += 1; @@ -68,10 +74,10 @@ pub fn Fixed(comptime Element: type) type { /// Attempts to push all of `elements` into `self`, returning a [FixedPushError] if it /// failed. /// - pub fn pushAll(self: *Self, elements: []const u8) FixedPushError!void { + pub fn pushAll(self: *Self, elements: []const u8) PushError!void { const filled = (self.filled + elements.len); - if (filled > self.buffer.len) return error.Overflow; + if (filled > self.buffer.len) return error.OutOfMemory; std.mem.copy(u8, self.buffer[self.filled ..], elements); @@ -81,15 +87,9 @@ pub fn Fixed(comptime Element: type) type { } /// -/// Potential errors that may occur while trying to push one or more elements into a stack of a -/// known maximum size. +/// Potential errors that may occur while trying to push one or more elements into a stack. /// -/// [FinitePushError.Overflow] is returned if the stack does not have sufficient capacity to hold a -/// given set of elements. 
-/// -pub const FixedPushError = error { - Overflow, -}; +pub const PushError = std.mem.Allocator.Error; test { const testing = std.testing; @@ -103,8 +103,8 @@ test { try testing.expectEqual(stack.pop(), 69); try stack.pushAll(&.{42, 10, 95, 0}); try testing.expectEqual(stack.count(), 4); - try testing.expectError(FixedPushError.Overflow, stack.push(1)); - try testing.expectError(FixedPushError.Overflow, stack.pushAll(&.{1, 11, 11})); + try testing.expectError(PushError.OutOfMemory, stack.push(1)); + try testing.expectError(PushError.OutOfMemory, stack.pushAll(&.{1, 11, 11})); stack.clear(); -- 2.34.1 From 26f342e51888300c065fb614c7d4a60246ffd8b0 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 01:25:21 +0100 Subject: [PATCH 23/93] Add entry caching to archive file systems --- src/main.zig | 4 +- src/sys.zig | 172 ++++++++++++++++++++++++++------------------------ src/table.zig | 98 ++++++++++++++++++++++------ 3 files changed, 169 insertions(+), 105 deletions(-) diff --git a/src/main.zig b/src/main.zig index 2f85626..9b95b9d 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,7 +11,9 @@ const sys = @import("./sys.zig"); /// Entry point. /// pub fn main() anyerror!void { - return nosuspend await async sys.runGraphics(anyerror, run); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + + return nosuspend await async sys.runGraphics(gpa.allocator(), anyerror, run); } test { diff --git a/src/sys.zig b/src/sys.zig index f427eeb..cbfe2a8 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -35,6 +35,7 @@ pub const AppContext = opaque { /// Internal state of the event loop hidden from the API consumer. /// const Implementation = struct { + user_path_prefix: [*]u8, data_file_system: FileSystem, user_file_system: FileSystem, message_semaphore: *ext.SDL_sem, @@ -104,6 +105,7 @@ pub const AppContext = opaque { ext.SDL_DestroyMutex(implementation.message_mutex); ext.SDL_DestroySemaphore(implementation.message_semaphore); + ext.SDL_free(implementation.user_path_prefix); } /// @@ -134,14 +136,25 @@ pub const AppContext = opaque { /// /// Returns the created [Implementation] value on success or [InitError] on failure. /// - fn init(data_archive_file_access: FileAccess, - user_path_prefix: []const u8) InitError!Implementation { + fn init(allocator: std.mem.Allocator, + data_archive_file_access: FileAccess) InitError!Implementation { + + const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse + return error.OutOfMemory; return Implementation{ + .user_file_system = .{.native = .{.path_prefix = + user_path_prefix[0 .. 
std.mem.len(user_path_prefix)]}}, + .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, - .data_file_system = .{.archive = .{.file_access = data_archive_file_access}}, - .user_file_system = .{.native = .{.path_prefix = user_path_prefix}}, + .user_path_prefix = user_path_prefix, + + .data_file_system = .{.archive = .{ + .file_access = data_archive_file_access, + .index_cache = try FileSystem.ArchiveIndexCache.init(allocator), + }}, + .message_thread = null, }; } @@ -351,7 +364,7 @@ pub const FileSystem = union(enum) { archive: struct { file_access: FileAccess, - index_cache: *table.Dynamic([]const u8, ArchiveEntry, table.string_context), + index_cache: ArchiveIndexCache, entry_table: [max_open_entries]ArchiveEntry = std.mem.zeroes([max_open_entries]ArchiveEntry), @@ -368,6 +381,8 @@ pub const FileSystem = union(enum) { cursor: u64, }; + const ArchiveIndexCache = table.Hashed([]const u8, oar.Entry, table.string_context); + /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. /// @@ -447,99 +462,98 @@ pub const FileSystem = union(enum) { .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; - for (archive.entry_table) |_, index| { - if (archive.entry_table[index].using == null) { - const archive_path = path.buffer[0 .. path.length]; + for (archive.entry_table) |*entry| if (entry.using == null) { + const entry_path = path.buffer[0 .. path.length]; + + entry.* = .{ + .header = find_header: { + if (archive.index_cache.lookup(entry_path)) |header| + break: find_header header.*; - const entry_header = archive.index_cache.lookup(archive_path) orelse { const header = oar.Entry.find(archive.file_access, - archive_path) catch return error.FileNotFound; + entry_path) catch return error.FileNotFound; - archive.index_cache.insert(archive_path, header) catch { - // If caching fails... oh well... - }; + // If caching fails... oh well... 
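					// Editor's note: `catch {}` deliberately swallows the cache error, as in
					// the earlier commit; the labelled `find_header` block still breaks with
					// the header that was just read, so this open() succeeds either way.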
+ archive.index_cache.insert(entry_path, header) catch {}; - break header; - }; + break: find_header header; + }, - archive.entry_table[index] = .{ - .header = entry_header, - .using = &archive.file_access, - .cursor = 0, - }; + .using = &archive.file_access, + .cursor = 0, + }; - const Implementation = struct { - fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { - return @ptrCast(*ArchiveEntry, @alignCast( - @alignOf(ArchiveEntry), context)); - } + const Implementation = struct { + fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { + return @ptrCast(*ArchiveEntry, @alignCast( + @alignOf(ArchiveEntry), context)); + } - fn close(context: *anyopaque) void { - archiveEntryCast(context).using = null; - } + fn close(context: *anyopaque) void { + archiveEntryCast(context).using = null; + } - fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); + fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { + const archive_entry = archiveEntryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.using == null) return error.FileInaccessible; - return archive_entry.cursor; - } + return archive_entry.cursor; + } - fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); + fn queryLength(context: *anyopaque) FileAccess.Error!u64 { + const archive_entry = archiveEntryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.using == null) return error.FileInaccessible; - return archive_entry.header.file_size; - } + return archive_entry.header.file_size; + } - fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - const archive_entry = archiveEntryCast(context); + fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { + const archive_entry = archiveEntryCast(context); - const file_access = archive_entry.using orelse - return error.FileInaccessible; + const file_access = archive_entry.using orelse + return error.FileInaccessible; - if (archive_entry.cursor >= archive_entry.header.file_size) - return error.FileInaccessible; + if (archive_entry.cursor >= archive_entry.header.file_size) + return error.FileInaccessible; - try file_access.seek(archive_entry.header.file_offset); + try file_access.seek(archive_entry.header.file_offset); - return file_access.read(buffer[0 .. std.math.min( - buffer.len, archive_entry.header.file_size)]); - } + return file_access.read(buffer[0 .. 
std.math.min( + buffer.len, archive_entry.header.file_size)]); + } - fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); + fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { + const archive_entry = archiveEntryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.using == null) return error.FileInaccessible; - archive_entry.cursor = cursor; - } + archive_entry.cursor = cursor; + } - fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); + fn seekToEnd(context: *anyopaque) FileAccess.Error!void { + const archive_entry = archiveEntryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.using == null) return error.FileInaccessible; - archive_entry.cursor = archive_entry.header.file_size; - } - }; + archive_entry.cursor = archive_entry.header.file_size; + } + }; - return FileAccess{ - .context = &archive.entry_table[index], + return FileAccess{ + .context = entry, - .implementation = &.{ - .close = Implementation.close, - .queryCursor = Implementation.queryCursor, - .queryLength = Implementation.queryLength, - .read = Implementation.read, - .seek = Implementation.seek, - .seekToEnd = Implementation.seekToEnd, - }, - }; - } - } + .implementation = &.{ + .close = Implementation.close, + .queryCursor = Implementation.queryCursor, + .queryLength = Implementation.queryLength, + .read = Implementation.read, + .seek = Implementation.seek, + .seekToEnd = Implementation.seekToEnd, + }, + }; + }; return error.OutOfFiles; }, @@ -805,7 +819,7 @@ pub const RunError = error { /// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if /// the underlying runtime fails and is logged. /// -pub fn runGraphics(comptime Error: anytype, +pub fn runGraphics(allocator: std.mem.Allocator, comptime Error: anytype, comptime run: GraphicsRunner(Error)) (RunError || Error)!void { if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { @@ -841,14 +855,6 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); - const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path"); - - return error.InitFailure; - }; - - defer ext.SDL_free(user_path_prefix); - var cwd_file_system = FileSystem{.native =.{.path_prefix = "./"}}; var data_archive_file_access = try (try cwd_file_system. @@ -856,9 +862,7 @@ pub fn runGraphics(comptime Error: anytype, defer data_archive_file_access.close(); - var app_context = AppContext.Implementation.init(data_archive_file_access, - user_path_prefix[0 .. std.mem.len(user_path_prefix)]) catch |err| { - + var app_context = AppContext.Implementation.init(allocator, data_archive_file_access) catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { error.OutOfMemory => "Failed to allocate necessary memory", error.OutOfMutexes => "Failed to create file-system work lock", diff --git a/src/table.zig b/src/table.zig index 81d7f03..c89439c 100644 --- a/src/table.zig +++ b/src/table.zig @@ -1,61 +1,119 @@ const std = @import("std"); -pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type { +/// +/// Returns a hash-backed table type of `Value`s indexed by `Key` and using `key_context` as the key +/// context. 
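// Editor's sketch (not part of the patch): the intended call pattern, mirroring how the
// archive index cache uses this type; the key and value below are illustrative only. Note
// that string_context hashes with the classic djb2 recurrence (seed 5381, then
// hash = hash * 33 + byte for every byte).
//
//     var cache = try Hashed([]const u8, u32, string_context).init(allocator);
//
//     try cache.insert("player_speed", 42);
//
//     if (cache.lookup("player_speed")) |value| std.debug.assert(value.* == 42);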
+/// +pub fn Hashed(comptime Key: type, comptime Value: type, + comptime key_context: KeyContext(Key)) type { + + const Allocator = std.mem.Allocator; + return struct { - load_maximum: f32, - buckets_used: usize, + allocator: Allocator, + load_limit: f32, buckets: []Bucket, + filled: usize, /// - /// + /// A slot in the hash table. /// const Bucket = struct { maybe_entry: ?struct { key: Key, value: Value, - }, + } = null, - maybe_next_index: ?usize, + maybe_next_index: ?usize = null, }; /// - /// + /// Hash table type. /// const Self = @This(); /// + /// Searches for `key` to delete it, returning the deleted value or `null` if no matching + /// key was found. /// - /// - pub fn delete(self: Self, key: Key) bool { - _ = key; - _ = self; + pub fn remove(self: Self, key: Key) ?Value { + var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]); + + if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) { + defer entry.value = null; + + self.filled -= 1; + + return entry.value; + }; + + while (bucket.maybe_next_index) |index| { + bucket = &(self.buckets[index]); + + if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) { + defer entry.value = null; + + self.filled -= 1; + + return entry.value; + }; + } + + return null; + } + + pub fn init(allocator: Allocator) Allocator.Error!Self { + return Self{ + .buckets = try allocator.alloc(Bucket, 4), + .filled = 0, + .allocator = allocator, + .load_limit = 0.75, + }; } /// + /// Attempts to insert the value at `key` to be `value` in `self`, returning an + /// [InsertError] if it fails. /// - /// - pub fn insert(self: Self, key: Key, value: Value) InsertError!void { - if ((@intToFloat(f32, self.buckets_used) / @intToFloat( - f32, self.buckets.len)) >= self.load_maximum) try self.rehash(); + pub fn insert(self: *Self, key: Key, value: Value) InsertError!void { + if (self.loadFactor() >= self.load_limit) { + const old_buckets = self.buckets; + + defer self.allocator.free(old_buckets); + + self.buckets = try self.allocator.alloc(Bucket, old_buckets.len * 2); + + for (old_buckets) |bucket, index| self.buckets[index] = bucket; + } var hash = @mod(key_context.hash(key), self.buckets.len); while (true) { const bucket = &(self.buckets[hash]); - const entry = &(bucket.maybe_entry orelse { + if (key_context.equals((bucket.maybe_entry orelse { bucket.maybe_entry = .{ .key = key, .value = value }; - }); - if (key_context.equals(entry.key, key)) return error.KeyExists; + self.filled += 1; - hash = @mod(hashHash(hash), self.buckets.len); + break; + }).key, key)) return error.KeyExists; + + hash = @mod(hash + 1, self.buckets.len); } } + /// + /// Returns the current load factor of `self`, which is derived from the number of capacity + /// that has been filled. + /// + pub fn loadFactor(self: Self) f32 { + return @intToFloat(f32, self.filled) / @intToFloat(f32, self.buckets.len); + } + /// /// Searches for a value indexed with `key` in `self`, returning it or `null` if no matching /// entry was found. 
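// Editor's note: insert() above resolves collisions by stepping the bucket index forward by
// one (mod buckets.len) until a free bucket is found, and init() starts every table at 4
// buckets with a 0.75 load limit, so the fourth insert into a fresh table is the first one
// to trigger a grow (3 filled / 4 buckets = 0.75).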
@@ -81,7 +139,7 @@ pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: K /// /// /// -pub const InsertError = std.mem.Allocator || error { +pub const InsertError = std.mem.Allocator.Error || error { KeyExists, }; -- 2.34.1 From 01e6a7cb566e07125438ae1c2cc2692eb9021e0d Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 01:28:13 +0100 Subject: [PATCH 24/93] Fix compilation error in tests --- src/stack.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stack.zig b/src/stack.zig index c4b5963..4f82925 100755 --- a/src/stack.zig +++ b/src/stack.zig @@ -26,7 +26,7 @@ pub fn Fixed(comptime Element: type) type { return io.Writer.wrap(Self, self, struct { fn write(stack: *Self, buffer: []const u8) usize { stack.pushAll(buffer) catch |err| switch (err) { - error.Overflow => return 0, + error.OutOfMemory => return 0, }; return buffer.len; -- 2.34.1 From 5f4e4cc81178dc7cd3d2f5cda6c1c362b9a9321b Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 11:38:22 +0100 Subject: [PATCH 25/93] Implement more hash table logic --- src/main.zig | 4 +--- src/sys.zig | 12 +++++++----- src/table.zig | 48 ++++++++++++++++++++++++++++++++++++++---------- 3 files changed, 46 insertions(+), 18 deletions(-) diff --git a/src/main.zig b/src/main.zig index 9b95b9d..2f85626 100644 --- a/src/main.zig +++ b/src/main.zig @@ -11,9 +11,7 @@ const sys = @import("./sys.zig"); /// Entry point. /// pub fn main() anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - return nosuspend await async sys.runGraphics(gpa.allocator(), anyerror, run); + return nosuspend await async sys.runGraphics(anyerror, run); } test { diff --git a/src/sys.zig b/src/sys.zig index cbfe2a8..e431003 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -819,7 +819,7 @@ pub const RunError = error { /// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if /// the underlying runtime fails and is logged. /// -pub fn runGraphics(allocator: std.mem.Allocator, comptime Error: anytype, +pub fn runGraphics(comptime Error: anytype, comptime run: GraphicsRunner(Error)) (RunError || Error)!void { if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { @@ -856,13 +856,15 @@ pub fn runGraphics(allocator: std.mem.Allocator, comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); var cwd_file_system = FileSystem{.native =.{.path_prefix = "./"}}; + var data_access = try (try cwd_file_system.joinedPath(&.{"./data.oar"})).open(.readonly); - var data_archive_file_access = try (try cwd_file_system. - joinedPath(&.{"./data.oar"})).open(.readonly); + defer data_access.close(); - defer data_archive_file_access.close(); + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - var app_context = AppContext.Implementation.init(allocator, data_archive_file_access) catch |err| { + defer _ = gpa.deinit(); + + var app_context = AppContext.Implementation.init(gpa.allocator(), data_access) catch |err| { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { error.OutOfMemory => "Failed to allocate necessary memory", error.OutOfMutexes => "Failed to create file-system work lock", diff --git a/src/table.zig b/src/table.zig index c89439c..aecd12d 100644 --- a/src/table.zig +++ b/src/table.zig @@ -32,6 +32,29 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// const Self = @This(); + /// + /// Deinitializes `self`, preventing any further use. 
+ /// + pub fn deinit(self: *Self) void { + self.allocator.free(self.buckets); + + self.buckets = &.{}; + } + + /// + /// Initializes a [Self] using `allocator` as the memory allocation strategy. + /// + /// Returns a new [Self] value or an [Allocator.Error] if initializing failed. + /// + pub fn init(allocator: Allocator) Allocator.Error!Self { + return Self{ + .buckets = try allocator.alloc(Bucket, 4), + .filled = 0, + .allocator = allocator, + .load_limit = 0.75, + }; + } + /// /// Searches for `key` to delete it, returning the deleted value or `null` if no matching /// key was found. @@ -62,15 +85,6 @@ pub fn Hashed(comptime Key: type, comptime Value: type, return null; } - pub fn init(allocator: Allocator) Allocator.Error!Self { - return Self{ - .buckets = try allocator.alloc(Bucket, 4), - .filled = 0, - .allocator = allocator, - .load_limit = 0.75, - }; - } - /// /// Attempts to insert the value at `key` to be `value` in `self`, returning an /// [InsertError] if it fails. @@ -137,12 +151,17 @@ pub fn Hashed(comptime Key: type, comptime Value: type, } /// -/// +/// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key +/// already present. /// pub const InsertError = std.mem.Allocator.Error || error { KeyExists, }; +/// +/// Returns a context type for handling `Key` as a key in a table, associating hashing and equality +/// behaviors to it. +/// pub fn KeyContext(comptime Key: type) type { return struct { hash: fn (Key) usize, @@ -150,10 +169,16 @@ pub fn KeyContext(comptime Key: type) type { }; } +/// +/// Tests if the contents of `this_string` lexically equals the contents of `that_string`. +/// fn equalsString(this_string: []const u8, that_string: []const u8) bool { return std.mem.eql(u8, this_string, that_string); } +/// +/// Hashes `string` into a hash value of `usize`. +/// fn hashString(string: []const u8) usize { var hash = @as(usize, 5381); @@ -162,6 +187,9 @@ fn hashString(string: []const u8) usize { return hash; } +/// +/// A [KeyContext] for handling `[]const u8` types. +/// pub const string_context = KeyContext([]const u8){ .hash = hashString, .equals = equalsString, -- 2.34.1 From 2792f27473c40fb1cd349dc29aa7f53e5ad3b22b Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 13:14:02 +0100 Subject: [PATCH 26/93] Split archive-specific logic into own module --- src/oar.zig | 130 ++++++++++++++++++++++++++++++++++-------- src/sys.zig | 160 +++++++++++++++++++++++++--------------------------- 2 files changed, 183 insertions(+), 107 deletions(-) diff --git a/src/oar.zig b/src/oar.zig index f1e3b0f..ed8db92 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -1,29 +1,16 @@ const std = @import("std"); const sys = @import("./sys.zig"); +const table = @import("./table.zig"); /// -/// An entry block of an Oar archive file. /// -/// Typically, following this block in memory is the file data it holds the meta-information for. /// -pub const Entry = extern struct { - signature: [3]u8 = signature_magic, - revision: u8, - name_buffer: [255]u8 = std.mem.zeroes([255]u8), - name_length: u8 = 0, - file_size: u64, - file_offset: u64, - padding: [232]u8 = std.mem.zeroes([232]u8), - - comptime { - const entry_size = @sizeOf(Entry); - - if (entry_size != 512) - @compileError("Entry is " ++ std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); - } +pub const Archive = struct { + file_access: sys.FileAccess, + index_cache: IndexCache, /// - /// [FindError.EntryNotFound] happens when an entry could not be found. 
+ /// [OpenError.EntryNotFound] happens when an entry could not be found. /// pub const FindError = sys.FileAccess.Error || error { EntryNotFound, @@ -32,15 +19,110 @@ pub const Entry = extern struct { /// /// /// - pub fn find(file_access: sys.FileAccess, entry_name: []const u8) FindError!Entry { - _ = file_access; - _ = entry_name; + pub const InitError = std.mem.Allocator.Error; - return error.EntryNotFound; + /// + /// + /// + const IndexCache = table.Hashed([]const u8, Entry.Header, table.string_context); + + /// + /// Finds an entry matching `entry_path` in `archive`. + /// + /// The found [Entry] value is returned or a [FindError] if it failed to be found. + /// + pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry { + return Entry{ + .header = find_header: { + if (archive.index_cache.lookup(entry_path)) |entry_header| + break: find_header entry_header.*; + + // Start from beginning of archive. + try archive.file_access.seek(0); + + var entry_header = Entry.Header{ + .revision = 0, + .file_size = 0, + .file_offset = 0 + }; + + const read_buffer = std.mem.asBytes(&entry_header); + + // Read first entry. + while ((try archive.file_access.read(read_buffer)) == @sizeOf(Entry.Header)) { + if (std.mem.eql(u8, entry_path, entry_header. + name_buffer[0 .. entry_header.name_length])) { + + // If caching fails... oh well... + archive.index_cache.insert(entry_path, entry_header) catch {}; + + break: find_header entry_header; + } + + // Move over file data following the entry. + var to_skip = entry_header.file_size; + + while (to_skip != 0) { + const skipped = std.math.min(to_skip, std.math.maxInt(i64)); + + try archive.file_access.skip(@intCast(i64, skipped)); + + to_skip -= skipped; + } + } + + return error.EntryNotFound; + }, + + .owner = &archive.file_access, + .cursor = 0, + }; } /// - /// Magic identifier used to validate [Entry] data. /// - const signature_magic = [3]u8{'o', 'a', 'r'}; + /// + pub fn init(allocator: std.mem.Allocator, archive_file_access: sys.FileAccess) InitError!Archive { + return Archive{ + .index_cache = try IndexCache.init(allocator), + .file_access = archive_file_access, + }; + } +}; + +/// +/// Handles the state of an opened archive entry. +/// +pub const Entry = struct { + owner: ?*sys.FileAccess, + cursor: u64, + header: Header, + + /// + /// An entry block of an Oar archive file. + /// + /// Typically, following the block in memory is the file data it holds the meta-information for. + /// + pub const Header = extern struct { + signature: [signature_magic.len]u8 = signature_magic, + revision: u8, + name_buffer: [255]u8 = std.mem.zeroes([255]u8), + name_length: u8 = 0, + file_size: u64, + file_offset: u64, + padding: [232]u8 = std.mem.zeroes([232]u8), + + comptime { + const entry_size = @sizeOf(Header); + + if (entry_size != 512) + @compileError("Entry is " ++ + std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); + } + + /// + /// Magic identifier used to validate [Entry] data. + /// + pub const signature_magic = [3]u8{'o', 'a', 'r'}; + }; }; diff --git a/src/sys.zig b/src/sys.zig index e431003..6b3dc10 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -8,7 +8,6 @@ const meta = @import("./meta.zig"); const oar = @import("./oar.zig"); const stack = @import("./stack.zig"); const std = @import("std"); -const table = @import("./table.zig"); /// /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. 
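// Editor's note on Archive.find() above: the archive is treated as a flat sequence of
// 512-byte Entry.Header blocks, each followed immediately by file_size bytes of payload, so
// the scan is "read a header, compare the name, otherwise skip file_size bytes" until a name
// matches or a short read ends the loop.
//
//     [ header (512 B) | payload (file_size B) ][ header (512 B) | payload ] ...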
@@ -131,28 +130,25 @@ pub const AppContext = opaque { } /// - /// Initializes a new [Implemenation] with `data_archive_file_access` as the data archive to - /// read from and `user_path_prefix` as the native writable user data directory. + /// Initializes a new [Implemenation] with `data_access` as the data archive to read from + /// and `user_path_prefix` as the native writable user data directory. /// /// Returns the created [Implementation] value on success or [InitError] on failure. /// - fn init(allocator: std.mem.Allocator, - data_archive_file_access: FileAccess) InitError!Implementation { - + fn init(allocator: std.mem.Allocator, data_access: FileAccess) InitError!Implementation { const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse return error.OutOfMemory; return Implementation{ - .user_file_system = .{.native = .{.path_prefix = - user_path_prefix[0 .. std.mem.len(user_path_prefix)]}}, + .user_file_system = .{.native = + user_path_prefix[0 .. std.mem.len(user_path_prefix)]}, .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, .user_path_prefix = user_path_prefix, .data_file_system = .{.archive = .{ - .file_access = data_archive_file_access, - .index_cache = try FileSystem.ArchiveIndexCache.init(allocator), + .instance = try oar.Archive.init(allocator, data_access), }}, .message_thread = null, @@ -283,6 +279,7 @@ pub const FileAccess = struct { read: fn (*anyopaque, []u8) Error!usize, seek: fn (*anyopaque, u64) Error!void, seekToEnd: fn (*anyopaque) Error!void, + skip: fn (*anyopaque, i64) Error!void, }; /// @@ -343,14 +340,22 @@ pub const FileAccess = struct { } /// - /// Attempts to seek `file_access` to the end of the file while using `app_context` as the execution - /// context. + /// Attempts to seek `file_access` to the end of the file. /// /// Returns [Error] on failure. /// pub fn seekToEnd(file_access: *FileAccess) Error!void { return file_access.implementation.seekToEnd(file_access.context); } + + /// + /// Attempts to seek `file_access` by `offset` from the current file position. + /// + /// Returns [Error] on failure; + /// + pub fn skip(file_access: *FileAccess, offset: i64) Error!void { + return file_access.implementation.skip(file_access.context, offset); + } }; /// @@ -358,31 +363,15 @@ pub const FileAccess = struct { /// available to the application in a sandboxed environment. /// pub const FileSystem = union(enum) { - native: struct { - path_prefix: []const u8, - }, + native: []const u8, archive: struct { - file_access: FileAccess, - index_cache: ArchiveIndexCache, + instance: oar.Archive, + entry_table: [max_open_entries]oar.Entry = std.mem.zeroes([max_open_entries]oar.Entry), - entry_table: [max_open_entries]ArchiveEntry = - std.mem.zeroes([max_open_entries]ArchiveEntry), - - const max_open_entries = 16; + pub const max_open_entries = 16; }, - /// - /// Handles the state of an opened archive entry. - /// - const ArchiveEntry = struct { - using: ?*FileAccess, - header: oar.Entry, - cursor: u64, - }; - - const ArchiveIndexCache = table.Hashed([]const u8, oar.Entry, table.string_context); - /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. /// @@ -462,57 +451,37 @@ pub const FileSystem = union(enum) { .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; - for (archive.entry_table) |*entry| if (entry.using == null) { - const entry_path = path.buffer[0 .. 
path.length]; - - entry.* = .{ - .header = find_header: { - if (archive.index_cache.lookup(entry_path)) |header| - break: find_header header.*; - - const header = oar.Entry.find(archive.file_access, - entry_path) catch return error.FileNotFound; - - // If caching fails... oh well... - archive.index_cache.insert(entry_path, header) catch {}; - - break: find_header header; - }, - - .using = &archive.file_access, - .cursor = 0, - }; - + for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { - fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { - return @ptrCast(*ArchiveEntry, @alignCast( - @alignOf(ArchiveEntry), context)); + fn close(context: *anyopaque) void { + entryCast(context).owner = null; } - fn close(context: *anyopaque) void { - archiveEntryCast(context).using = null; + fn entryCast(context: *anyopaque) *oar.Entry { + return @ptrCast(*oar.Entry, @alignCast( + @alignOf(oar.Entry), context)); } fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); + const archive_entry = entryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; return archive_entry.cursor; } fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); + const archive_entry = entryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; return archive_entry.header.file_size; } fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - const archive_entry = archiveEntryCast(context); + const archive_entry = entryCast(context); - const file_access = archive_entry.using orelse + const file_access = archive_entry.owner orelse return error.FileInaccessible; if (archive_entry.cursor >= archive_entry.header.file_size) @@ -525,22 +494,42 @@ pub const FileSystem = union(enum) { } fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); + const archive_entry = entryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; archive_entry.cursor = cursor; } fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); + const archive_entry = entryCast(context); - if (archive_entry.using == null) return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; archive_entry.cursor = archive_entry.header.file_size; } + + fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { + const math = std.math; + const archive_entry = entryCast(context); + + if (archive_entry.owner == null) return error.FileInaccessible; + + if (offset < 0) { + archive_entry.cursor = math.max(0, + archive_entry.cursor - math.absCast(offset)); + } else { + archive_entry.cursor += @intCast(u64, offset); + } + } }; + entry.* = archive.instance.find(path.buffer[0 .. 
path.length]) catch |err| + return switch (err) { + error.FileInaccessible => error.FileNotFound, + error.EntryNotFound => error.FileNotFound, + }; + return FileAccess{ .context = entry, @@ -551,6 +540,7 @@ pub const FileSystem = union(enum) { .read = Implementation.read, .seek = Implementation.seek, .seekToEnd = Implementation.seekToEnd, + .skip = Implementation.skip, }, }; }; @@ -559,22 +549,19 @@ pub const FileSystem = union(enum) { }, .native => |native| { - if (native.path_prefix.len == 0) return error.FileNotFound; + if (native.len == 0) return error.FileNotFound; var path_buffer = std.mem.zeroes([4096]u8); + const seperator_length = @boolToInt(native[native.len - 1] != seperator); - const seperator_length = @boolToInt(native.path_prefix[ - native.path_prefix.len - 1] != seperator); - - if ((native.path_prefix.len + seperator_length + path.length) >= + if ((native.len + seperator_length + path.length) >= path_buffer.len) return error.FileNotFound; - std.mem.copy(u8, path_buffer[0 ..], native.path_prefix); + std.mem.copy(u8, path_buffer[0 ..], native); - if (seperator_length != 0) - path_buffer[native.path_prefix.len] = seperator; + if (seperator_length != 0) path_buffer[native.len] = seperator; - std.mem.copy(u8, path_buffer[native.path_prefix.len .. + std.mem.copy(u8, path_buffer[native.len .. path_buffer.len], path.buffer[0 .. path.length]); ext.SDL_ClearError(); @@ -629,15 +616,14 @@ pub const FileSystem = union(enum) { while (to_seek != 0) { const math = std.math; - const sought = @intCast(i64, math.min(to_seek, math.maxInt(i64))); + const sought = math.min(to_seek, math.maxInt(i64)); ext.SDL_ClearError(); - if (ext.SDL_RWseek(rwOpsCast(context), sought, ext.RW_SEEK_CUR) < 0) - return error.FileInaccessible; + if (ext.SDL_RWseek(rwOpsCast(context), @intCast(i64, sought), + ext.RW_SEEK_CUR) < 0) return error.FileInaccessible; - // Cannot be less than zero because it is derived from `read`. - to_seek -= @intCast(u64, sought); + to_seek -= sought; } } @@ -647,6 +633,13 @@ pub const FileSystem = union(enum) { if (ext.SDL_RWseek(rwOpsCast(context), 0, ext.RW_SEEK_END) < 0) return error.FileInaccessible; } + + fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), offset, ext.RW_SEEK_SET) < 0) + return error.FileInaccessible; + } }; return FileAccess{ @@ -663,6 +656,7 @@ pub const FileSystem = union(enum) { .read = Implementation.read, .seek = Implementation.seek, .seekToEnd = Implementation.seekToEnd, + .skip = Implementation.skip, }, }; }, @@ -855,7 +849,7 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); - var cwd_file_system = FileSystem{.native =.{.path_prefix = "./"}}; + var cwd_file_system = FileSystem{.native ="./"}; var data_access = try (try cwd_file_system.joinedPath(&.{"./data.oar"})).open(.readonly); defer data_access.close(); -- 2.34.1 From 98372cc85f00c70e76eaf42a70c574ddd87eccfa Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 21:08:52 +0100 Subject: [PATCH 27/93] Improve memory safety of hash table --- src/table.zig | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/src/table.zig b/src/table.zig index aecd12d..5bdaa81 100644 --- a/src/table.zig +++ b/src/table.zig @@ -56,16 +56,18 @@ pub fn Hashed(comptime Key: type, comptime Value: type, } /// - /// Searches for `key` to delete it, returning the deleted value or `null` if no matching - /// key was found. 
+ /// Searches for `key` and deletes it from `self. /// - pub fn remove(self: Self, key: Key) ?Value { + /// The removed value is returned or `null` if no key matching `key` was found. + /// + pub fn remove(self: *Self, key: Key) ?Value { var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]); if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) { - defer entry.value = null; - - self.filled -= 1; + defer { + bucket.maybe_entry = null; + self.filled -= 1; + } return entry.value; }; @@ -74,9 +76,10 @@ pub fn Hashed(comptime Key: type, comptime Value: type, bucket = &(self.buckets[index]); if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) { - defer entry.value = null; - - self.filled -= 1; + defer { + bucket.maybe_entry = null; + self.filled -= 1; + } return entry.value; }; @@ -129,20 +132,21 @@ pub fn Hashed(comptime Key: type, comptime Value: type, } /// - /// Searches for a value indexed with `key` in `self`, returning it or `null` if no matching - /// entry was found. + /// Searches for a value indexed with `key` in `self`. /// - pub fn lookup(self: Self, key: Key) ?*Value { + /// The found value is returned or `null` if an key matching `key` failed to be found. + /// + pub fn lookup(self: Self, key: Key) ?Value { var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]); - if (bucket.maybe_entry) |*entry| - if (key_context.equals(entry.key, key)) return &entry.value; + if (bucket.maybe_entry) |entry| + if (key_context.equals(entry.key, key)) return entry.value; while (bucket.maybe_next_index) |index| { bucket = &(self.buckets[index]); - if (bucket.maybe_entry) |*entry| - if (key_context.equals(entry.key, key)) return &entry.value; + if (bucket.maybe_entry) |entry| + if (key_context.equals(entry.key, key)) return entry.value; } return null; -- 2.34.1 From 449b56947ed82abfc9d650d59ebde01b46655f01 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 21:11:15 +0100 Subject: [PATCH 28/93] Improve memory safety of Oar archive lookups --- src/oar.zig | 58 +++++++++++++++++++++++++++++++---------------------- src/sys.zig | 2 +- 2 files changed, 35 insertions(+), 25 deletions(-) diff --git a/src/oar.zig b/src/oar.zig index ed8db92..1a1cb77 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -24,7 +24,7 @@ pub const Archive = struct { /// /// /// - const IndexCache = table.Hashed([]const u8, Entry.Header, table.string_context); + const IndexCache = table.Hashed([]const u8, u64, table.string_context); /// /// Finds an entry matching `entry_path` in `archive`. @@ -34,40 +34,50 @@ pub const Archive = struct { pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry { return Entry{ .header = find_header: { - if (archive.index_cache.lookup(entry_path)) |entry_header| - break: find_header entry_header.*; - - // Start from beginning of archive. - try archive.file_access.seek(0); - - var entry_header = Entry.Header{ + var header = Entry.Header{ .revision = 0, .file_size = 0, - .file_offset = 0 + .absolute_offset = 0 }; - const read_buffer = std.mem.asBytes(&entry_header); + const header_size = @sizeOf(Entry.Header); - // Read first entry. - while ((try archive.file_access.read(read_buffer)) == @sizeOf(Entry.Header)) { - if (std.mem.eql(u8, entry_path, entry_header. - name_buffer[0 .. entry_header.name_length])) { + if (archive.index_cache.lookup(entry_path)) |cursor| { + try archive.file_access.seek(cursor); - // If caching fails... oh well... 
- archive.index_cache.insert(entry_path, entry_header) catch {}; + if ((try archive.file_access.read(std.mem.asBytes(&header))) != header_size) { + std.debug.assert(archive.index_cache.remove(entry_path) != null); - break: find_header entry_header; + return error.EntryNotFound; } - // Move over file data following the entry. - var to_skip = entry_header.file_size; + break: find_header header; + } else { + const mem = std.mem; - while (to_skip != 0) { - const skipped = std.math.min(to_skip, std.math.maxInt(i64)); + // Start from beginning of archive. + try archive.file_access.seek(0); - try archive.file_access.skip(@intCast(i64, skipped)); + // Read first entry. + while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) { + if (mem.eql(u8, entry_path, header.name_buffer[0 .. header.name_length])) { + // If caching fails... oh well... + archive.index_cache.insert(entry_path, header.absolute_offset) catch {}; - to_skip -= skipped; + break: find_header header; + } + + // Move over file data following the entry. + var to_skip = header.file_size; + + while (to_skip != 0) { + const math = std.math; + const skipped = math.min(to_skip, math.maxInt(i64)); + + try archive.file_access.skip(@intCast(i64, skipped)); + + to_skip -= skipped; + } } } @@ -109,7 +119,7 @@ pub const Entry = struct { name_buffer: [255]u8 = std.mem.zeroes([255]u8), name_length: u8 = 0, file_size: u64, - file_offset: u64, + absolute_offset: u64, padding: [232]u8 = std.mem.zeroes([232]u8), comptime { diff --git a/src/sys.zig b/src/sys.zig index 6b3dc10..d0a4aee 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -487,7 +487,7 @@ pub const FileSystem = union(enum) { if (archive_entry.cursor >= archive_entry.header.file_size) return error.FileInaccessible; - try file_access.seek(archive_entry.header.file_offset); + try file_access.seek(archive_entry.header.absolute_offset); return file_access.read(buffer[0 .. std.math.min( buffer.len, archive_entry.header.file_size)]); -- 2.34.1 From 3d3d0e488acc1ffcc47dd267209ead88b6802926 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 15 Oct 2022 21:21:02 +0100 Subject: [PATCH 29/93] Fix incorrectly cleaning up data archive memory in app context --- src/oar.zig | 27 +++++++++++++++++++++++---- src/sys.zig | 3 ++- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/oar.zig b/src/oar.zig index 1a1cb77..743057a 100644 --- a/src/oar.zig +++ b/src/oar.zig @@ -3,7 +3,7 @@ const sys = @import("./sys.zig"); const table = @import("./table.zig"); /// -/// +/// Thin file-wrapper and in-memory layout cache of an OAR archive file. /// pub const Archive = struct { file_access: sys.FileAccess, @@ -17,15 +17,28 @@ pub const Archive = struct { }; /// - /// + /// See [std.mem.Allocator.Error]. /// pub const InitError = std.mem.Allocator.Error; /// + /// In-memory archive layout cache. /// + /// As the archive is queried via [find], the cache is lazily assembled with the absolute + /// offsets of each queried file. /// const IndexCache = table.Hashed([]const u8, u64, table.string_context); + /// + /// Deinitializes the index cache of `archive`, freeing all associated memory. + /// + /// **Note** that this does nothing to the [FileAccess] value that was provided as part of + /// [init]. + /// + pub fn deint(archive: *Archive) void { + archive.index_cache.deinit(); + } + /// /// Finds an entry matching `entry_path` in `archive`. 
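
Taken together, init, find, and deint (as the function is spelled in this patch) give the archive a lazily built index: the first lookup for a path scans the file from the start and caches the entry's absolute offset, later lookups seek straight to it, and a cached entry that no longer reads back a full header is evicted. A minimal usage sketch, assuming a std.mem.Allocator and an already-open FileAccess for the archive file; the entry path is hypothetical:

    var archive = try oar.Archive.init(allocator, archive_file_access);

    // Frees only the index cache; closing the underlying file remains the caller's job.
    defer archive.deint();

    // The first lookup walks the archive linearly and records the absolute offset.
    const entry = try archive.find("scripts/hello.txt");

    // A repeat lookup for the same path is served from the index cache.
    _ = try archive.find("scripts/hello.txt");
    _ = entry;
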
/// @@ -90,11 +103,17 @@ pub const Archive = struct { } /// + /// Attempts to initialize a new [Archive] with `cache_allocator` as the allocator for managing + /// the in-memory archive layout caches and `archive_file_access` as the currently open archive + /// file. /// + /// **Note** that `archive_file_access` does nothing to manage the lifetime of the open file. /// - pub fn init(allocator: std.mem.Allocator, archive_file_access: sys.FileAccess) InitError!Archive { + pub fn init(cache_allocator: std.mem.Allocator, + archive_file_access: sys.FileAccess) InitError!Archive { + return Archive{ - .index_cache = try IndexCache.init(allocator), + .index_cache = try IndexCache.init(cache_allocator), .file_access = archive_file_access, }; } diff --git a/src/sys.zig b/src/sys.zig index d0a4aee..cdda969 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -102,9 +102,10 @@ pub const AppContext = opaque { } } + implementation.data_file_system.archive.instance.deint(); + ext.SDL_free(implementation.user_path_prefix); ext.SDL_DestroyMutex(implementation.message_mutex); ext.SDL_DestroySemaphore(implementation.message_semaphore); - ext.SDL_free(implementation.user_path_prefix); } /// -- 2.34.1 From 489ece4b7ba8e6db9fd0badeb155272396599917 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 10:34:04 +0100 Subject: [PATCH 30/93] Refactor codebase --- build.zig | 42 +++++++++--- src/{ => engine}/main.zig | 21 ++---- src/{ => engine}/sys.zig | 125 +++++----------------------------- src/{oar.zig => oar/main.zig} | 32 ++++++--- src/{ => ona}/io.zig | 96 ++++++++++++++++++++++++++ src/ona/main.zig | 25 +++++++ src/{ => ona}/mem.zig | 0 src/{ => ona}/meta.zig | 0 src/{ => ona}/stack.zig | 0 src/{ => ona}/table.zig | 0 10 files changed, 198 insertions(+), 143 deletions(-) rename src/{ => engine}/main.zig (75%) rename src/{ => engine}/sys.zig (87%) rename src/{oar.zig => oar/main.zig} (87%) rename src/{ => ona}/io.zig (57%) create mode 100644 src/ona/main.zig rename src/{ => ona}/mem.zig (100%) rename src/{ => ona}/meta.zig (100%) rename src/{ => ona}/stack.zig (100%) rename src/{ => ona}/table.zig (100%) diff --git a/build.zig b/build.zig index 2a81c07..f0a0a5b 100644 --- a/build.zig +++ b/build.zig @@ -1,21 +1,28 @@ const std = @import("std"); +/// +/// Builds the engine, tools, and dependencies of all. +/// pub fn build(builder: *std.build.Builder) void { const target = builder.standardTargetOptions(.{}); const mode = builder.standardReleaseOptions(); + const ona_pkg = projectPkg("ona", &.{}); - // Ona executable. + // Engine executable. { - const ona_exe = builder.addExecutable("ona", "./src/main.zig"); + const engine_exe = builder.addExecutable("ona", "./src/engine/main.zig"); + const oar_pkg = projectPkg("oar", &.{ona_pkg}); - ona_exe.setTarget(target); - ona_exe.setBuildMode(mode); - ona_exe.install(); - ona_exe.addIncludeDir("./ext"); - ona_exe.linkSystemLibrary("SDL2"); - ona_exe.linkLibC(); + engine_exe.addPackage(oar_pkg); + engine_exe.addPackage(ona_pkg); + engine_exe.setTarget(target); + engine_exe.setBuildMode(mode); + engine_exe.install(); + engine_exe.addIncludeDir("./ext"); + engine_exe.linkSystemLibrary("SDL2"); + engine_exe.linkLibC(); - const run_cmd = ona_exe.run(); + const run_cmd = engine_exe.run(); run_cmd.step.dependOn(builder.getInstallStep()); @@ -24,6 +31,11 @@ pub fn build(builder: *std.build.Builder) void { builder.step("run", "Run Ona application").dependOn(&run_cmd.step); } + // Oar executable. + { + + } + // Ona tests. 
{ const ona_tests = builder.addTestExe("test", "./src/main.zig"); @@ -33,3 +45,15 @@ pub fn build(builder: *std.build.Builder) void { builder.step("test", "Run Ona unit tests").dependOn(&ona_tests.step); } } + +/// +/// Returns a [std.build.Pkg] within the project codebase path at `name` with `dependencies` as its +/// dependencies. +/// +fn projectPkg(comptime name: []const u8, dependencies: []const std.build.Pkg) std.build.Pkg { + return std.build.Pkg{ + .name = name, + .path = .{.path = "./src/" ++ name ++ "/main.zig"}, + .dependencies = dependencies, + }; +} diff --git a/src/main.zig b/src/engine/main.zig similarity index 75% rename from src/main.zig rename to src/engine/main.zig index 2f85626..98a5af0 100644 --- a/src/main.zig +++ b/src/engine/main.zig @@ -1,9 +1,4 @@ -const ext = @cImport({ - @cInclude("SDL2/SDL.h"); -}); - -const io = @import("./io.zig"); -const stack = @import("./stack.zig"); +const ona = @import("ona"); const std = @import("std"); const sys = @import("./sys.zig"); @@ -14,13 +9,6 @@ pub fn main() anyerror!void { return nosuspend await async sys.runGraphics(anyerror, run); } -test { - _ = io; - _ = stack; - _ = std; - _ = sys; -} - fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; @@ -37,8 +25,7 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { defer allocator.free(buffer); - if ((try file_access.read(buffer)) != file_size) - return error.ScriptLoadFailure; + if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; sys.Log.debug.write(buffer); } @@ -47,3 +34,7 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { graphics.present(); } } + +test { + _ = sys; +} diff --git a/src/sys.zig b/src/engine/sys.zig similarity index 87% rename from src/sys.zig rename to src/engine/sys.zig index cdda969..709c689 100644 --- a/src/sys.zig +++ b/src/engine/sys.zig @@ -2,11 +2,8 @@ const ext = @cImport({ @cInclude("SDL2/SDL.h"); }); -const io = @import("./io.zig"); -const mem = @import("./mem.zig"); -const meta = @import("./meta.zig"); -const oar = @import("./oar.zig"); -const stack = @import("./stack.zig"); +const oar = @import("oar"); +const ona = @import("ona"); const std = @import("std"); /// @@ -131,12 +128,14 @@ pub const AppContext = opaque { } /// - /// Initializes a new [Implemenation] with `data_access` as the data archive to read from - /// and `user_path_prefix` as the native writable user data directory. + /// Initializes a new [Implemenation] with `data_archive_file_access` as the data archive to + /// read from and `user_path_prefix` as the native writable user data directory. /// /// Returns the created [Implementation] value on success or [InitError] on failure. 
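
For reference, the projectPkg helper above is only shorthand for a std.build.Pkg literal rooted at the module's main.zig; projectPkg("oar", &.{ona_pkg}) evaluates to roughly the following, where ona_pkg is the package declared earlier in build.zig:

    const oar_pkg = std.build.Pkg{
        .name = "oar",
        .path = .{.path = "./src/oar/main.zig"},
        .dependencies = &.{ona_pkg},
    };

Because ona_pkg is listed as a dependency, files under src/oar can @import("ona"), which src/oar/main.zig relies on.
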
/// - fn init(allocator: std.mem.Allocator, data_access: FileAccess) InitError!Implementation { + fn init(allocator: std.mem.Allocator, + data_archive_file_access: ona.io.FileAccess) InitError!Implementation { + const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse return error.OutOfMemory; @@ -149,7 +148,7 @@ pub const AppContext = opaque { .user_path_prefix = user_path_prefix, .data_file_system = .{.archive = .{ - .instance = try oar.Archive.init(allocator, data_access), + .instance = try oar.Archive.init(allocator, data_archive_file_access), }}, .message_thread = null, @@ -223,12 +222,12 @@ pub const AppContext = opaque { /// /// pub fn schedule(app_context: *AppContext, procedure: anytype, - arguments: anytype) meta.FnReturn(@TypeOf(procedure)) { + arguments: anytype) ona.meta.FnReturn(@TypeOf(procedure)) { const Task = struct { procedure: @TypeOf(procedure), arguments: *@TypeOf(arguments), - result: meta.FnReturn(@TypeOf(procedure)), + result: ona.meta.FnReturn(@TypeOf(procedure)), const Task = @This(); @@ -263,102 +262,6 @@ pub const AppContext = opaque { } }; -/// -/// File-system agnostic abstraction for manipulating a file. -/// -pub const FileAccess = struct { - context: *anyopaque, - implementation: *const Implementation, - - /// - /// Provides a set of implementation-specific behaviors to a [FileAccess] instance. - /// - pub const Implementation = struct { - close: fn (*anyopaque) void, - queryCursor: fn (*anyopaque) Error!u64, - queryLength: fn (*anyopaque) Error!u64, - read: fn (*anyopaque, []u8) Error!usize, - seek: fn (*anyopaque, u64) Error!void, - seekToEnd: fn (*anyopaque) Error!void, - skip: fn (*anyopaque, i64) Error!void, - }; - - /// - /// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer - /// pointing to a file or the file becomming invalid for whatever reason. - /// - pub const Error = error { - FileInaccessible, - }; - - /// - /// Close the file referenced by `file_access` on the main thread, invalidating the reference to - /// it and releasing any associated resources. - /// - /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the - /// wasted effort. - /// - pub fn close(file_access: *FileAccess) void { - return file_access.implementation.close(file_access.context); - } - - /// - /// Attempts to query the current cursor position for the file referenced by `file_access`. - /// - /// Returns the number of bytes into the file that the cursor is relative to its beginning or a - /// [Error] on failure. - /// - pub fn queryCursor(file_access: *FileAccess) Error!u64 { - return file_access.implementation.queryCursor(file_access.context); - } - - /// - /// Attempts to query the current length for the file referenced by `file_access`. - /// - /// Returns the current length of the file at the time of the operation or a [Error] if the file - /// failed to be queried. - /// - pub fn queryLength(file_access: *FileAccess) Error!u64 { - return file_access.implementation.queryLength(file_access.context); - } - - /// - /// Attempts to read `file_access` from the its current position into `buffer`. - /// - /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. - /// - pub fn read(file_access: *FileAccess, buffer: []u8) Error!usize { - return file_access.implementation.read(file_access.context, buffer); - } - - /// - /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes. - /// - /// Returns [Error] on failure. 
- /// - pub fn seek(file_access: *FileAccess, cursor: u64) Error!void { - return file_access.implementation.seek(file_access.context, cursor); - } - - /// - /// Attempts to seek `file_access` to the end of the file. - /// - /// Returns [Error] on failure. - /// - pub fn seekToEnd(file_access: *FileAccess) Error!void { - return file_access.implementation.seekToEnd(file_access.context); - } - - /// - /// Attempts to seek `file_access` by `offset` from the current file position. - /// - /// Returns [Error] on failure; - /// - pub fn skip(file_access: *FileAccess, offset: i64) Error!void { - return file_access.implementation.skip(file_access.context, offset); - } -}; - /// /// Platform-agnostic mechanism for working with an abstraction of the underlying file-system(s) /// available to the application in a sandboxed environment. @@ -447,11 +350,13 @@ pub const FileSystem = union(enum) { /// Returns a [FileAccess] reference that provides access to the file referenced by `path` /// or a [OpenError] if it failed. /// - pub fn open(path: Path, mode: OpenMode) OpenError!FileAccess { + pub fn open(path: Path, mode: OpenMode) OpenError!ona.io.FileAccess { switch (path.file_system.*) { .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; + const FileAccess = ona.io.FileAccess; + for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { fn close(context: *anyopaque) void { @@ -567,6 +472,8 @@ pub const FileSystem = union(enum) { ext.SDL_ClearError(); + const FileAccess = ona.io.FileAccess; + const Implementation = struct { fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { return @ptrCast(*ext.SDL_RWops, @alignCast( @@ -693,7 +600,7 @@ pub const FileSystem = union(enum) { const last_sequence_index = sequences.len - 1; for (sequences) |sequence, index| if (sequence.len != 0) { - var components = mem.Spliterator(u8){ + var components = ona.mem.Spliterator(u8){ .source = sequence, .delimiter = "/", }; diff --git a/src/oar.zig b/src/oar/main.zig similarity index 87% rename from src/oar.zig rename to src/oar/main.zig index 743057a..98429ed 100644 --- a/src/oar.zig +++ b/src/oar/main.zig @@ -1,18 +1,17 @@ +const ona = @import("ona"); const std = @import("std"); -const sys = @import("./sys.zig"); -const table = @import("./table.zig"); /// /// Thin file-wrapper and in-memory layout cache of an OAR archive file. /// pub const Archive = struct { - file_access: sys.FileAccess, + file_access: ona.io.FileAccess, index_cache: IndexCache, /// /// [OpenError.EntryNotFound] happens when an entry could not be found. /// - pub const FindError = sys.FileAccess.Error || error { + pub const FindError = ona.io.FileAccess.Error || error { EntryNotFound, }; @@ -27,7 +26,7 @@ pub const Archive = struct { /// As the archive is queried via [find], the cache is lazily assembled with the absolute /// offsets of each queried file. /// - const IndexCache = table.Hashed([]const u8, u64, table.string_context); + const IndexCache = ona.table.Hashed([]const u8, u64, ona.table.string_context); /// /// Deinitializes the index cache of `archive`, freeing all associated memory. @@ -49,6 +48,7 @@ pub const Archive = struct { .header = find_header: { var header = Entry.Header{ .revision = 0, + .path = Path.empty, .file_size = 0, .absolute_offset = 0 }; @@ -73,7 +73,7 @@ pub const Archive = struct { // Read first entry. while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) { - if (mem.eql(u8, entry_path, header.name_buffer[0 .. 
header.name_length])) { + if (mem.eql(u8, entry_path, header.path.buffer[0 .. header.path.length])) { // If caching fails... oh well... archive.index_cache.insert(entry_path, header.absolute_offset) catch {}; @@ -110,7 +110,7 @@ pub const Archive = struct { /// **Note** that `archive_file_access` does nothing to manage the lifetime of the open file. /// pub fn init(cache_allocator: std.mem.Allocator, - archive_file_access: sys.FileAccess) InitError!Archive { + archive_file_access: ona.io.FileAccess) InitError!Archive { return Archive{ .index_cache = try IndexCache.init(cache_allocator), @@ -123,7 +123,7 @@ pub const Archive = struct { /// Handles the state of an opened archive entry. /// pub const Entry = struct { - owner: ?*sys.FileAccess, + owner: ?*ona.io.FileAccess, cursor: u64, header: Header, @@ -135,8 +135,7 @@ pub const Entry = struct { pub const Header = extern struct { signature: [signature_magic.len]u8 = signature_magic, revision: u8, - name_buffer: [255]u8 = std.mem.zeroes([255]u8), - name_length: u8 = 0, + path: Path, file_size: u64, absolute_offset: u64, padding: [232]u8 = std.mem.zeroes([232]u8), @@ -155,3 +154,16 @@ pub const Entry = struct { pub const signature_magic = [3]u8{'o', 'a', 'r'}; }; }; + +/// +/// +/// +pub const Path = extern struct { + buffer: [255]u8, + length: u8, + + /// + /// + /// + pub const empty = std.mem.zeroes(Path); +}; diff --git a/src/io.zig b/src/ona/io.zig similarity index 57% rename from src/io.zig rename to src/ona/io.zig index 58a3c38..6ac1d87 100644 --- a/src/io.zig +++ b/src/ona/io.zig @@ -1,6 +1,102 @@ const stack = @import("./stack.zig"); const std = @import("std"); +/// +/// File-system agnostic abstraction for manipulating a file. +/// +pub const FileAccess = struct { + context: *anyopaque, + implementation: *const Implementation, + + /// + /// Provides a set of implementation-specific behaviors to a [FileAccess] instance. + /// + pub const Implementation = struct { + close: fn (*anyopaque) void, + queryCursor: fn (*anyopaque) Error!u64, + queryLength: fn (*anyopaque) Error!u64, + read: fn (*anyopaque, []u8) Error!usize, + seek: fn (*anyopaque, u64) Error!void, + seekToEnd: fn (*anyopaque) Error!void, + skip: fn (*anyopaque, i64) Error!void, + }; + + /// + /// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer + /// pointing to a file or the file becomming invalid for whatever reason. + /// + pub const Error = error { + FileInaccessible, + }; + + /// + /// Close the file referenced by `file_access` on the main thread, invalidating the reference to + /// it and releasing any associated resources. + /// + /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the + /// wasted effort. + /// + pub fn close(file_access: *FileAccess) void { + return file_access.implementation.close(file_access.context); + } + + /// + /// Attempts to query the current cursor position for the file referenced by `file_access`. + /// + /// Returns the number of bytes into the file that the cursor is relative to its beginning or a + /// [Error] on failure. + /// + pub fn queryCursor(file_access: *FileAccess) Error!u64 { + return file_access.implementation.queryCursor(file_access.context); + } + + /// + /// Attempts to query the current length for the file referenced by `file_access`. + /// + /// Returns the current length of the file at the time of the operation or a [Error] if the file + /// failed to be queried. 
+ /// + pub fn queryLength(file_access: *FileAccess) Error!u64 { + return file_access.implementation.queryLength(file_access.context); + } + + /// + /// Attempts to read `file_access` from the its current position into `buffer`. + /// + /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. + /// + pub fn read(file_access: *FileAccess, buffer: []u8) Error!usize { + return file_access.implementation.read(file_access.context, buffer); + } + + /// + /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes. + /// + /// Returns [Error] on failure. + /// + pub fn seek(file_access: *FileAccess, cursor: u64) Error!void { + return file_access.implementation.seek(file_access.context, cursor); + } + + /// + /// Attempts to seek `file_access` to the end of the file. + /// + /// Returns [Error] on failure. + /// + pub fn seekToEnd(file_access: *FileAccess) Error!void { + return file_access.implementation.seekToEnd(file_access.context); + } + + /// + /// Attempts to seek `file_access` by `offset` from the current file position. + /// + /// Returns [Error] on failure; + /// + pub fn skip(file_access: *FileAccess, offset: i64) Error!void { + return file_access.implementation.skip(file_access.context, offset); + } +}; + /// /// Opaque interface to a "writable" resource, such as a block device, memory buffer, or network /// socket. diff --git a/src/ona/main.zig b/src/ona/main.zig new file mode 100644 index 0000000..96ef49c --- /dev/null +++ b/src/ona/main.zig @@ -0,0 +1,25 @@ + +/// +/// +/// +pub const io = @import("./io.zig"); + +/// +/// +/// +pub const mem = @import("./mem.zig"); + +/// +/// +/// +pub const meta = @import("./meta.zig"); + +/// +/// +/// +pub const stack = @import("./stack.zig"); + +/// +/// +/// +pub const table = @import("./table.zig"); diff --git a/src/mem.zig b/src/ona/mem.zig similarity index 100% rename from src/mem.zig rename to src/ona/mem.zig diff --git a/src/meta.zig b/src/ona/meta.zig similarity index 100% rename from src/meta.zig rename to src/ona/meta.zig diff --git a/src/stack.zig b/src/ona/stack.zig similarity index 100% rename from src/stack.zig rename to src/ona/stack.zig diff --git a/src/table.zig b/src/ona/table.zig similarity index 100% rename from src/table.zig rename to src/ona/table.zig -- 2.34.1 From 1891a420e86db0d3b54e12b9da600736f3f0bb81 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 12:20:35 +0100 Subject: [PATCH 31/93] Reorganize and refactor project tooling --- .vscode/launch.json | 4 ++-- .vscode/tasks.json | 11 +---------- build.zig | 42 +++++++++++++++++++----------------------- src/engine/main.zig | 2 +- src/oar/main.zig | 16 ++++++++++------ src/tests.zig | 5 +++++ 6 files changed, 38 insertions(+), 42 deletions(-) create mode 100644 src/tests.zig diff --git a/.vscode/launch.json b/.vscode/launch.json index 195c1f0..4476fb8 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -2,13 +2,13 @@ "version": "0.2.0", "configurations": [ { - "name": "Build", + "name": "Debug", "type": "gdb", "request": "launch", "target": "${workspaceFolder}/zig-out/bin/ona", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", - "preLaunchTask": "Build", + "preLaunchTask": "Build Debug", }, { "name": "Test", diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 61ebf8f..c94af69 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -3,7 +3,7 @@ "tasks": [ { - "label": "Build", + "label": "Build Debug", "type": "shell", "command": "zig build", @@ -41,15 +41,6 @@ } } }, - 
{ - "label": "Test", - "type": "shell", - "command": "$(find zig-cache -name test) src/main.zig", - "group": { - "kind": "test", - "isDefault": true - }, - }, { "label": "Build Test", "type": "shell", diff --git a/build.zig b/build.zig index f0a0a5b..89dc34a 100644 --- a/build.zig +++ b/build.zig @@ -10,39 +10,35 @@ pub fn build(builder: *std.build.Builder) void { // Engine executable. { - const engine_exe = builder.addExecutable("ona", "./src/engine/main.zig"); - const oar_pkg = projectPkg("oar", &.{ona_pkg}); + const exe = builder.addExecutable("engine", "./src/engine/main.zig"); - engine_exe.addPackage(oar_pkg); - engine_exe.addPackage(ona_pkg); - engine_exe.setTarget(target); - engine_exe.setBuildMode(mode); - engine_exe.install(); - engine_exe.addIncludeDir("./ext"); - engine_exe.linkSystemLibrary("SDL2"); - engine_exe.linkLibC(); - - const run_cmd = engine_exe.run(); - - run_cmd.step.dependOn(builder.getInstallStep()); - - if (builder.args) |args| run_cmd.addArgs(args); - - builder.step("run", "Run Ona application").dependOn(&run_cmd.step); + exe.addPackage(projectPkg("oar", &.{ona_pkg})); + exe.addPackage(ona_pkg); + exe.setTarget(target); + exe.setBuildMode(mode); + exe.install(); + exe.addIncludeDir("./ext"); + exe.linkSystemLibrary("SDL2"); + exe.linkLibC(); } // Oar executable. { + const exe = builder.addExecutable("oar", "./src/oar/main.zig"); + exe.addPackage(ona_pkg); + exe.setTarget(target); + exe.setBuildMode(mode); + exe.install(); } - // Ona tests. + // Tests executable. { - const ona_tests = builder.addTestExe("test", "./src/main.zig"); + const tests = builder.addTestExe("test", "./src/tests.zig"); - ona_tests.setTarget(target); - ona_tests.setBuildMode(mode); - builder.step("test", "Run Ona unit tests").dependOn(&ona_tests.step); + tests.setTarget(target); + tests.setBuildMode(mode); + builder.step("test", "Run unit tests").dependOn(&tests.step); } } diff --git a/src/engine/main.zig b/src/engine/main.zig index 98a5af0..1dd0322 100644 --- a/src/engine/main.zig +++ b/src/engine/main.zig @@ -3,7 +3,7 @@ const std = @import("std"); const sys = @import("./sys.zig"); /// -/// Entry point. +/// Starts the the game engine. /// pub fn main() anyerror!void { return nosuspend await async sys.runGraphics(anyerror, run); diff --git a/src/oar/main.zig b/src/oar/main.zig index 98429ed..d5358fb 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -48,7 +48,7 @@ pub const Archive = struct { .header = find_header: { var header = Entry.Header{ .revision = 0, - .path = Path.empty, + .path = std.mem.zeroes(Path), .file_size = 0, .absolute_offset = 0 }; @@ -161,9 +161,13 @@ pub const Entry = struct { pub const Path = extern struct { buffer: [255]u8, length: u8, - - /// - /// - /// - pub const empty = std.mem.zeroes(Path); }; + +/// +/// Starts the **O**na **Ar**chive packer utility. +/// +pub fn main() u8 { + // TODO: Implement. 
+ + return 0; +} diff --git a/src/tests.zig b/src/tests.zig new file mode 100644 index 0000000..0444b9e --- /dev/null +++ b/src/tests.zig @@ -0,0 +1,5 @@ +test { + _ = @import("./engine/main.zig"); + _ = @import("./oar/main.zig"); + _ = @import("./ona/main.zig"); +} -- 2.34.1 From d05800a6d3ff00f5ecff59d1e0e9467a22ef695a Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 12:24:00 +0100 Subject: [PATCH 32/93] Add missing tests to module entry points --- src/oar/main.zig | 4 ++++ src/ona/main.zig | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/src/oar/main.zig b/src/oar/main.zig index d5358fb..8b74a32 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -171,3 +171,7 @@ pub fn main() u8 { return 0; } + +test { + +} diff --git a/src/ona/main.zig b/src/ona/main.zig index 96ef49c..deffbfd 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -23,3 +23,11 @@ pub const stack = @import("./stack.zig"); /// /// pub const table = @import("./table.zig"); + +test { + _ = io; + _ = mem; + _ = meta; + _ = stack; + _ = table; +} -- 2.34.1 From 87d7126632b1f99988e46a8a5f8920917e792872 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 12:29:14 +0100 Subject: [PATCH 33/93] Add missing comments to Ona module --- src/ona/main.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/ona/main.zig b/src/ona/main.zig index deffbfd..bef57aa 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -1,26 +1,26 @@ /// -/// +/// Platform-agnostic input and output interfaces for working with memory, files, and networks. /// pub const io = @import("./io.zig"); /// -/// +/// Memory utilities. /// pub const mem = @import("./mem.zig"); /// -/// +/// Metaprogramming introspection utilities /// pub const meta = @import("./meta.zig"); /// -/// +/// Sequential last-in first-out data structures. /// pub const stack = @import("./stack.zig"); /// -/// +/// Unordered key-value association data structures. /// pub const table = @import("./table.zig"); -- 2.34.1 From d96364e04bd4ccc9789c3bad19a59645095cc275 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 12:44:32 +0100 Subject: [PATCH 34/93] Merge I/O and memory logic in Ona module --- src/engine/sys.zig | 2 +- src/ona/io.zig | 86 +++++++++++++++++++++++++++++++++++++++++++++ src/ona/main.zig | 6 ---- src/ona/mem.zig | 87 ---------------------------------------------- 4 files changed, 87 insertions(+), 94 deletions(-) delete mode 100644 src/ona/mem.zig diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 709c689..79167f1 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -600,7 +600,7 @@ pub const FileSystem = union(enum) { const last_sequence_index = sequences.len - 1; for (sequences) |sequence, index| if (sequence.len != 0) { - var components = ona.mem.Spliterator(u8){ + var components = ona.io.Spliterator(u8){ .source = sequence, .delimiter = "/", }; diff --git a/src/ona/io.zig b/src/ona/io.zig index 6ac1d87..de8627b 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -97,6 +97,78 @@ pub const FileAccess = struct { } }; +/// +/// Returns a state machine for lazily computing all `Element` components of a given source input +/// that match a delimiting pattern. +/// +pub fn Spliterator(comptime Element: type) type { + return struct { + source: []const Element, + delimiter: []const Element, + + const Self = @This(); + + /// + /// Returns `true` if there is more data to be processed, otherwise `false`. 
+ /// + pub fn hasNext(self: Self) bool { + return (self.source.len != 0); + } + + /// + /// Iterates on `self` and returns the next view of [Spliterator.source] that matches + /// [Spliterator.delimiter], or `null` if there is no more data to be processed. + /// + pub fn next(self: *Self) ?[]const Element { + if (!self.hasNext()) return null; + + if (std.mem.indexOfPos(Element, self.source, 0, self.delimiter)) |index| { + defer self.source = self.source[(index + self.delimiter.len) .. self.source.len]; + + return self.source[0 .. index]; + } + + defer self.source = self.source[self.source.len .. self.source.len]; + + return self.source; + } + }; +} + +test { + const testing = std.testing; + + // Single-character delimiter. + { + var spliterator = Spliterator(u8){ + .source = "single.character.separated.hello.world", + .delimiter = ".", + }; + + const components = [_][]const u8{"single", "character", "separated", "hello", "world"}; + var index = @as(usize, 0); + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(std.mem.eql(u8, split, components[index])); + } + } + + // Multi-character delimiter. + { + var spliterator = Spliterator(u8){ + .source = "finding a needle in a needle stack", + .delimiter = "needle", + }; + + const components = [_][]const u8{"finding a ", " in a ", " stack"}; + var index = @as(usize, 0); + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(std.mem.eql(u8, split, components[index])); + } + } +} + /// /// Opaque interface to a "writable" resource, such as a block device, memory buffer, or network /// socket. @@ -214,6 +286,20 @@ pub const Writer = struct { } }; +/// +/// Searches the slice of `Data` referenced by `data` for the first instance of `sought_datum`, +/// returning its index or `null` if it could not be found. +/// +pub fn findFirst(comptime Data: type, data: []const Data, sought_datum: Data) ?usize { + for (data) |datum, index| if (datum == sought_datum) return index; + + return null; +} + +test { + try std.testing.expectEqual(findFirst(u8, "1234567890", '7'), 6); +} + /// /// Writer that silently throws consumed data away and never fails. /// diff --git a/src/ona/main.zig b/src/ona/main.zig index bef57aa..0aa7691 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -4,11 +4,6 @@ /// pub const io = @import("./io.zig"); -/// -/// Memory utilities. -/// -pub const mem = @import("./mem.zig"); - /// /// Metaprogramming introspection utilities /// @@ -26,7 +21,6 @@ pub const table = @import("./table.zig"); test { _ = io; - _ = mem; _ = meta; _ = stack; _ = table; diff --git a/src/ona/mem.zig b/src/ona/mem.zig deleted file mode 100644 index 3df40b9..0000000 --- a/src/ona/mem.zig +++ /dev/null @@ -1,87 +0,0 @@ -const std = @import("std"); - -/// -/// State machine for lazily computing all components of [Spliterator.source] that match the pattern -/// in [Spliterator.delimiter]. -/// -pub fn Spliterator(comptime Element: type) type { - return struct { - source: []const Element, - delimiter: []const Element, - - const Self = @This(); - - /// - /// Returns `true` if there is more data to be processed, otherwise `false`. - /// - pub fn hasNext(self: Self) bool { - return (self.source.len != 0); - } - - /// - /// Iterates on `self` and returns the next view of [Spliterator.source] that matches - /// [Spliterator.delimiter], or `null` if there is no more data to be processed. 
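
The joinedPath logic in sys.zig consumes this Spliterator with "/" as its delimiter and skips the empty components that doubled or trailing separators produce; a small sketch of that pattern, with a hypothetical input path and assuming the ona package import used elsewhere in the engine:

    var components = ona.io.Spliterator(u8){
        .source = "data//scripts/",
        .delimiter = "/",
    };

    // Yields "data", then "" from the doubled separator, then "scripts";
    // the length check filters the empty pieces out.
    while (components.next()) |component| if (component.len != 0) {
        std.debug.print("{s}\n", .{component});
    };
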
- /// - pub fn next(self: *Self) ?[]const Element { - if (!self.hasNext()) return null; - - if (std.mem.indexOfPos(Element, self.source, 0, self.delimiter)) |index| { - defer self.source = self.source[(index + self.delimiter.len) .. self.source.len]; - - return self.source[0 .. index]; - } - - defer self.source = self.source[self.source.len .. self.source.len]; - - return self.source; - } - }; -} - -test { - const testing = std.testing; - - // Single-character delimiter. - { - var spliterator = Spliterator(u8){ - .source = "single.character.separated.hello.world", - .delimiter = ".", - }; - - const components = [_][]const u8{"single", "character", "separated", "hello", "world"}; - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(std.mem.eql(u8, split, components[index])); - } - } - - // Multi-character delimiter. - { - var spliterator = Spliterator(u8){ - .source = "finding a needle in a needle stack", - .delimiter = "needle", - }; - - const components = [_][]const u8{"finding a ", " in a ", " stack"}; - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(std.mem.eql(u8, split, components[index])); - } - } -} - -/// -/// Searches the slice of `Data` referenced by `data` for the first instance of `sought_datum`, -/// returning its index or `null` if it could not be found. -/// -pub fn findFirst(comptime Data: type, data: []const Data, sought_datum: Data) ?usize { - for (data) |datum, index| if (datum == sought_datum) return index; - - return null; -} - -test { - try std.testing.expectEqual(findFirst(u8, "1234567890", '7'), 6); -} -- 2.34.1 From c8992fec99c049f9fce2e0ec55f26d6571c40fe0 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 14:00:52 +0100 Subject: [PATCH 35/93] Add missing unit tests --- src/ona/io.zig | 18 ++---------------- src/ona/stack.zig | 2 +- src/ona/table.zig | 14 ++++++++++++++ 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/ona/io.zig b/src/ona/io.zig index de8627b..159be80 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -135,7 +135,7 @@ pub fn Spliterator(comptime Element: type) type { }; } -test { +test "Spliteration" { const testing = std.testing; // Single-character delimiter. @@ -286,20 +286,6 @@ pub const Writer = struct { } }; -/// -/// Searches the slice of `Data` referenced by `data` for the first instance of `sought_datum`, -/// returning its index or `null` if it could not be found. -/// -pub fn findFirst(comptime Data: type, data: []const Data, sought_datum: Data) ?usize { - for (data) |datum, index| if (datum == sought_datum) return index; - - return null; -} - -test { - try std.testing.expectEqual(findFirst(u8, "1234567890", '7'), 6); -} - /// /// Writer that silently throws consumed data away and never fails. 
/// @@ -316,7 +302,7 @@ pub const null_writer = Writer{ }.write, }; -test { +test "Null writing" { const testing = std.testing; { diff --git a/src/ona/stack.zig b/src/ona/stack.zig index 4f82925..9ebf9bc 100755 --- a/src/ona/stack.zig +++ b/src/ona/stack.zig @@ -91,7 +91,7 @@ pub fn Fixed(comptime Element: type) type { /// pub const PushError = std.mem.Allocator.Error; -test { +test "Fixed stack manipulation" { const testing = std.testing; var buffer = std.mem.zeroes([4]u8); var stack = Fixed(u8){.buffer = &buffer}; diff --git a/src/ona/table.zig b/src/ona/table.zig index 5bdaa81..d42580d 100644 --- a/src/ona/table.zig +++ b/src/ona/table.zig @@ -198,3 +198,17 @@ pub const string_context = KeyContext([]const u8){ .hash = hashString, .equals = equalsString, }; + +test "Hashed table manipulation with string context" { + const testing = std.testing; + var table = try Hashed([]const u8, u32, string_context).init(testing.allocator); + + defer table.deinit(); + + const foo = @as(u32, 69); + + testing.expectEqual(table.remove("foo"), null); + try table.insert("foo", foo); + testing.expectEqual(table.remove("foo"), foo); + testing.expectEqual(table.remove("foo"), null); +} -- 2.34.1 From a471fd4663e25ed476b54aab101593eb6aef415f Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 14:04:24 +0100 Subject: [PATCH 36/93] Improve test name in Ona module --- src/ona/io.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ona/io.zig b/src/ona/io.zig index 159be80..7c8bee5 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -135,7 +135,7 @@ pub fn Spliterator(comptime Element: type) type { }; } -test "Spliteration" { +test "Spliterating text" { const testing = std.testing; // Single-character delimiter. -- 2.34.1 From 1997c38e9796e7019dc30515cd2a97dc670761d5 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 14:04:44 +0100 Subject: [PATCH 37/93] Fix test build error in hash table --- src/ona/table.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/ona/table.zig b/src/ona/table.zig index d42580d..040a4f7 100644 --- a/src/ona/table.zig +++ b/src/ona/table.zig @@ -207,8 +207,8 @@ test "Hashed table manipulation with string context" { const foo = @as(u32, 69); - testing.expectEqual(table.remove("foo"), null); + try testing.expectEqual(table.remove("foo"), null); try table.insert("foo", foo); - testing.expectEqual(table.remove("foo"), foo); - testing.expectEqual(table.remove("foo"), null); + try testing.expectEqual(table.remove("foo"), foo); + try testing.expectEqual(table.remove("foo"), null); } -- 2.34.1 From 1d2356e942fb565d4ab52ba40cef82741c266947 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 14:42:41 +0100 Subject: [PATCH 38/93] Add library support for UTF-8 to Ona module --- src/ona/utf8.zig | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 src/ona/utf8.zig diff --git a/src/ona/utf8.zig b/src/ona/utf8.zig new file mode 100644 index 0000000..352a9d6 --- /dev/null +++ b/src/ona/utf8.zig @@ -0,0 +1,29 @@ +const std = @import("std"); +const table = @import("./table.zig"); + +/// +/// Tests if the contents of `this_utf8_sequence` lexically equals the contents of +/// `that_utf8_sequence`. +/// +pub fn equals(this_utf8_sequence: []const u8, that_utf8_sequence: []const u8) bool { + return std.mem.eql(u8, this_utf8_sequence, that_utf8_sequence); +} + +/// +/// Returns a deterministic hash for `utf8_sequence`. 
+/// +pub fn hash(utf8_sequence: []const u8) usize { + var utf8_hash = @as(usize, 5381); + + for (utf8_sequence) |utf8_code| utf8_hash = ((utf8_hash << 5) + utf8_hash) + utf8_code; + + return utf8_hash; +} + +/// +/// A [table.KeyContext] for handling UTF-8 character sequences. +/// +pub const key_context = table.KeyContext([]const u8){ + .hash = hash, + .equals = equals, +}; -- 2.34.1 From 2a44f5bf11884813e609344ed565327b81711dd4 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 15:33:49 +0100 Subject: [PATCH 39/93] Abort if debug errors occur in project settings --- .vscode/settings.json | 1 + 1 file changed, 1 insertion(+) diff --git a/.vscode/settings.json b/.vscode/settings.json index 4beb35f..02f06b3 100755 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,4 +14,5 @@ "git.detectSubmodulesLimit": 0, "git.ignoreSubmodules": true, + "debug.onTaskErrors": "abort", } -- 2.34.1 From 9f411025a7bf900fba29475c18bc2f0c20a435dd Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 15:36:46 +0100 Subject: [PATCH 40/93] Make all build configurations show formatted errors in VS Code --- .vscode/settings.json | 2 +- .vscode/tasks.json | 38 +++++++++++++++++++------------------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 02f06b3..33c8d57 100755 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -14,5 +14,5 @@ "git.detectSubmodulesLimit": 0, "git.ignoreSubmodules": true, - "debug.onTaskErrors": "abort", + "debug.onTaskErrors": "showErrors", } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index c94af69..b3326fe 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -1,6 +1,25 @@ { "version": "2.0.0", + "problemMatcher": { + "source": "zig", + "owner": "cpptools", + + "fileLocation": [ + "autoDetect", + "${cwd}", + ], + + "pattern": { + "regexp": "^(.*?):(\\d+):(\\d*):?\\s+(?:fatal\\s+)?(warning|error):\\s+(.*)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5, + } + }, + "tasks": [ { "label": "Build Debug", @@ -21,25 +40,6 @@ "clear": true, "revealProblems": "onProblem", }, - - "problemMatcher": { - "source": "zig", - "owner": "cpptools", - - "fileLocation": [ - "autoDetect", - "${cwd}", - ], - - "pattern": { - "regexp": "^(.*?):(\\d+):(\\d*):?\\s+(?:fatal\\s+)?(warning|error):\\s+(.*)$", - "file": 1, - "line": 2, - "column": 3, - "severity": 4, - "message": 5, - } - } }, { "label": "Build Test", -- 2.34.1 From 033227b24319f8fdac46ae67dd57e059be923cf3 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 15:48:56 +0100 Subject: [PATCH 41/93] Replace UTF-8 compares with standard memory compares --- src/ona/io.zig | 37 +++++++++++++++++++++++++++++++++++++ src/ona/table.zig | 35 +++++++---------------------------- src/ona/utf8.zig | 29 ----------------------------- 3 files changed, 44 insertions(+), 57 deletions(-) delete mode 100644 src/ona/utf8.zig diff --git a/src/ona/io.zig b/src/ona/io.zig index 7c8bee5..ca74716 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -286,6 +286,43 @@ pub const Writer = struct { } }; +/// +/// Returns `true` if `this_bytes` is the same length and contains the same data as `that_bytes`, +/// otherwise `false`. 
+/// +pub fn equalsBytes(this_bytes: []const u8, that_bytes: []const u8) bool { + return std.mem.eql(u8, this_bytes, that_bytes); +} + +test "Equivalence of bytes" { + const bytes_sequence = &.{69, 42, 0}; + const testing = std.testing; + + try testing.expect(equalsBytes(bytes_sequence, bytes_sequence)); + try testing.expect(!equalsBytes(bytes_sequence, &.{69, 42})); +} + +/// +/// Returns a deterministic hash code compiled from each byte in `bytes`. +/// +/// **Note** that this operation has `O(n)` time complexity. +/// +pub fn hashBytes(bytes: []const u8) usize { + var hash = @as(usize, 5381); + + for (bytes) |byte| hash = ((hash << 5) + hash) + byte; + + return hash; +} + +test "Hashing bytes" { + const bytes_sequence = &.{69, 42, 0}; + const testing = std.testing; + + try testing.expect(hashBytes(bytes_sequence) == hashBytes(bytes_sequence)); + try testing.expect(hashBytes(bytes_sequence) != hashBytes(&.{69, 42})); +} + /// /// Writer that silently throws consumed data away and never fails. /// diff --git a/src/ona/table.zig b/src/ona/table.zig index 040a4f7..b844766 100644 --- a/src/ona/table.zig +++ b/src/ona/table.zig @@ -173,35 +173,14 @@ pub fn KeyContext(comptime Key: type) type { }; } -/// -/// Tests if the contents of `this_string` lexically equals the contents of `that_string`. -/// -fn equalsString(this_string: []const u8, that_string: []const u8) bool { - return std.mem.eql(u8, this_string, that_string); -} - -/// -/// Hashes `string` into a hash value of `usize`. -/// -fn hashString(string: []const u8) usize { - var hash = @as(usize, 5381); - - for (string) |byte| hash = ((hash << 5) + hash) + byte; - - return hash; -} - -/// -/// A [KeyContext] for handling `[]const u8` types. -/// -pub const string_context = KeyContext([]const u8){ - .hash = hashString, - .equals = equalsString, -}; - -test "Hashed table manipulation with string context" { +test "Hashed table manipulation with bytes context" { const testing = std.testing; - var table = try Hashed([]const u8, u32, string_context).init(testing.allocator); + const io = @import("./io.zig"); + + var table = try Hashed([]const u8, u32, .{ + .equals = io.equalsBytes, + .hash = io.hashBytes, + }).init(testing.allocator); defer table.deinit(); diff --git a/src/ona/utf8.zig b/src/ona/utf8.zig deleted file mode 100644 index 352a9d6..0000000 --- a/src/ona/utf8.zig +++ /dev/null @@ -1,29 +0,0 @@ -const std = @import("std"); -const table = @import("./table.zig"); - -/// -/// Tests if the contents of `this_utf8_sequence` lexically equals the contents of -/// `that_utf8_sequence`. -/// -pub fn equals(this_utf8_sequence: []const u8, that_utf8_sequence: []const u8) bool { - return std.mem.eql(u8, this_utf8_sequence, that_utf8_sequence); -} - -/// -/// Returns a deterministic hash for `utf8_sequence`. -/// -pub fn hash(utf8_sequence: []const u8) usize { - var utf8_hash = @as(usize, 5381); - - for (utf8_sequence) |utf8_code| utf8_hash = ((utf8_hash << 5) + utf8_hash) + utf8_code; - - return utf8_hash; -} - -/// -/// A [table.KeyContext] for handling UTF-8 character sequences. 
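
hashBytes above is the familiar djb2-style fold: each step computes hash * 33 + byte, with the multiplication expressed as (hash << 5) + hash. Tracing it over the two-byte sequence used in the test:

    start:                           5381
    after byte 69:     5381 * 33 + 69 = 177_642
    after byte 42:  177_642 * 33 + 42 = 5_862_228

so hashBytes(&.{69, 42}) evaluates to 5_862_228, and folding in the trailing 0 of the three-byte test sequence yields a different value, which is what the inequality assertion checks.
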
-/// -pub const key_context = table.KeyContext([]const u8){ - .hash = hash, - .equals = equals, -}; -- 2.34.1 From 1cc19d41da0efbc1aa0a99b20507899069a01181 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 17 Oct 2022 15:49:35 +0100 Subject: [PATCH 42/93] Replace byte slices with Path structure in Oar module --- src/engine/sys.zig | 39 ++++++++++++++++++--------------------- src/oar/main.zig | 33 +++++++++++++++++++++++++++++---- 2 files changed, 47 insertions(+), 25 deletions(-) diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 79167f1..4dc6cdf 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -281,8 +281,7 @@ pub const FileSystem = union(enum) { /// pub const Path = struct { file_system: *FileSystem, - length: u8, - buffer: [max]u8, + path: oar.Path, /// /// With files typically being backed by a block device, they can produce a variety of @@ -430,11 +429,10 @@ pub const FileSystem = union(enum) { } }; - entry.* = archive.instance.find(path.buffer[0 .. path.length]) catch |err| - return switch (err) { - error.FileInaccessible => error.FileNotFound, - error.EntryNotFound => error.FileNotFound, - }; + entry.* = archive.instance.find(path.path) catch |err| return switch (err) { + error.FileInaccessible => error.FileNotFound, + error.EntryNotFound => error.FileNotFound, + }; return FileAccess{ .context = entry, @@ -460,15 +458,15 @@ pub const FileSystem = union(enum) { var path_buffer = std.mem.zeroes([4096]u8); const seperator_length = @boolToInt(native[native.len - 1] != seperator); - if ((native.len + seperator_length + path.length) >= + if ((native.len + seperator_length + path.path.length) >= path_buffer.len) return error.FileNotFound; std.mem.copy(u8, path_buffer[0 ..], native); if (seperator_length != 0) path_buffer[native.len] = seperator; - std.mem.copy(u8, path_buffer[native.len .. - path_buffer.len], path.buffer[0 .. path.length]); + std.mem.copy(u8, path_buffer[native.len .. path_buffer. + len], path.path.buffer[0 .. path.path.length]); ext.SDL_ClearError(); @@ -592,8 +590,7 @@ pub const FileSystem = union(enum) { pub fn joinedPath(file_system: *FileSystem, sequences: []const []const u8) PathError!Path { var path = Path{ .file_system = file_system, - .buffer = std.mem.zeroes([Path.max]u8), - .length = 0, + .path = oar.Path.empty, }; if (sequences.len != 0) { @@ -607,25 +604,25 @@ pub const FileSystem = union(enum) { while (components.next()) |component| if (component.len != 0) { for (component) |byte| { - if (path.length == Path.max) return error.TooLong; + if (path.path.length == Path.max) return error.TooLong; - path.buffer[path.length] = byte; - path.length += 1; + path.path.buffer[path.path.length] = byte; + path.path.length += 1; } if (components.hasNext()) { - if (path.length == Path.max) return error.TooLong; + if (path.path.length == Path.max) return error.TooLong; - path.buffer[path.length] = '/'; - path.length += 1; + path.path.buffer[path.path.length] = '/'; + path.path.length += 1; } }; if (index < last_sequence_index) { - if (path.length == Path.max) return error.TooLong; + if (path.path.length == Path.max) return error.TooLong; - path.buffer[path.length] = '/'; - path.length += 1; + path.path.buffer[path.path.length] = '/'; + path.path.length += 1; } }; } diff --git a/src/oar/main.zig b/src/oar/main.zig index 8b74a32..98db1a2 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -26,7 +26,10 @@ pub const Archive = struct { /// As the archive is queried via [find], the cache is lazily assembled with the absolute /// offsets of each queried file. 
 	///
-	const IndexCache = ona.table.Hashed([]const u8, u64, ona.table.string_context);
+	const IndexCache = ona.table.Hashed(Path, u64, .{
+		.equals = Path.equals,
+		.hash = Path.hash,
+	});
 
 	///
 	/// Deinitializes the index cache of `archive`, freeing all associated memory.
@@ -43,12 +46,12 @@ pub const Archive = struct {
 	///
 	/// The found [Entry] value is returned or a [FindError] if it failed to be found.
 	///
-	pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry {
+	pub fn find(archive: *Archive, entry_path: Path) FindError!Entry {
 		return Entry{
 			.header = find_header: {
 				var header = Entry.Header{
 					.revision = 0,
-					.path = std.mem.zeroes(Path),
+					.path = Path.empty,
 					.file_size = 0,
 					.absolute_offset = 0
 				};
@@ -73,7 +76,7 @@ pub const Archive = struct {
 
 				// Read first entry.
 				while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) {
-					if (mem.eql(u8, entry_path, header.path.buffer[0 .. header.path.length])) {
+					if (entry_path.equals(header.path)) {
 						// If caching fails... oh well...
 						archive.index_cache.insert(entry_path, header.absolute_offset) catch {};
 
@@ -156,11 +159,33 @@ pub const Entry = struct {
 };
 
 ///
+/// Unique identifier pointing to an entry within an archive.
 ///
+/// A path does not verify that the entry it points to actually exists.
 ///
 pub const Path = extern struct {
 	buffer: [255]u8,
 	length: u8,
+
+	///
+	/// An empty [Path] with a length of `0`.
+	///
+	pub const empty = std.mem.zeroes(Path);
+
+	///
+	/// Returns `true` if `this_path` is equal to `that_path`, otherwise `false`.
+	///
+	pub fn equals(this_path: Path, that_path: Path) bool {
+		return ona.io.equalsBytes(this_path.buffer[0 ..this_path.
+			length], that_path.buffer[0 .. that_path.length]);
+	}
+
+	///
+	/// Returns the hash of the text in `path`.
+	///
+	pub fn hash(path: Path) usize {
+		return ona.io.hashBytes(path.buffer[0 .. path.length]);
+	}
 };
 
 ///
-- 
2.34.1


From 3a23f5feca34594847e1501d4972a637eb6d5ea7 Mon Sep 17 00:00:00 2001
From: kayomn
Date: Wed, 19 Oct 2022 00:02:23 +0100
Subject: [PATCH 43/93] Change FileAccess wrapper functions to pass by value

---
 src/ona/io.zig | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/ona/io.zig b/src/ona/io.zig
index ca74716..83a7de3 100644
--- a/src/ona/io.zig
+++ b/src/ona/io.zig
@@ -36,7 +36,7 @@ pub const FileAccess = struct {
 	/// Freeing an invalid `file_access` has no effect on the file and logs a warning over the
 	/// wasted effort.
 	///
-	pub fn close(file_access: *FileAccess) void {
+	pub fn close(file_access: FileAccess) void {
 		return file_access.implementation.close(file_access.context);
 	}
 
@@ -46,7 +46,7 @@ pub const FileAccess = struct {
 	/// Returns the number of bytes into the file that the cursor is relative to its beginning or a
 	/// [Error] on failure.
 	///
-	pub fn queryCursor(file_access: *FileAccess) Error!u64 {
+	pub fn queryCursor(file_access: FileAccess) Error!u64 {
 		return file_access.implementation.queryCursor(file_access.context);
 	}
 
@@ -56,7 +56,7 @@ pub const FileAccess = struct {
 	/// Returns the current length of the file at the time of the operation or a [Error] if the file
 	/// failed to be queried.
 	///
-	pub fn queryLength(file_access: *FileAccess) Error!u64 {
+	pub fn queryLength(file_access: FileAccess) Error!u64 {
 		return file_access.implementation.queryLength(file_access.context);
 	}
 
@@ -65,7 +65,7 @@ pub const FileAccess = struct {
 	///
 	/// Returns the number of bytes that were available to be read, otherwise an [Error] on failure.
/// - pub fn read(file_access: *FileAccess, buffer: []u8) Error!usize { + pub fn read(file_access: FileAccess, buffer: []u8) Error!usize { return file_access.implementation.read(file_access.context, buffer); } @@ -74,7 +74,7 @@ pub const FileAccess = struct { /// /// Returns [Error] on failure. /// - pub fn seek(file_access: *FileAccess, cursor: u64) Error!void { + pub fn seek(file_access: FileAccess, cursor: u64) Error!void { return file_access.implementation.seek(file_access.context, cursor); } @@ -83,7 +83,7 @@ pub const FileAccess = struct { /// /// Returns [Error] on failure. /// - pub fn seekToEnd(file_access: *FileAccess) Error!void { + pub fn seekToEnd(file_access: FileAccess) Error!void { return file_access.implementation.seekToEnd(file_access.context); } @@ -92,7 +92,7 @@ pub const FileAccess = struct { /// /// Returns [Error] on failure; /// - pub fn skip(file_access: *FileAccess, offset: i64) Error!void { + pub fn skip(file_access: FileAccess, offset: i64) Error!void { return file_access.implementation.skip(file_access.context, offset); } }; -- 2.34.1 From 6769ea92afa0139506b5d8b00e2d9f09c93f69d1 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 19 Oct 2022 00:05:49 +0100 Subject: [PATCH 44/93] Simplify Oar API --- src/engine/sys.zig | 83 ++++++++++++++++------ src/oar/main.zig | 170 ++++++++------------------------------------- 2 files changed, 92 insertions(+), 161 deletions(-) diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 4dc6cdf..427ac7c 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -99,7 +99,7 @@ pub const AppContext = opaque { } } - implementation.data_file_system.archive.instance.deint(); + implementation.data_file_system.archive.index_cache.deinit(); ext.SDL_free(implementation.user_path_prefix); ext.SDL_DestroyMutex(implementation.message_mutex); ext.SDL_DestroySemaphore(implementation.message_semaphore); @@ -148,7 +148,8 @@ pub const AppContext = opaque { .user_path_prefix = user_path_prefix, .data_file_system = .{.archive = .{ - .instance = try oar.Archive.init(allocator, data_archive_file_access), + .index_cache = try FileSystem.ArchiveIndexCache.init(allocator), + .file_access = data_archive_file_access, }}, .message_thread = null, @@ -270,12 +271,32 @@ pub const FileSystem = union(enum) { native: []const u8, archive: struct { - instance: oar.Archive, - entry_table: [max_open_entries]oar.Entry = std.mem.zeroes([max_open_entries]oar.Entry), + file_access: ona.io.FileAccess, + index_cache: ArchiveIndexCache, - pub const max_open_entries = 16; + entry_table: [max_open_entries]ArchiveEntry = + std.mem.zeroes([max_open_entries]ArchiveEntry), + + const max_open_entries = 16; }, + /// + /// + /// + const ArchiveEntry = struct { + owner: ?*ona.io.FileAccess, + cursor: u64, + header: oar.Entry, + }; + + /// + /// + /// + const ArchiveIndexCache = ona.table.Hashed(oar.Path, u64, .{ + .equals = oar.Path.equals, + .hash = oar.Path.hash, + }); + /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. 
/// @@ -358,17 +379,17 @@ pub const FileSystem = union(enum) { for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { - fn close(context: *anyopaque) void { - entryCast(context).owner = null; + fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { + return @ptrCast(*ArchiveEntry, @alignCast( + @alignOf(ArchiveEntry), context)); } - fn entryCast(context: *anyopaque) *oar.Entry { - return @ptrCast(*oar.Entry, @alignCast( - @alignOf(oar.Entry), context)); + fn close(context: *anyopaque) void { + archiveEntryCast(context).owner = null; } fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; @@ -376,7 +397,7 @@ pub const FileSystem = union(enum) { } fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; @@ -384,7 +405,7 @@ pub const FileSystem = union(enum) { } fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); const file_access = archive_entry.owner orelse return error.FileInaccessible; @@ -399,7 +420,7 @@ pub const FileSystem = union(enum) { } fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; @@ -407,7 +428,7 @@ pub const FileSystem = union(enum) { } fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; @@ -416,7 +437,7 @@ pub const FileSystem = union(enum) { fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { const math = std.math; - const archive_entry = entryCast(context); + const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; @@ -429,10 +450,32 @@ pub const FileSystem = union(enum) { } }; - entry.* = archive.instance.find(path.path) catch |err| return switch (err) { - error.FileInaccessible => error.FileNotFound, - error.EntryNotFound => error.FileNotFound, - }; + if (archive.index_cache.lookup(path.path)) |index| { + archive.file_access.seek(index) catch return error.FileNotFound; + + entry.* = .{ + .owner = &archive.file_access, + .cursor = 0, + + .header = (oar.Entry.next(archive.file_access) catch return error.FileNotFound) orelse { + // Remove cannot fail if lookup succeeded. + std.debug.assert(archive.index_cache.remove(path.path) != null); + + return error.FileNotFound; + }, + }; + } else { + while (oar.Entry.next(archive.file_access) catch return error.FileNotFound) |entry_header| { + if (entry.header.path.equals(path.path)) + entry.* = .{ + .owner = &archive.file_access, + .cursor = 0, + .header = entry_header, + }; + } + + return error.FileNotFound; + } return FileAccess{ .context = entry, diff --git a/src/oar/main.zig b/src/oar/main.zig index 98db1a2..63ba2e0 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -2,160 +2,48 @@ const ona = @import("ona"); const std = @import("std"); /// -/// Thin file-wrapper and in-memory layout cache of an OAR archive file. 
+/// An entry block of an Oar archive file. /// -pub const Archive = struct { - file_access: ona.io.FileAccess, - index_cache: IndexCache, +/// Typically, following the block in memory is the file data it holds the meta-information for. +/// +pub const Entry = extern struct { + signature: [signature_magic.len]u8 = signature_magic, + revision: u8, + path: Path, + file_size: u64, + absolute_offset: u64, + padding: [232]u8 = std.mem.zeroes([232]u8), - /// - /// [OpenError.EntryNotFound] happens when an entry could not be found. - /// - pub const FindError = ona.io.FileAccess.Error || error { - EntryNotFound, - }; + comptime { + const entry_size = @sizeOf(Entry); - /// - /// See [std.mem.Allocator.Error]. - /// - pub const InitError = std.mem.Allocator.Error; - - /// - /// In-memory archive layout cache. - /// - /// As the archive is queried via [find], the cache is lazily assembled with the absolute - /// offsets of each queried file. - /// - const IndexCache = ona.table.Hashed(Path, u64, .{ - .equals = Path.equals, - .hash = Path.hash, - }); - - /// - /// Deinitializes the index cache of `archive`, freeing all associated memory. - /// - /// **Note** that this does nothing to the [FileAccess] value that was provided as part of - /// [init]. - /// - pub fn deint(archive: *Archive) void { - archive.index_cache.deinit(); + if (entry_size != 512) + @compileError("Entry is " ++ + std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); } /// - /// Finds an entry matching `entry_path` in `archive`. /// - /// The found [Entry] value is returned or a [FindError] if it failed to be found. /// - pub fn find(archive: *Archive, entry_path: Path) FindError!Entry { - return Entry{ - .header = find_header: { - var header = Entry.Header{ - .revision = 0, - .path = Path.empty, - .file_size = 0, - .absolute_offset = 0 - }; + pub fn next(file_access: ona.io.FileAccess) ona.io.FileAccess.Error!?Entry { + var entry = std.mem.zeroes(Entry); + const origin = try file_access.queryCursor(); - const header_size = @sizeOf(Entry.Header); + if (((try file_access.read(std.mem.asBytes(&entry))) != @sizeOf(Entry)) and + ona.io.equalsBytes(entry.signature[0 ..], signature_magic[0 ..])) { - if (archive.index_cache.lookup(entry_path)) |cursor| { - try archive.file_access.seek(cursor); + try file_access.seek(origin); - if ((try archive.file_access.read(std.mem.asBytes(&header))) != header_size) { - std.debug.assert(archive.index_cache.remove(entry_path) != null); - - return error.EntryNotFound; - } - - break: find_header header; - } else { - const mem = std.mem; - - // Start from beginning of archive. - try archive.file_access.seek(0); - - // Read first entry. - while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) { - if (entry_path.equals(header.path)) { - // If caching fails... oh well... - archive.index_cache.insert(entry_path, header.absolute_offset) catch {}; - - break: find_header header; - } - - // Move over file data following the entry. - var to_skip = header.file_size; - - while (to_skip != 0) { - const math = std.math; - const skipped = math.min(to_skip, math.maxInt(i64)); - - try archive.file_access.skip(@intCast(i64, skipped)); - - to_skip -= skipped; - } - } - } - - return error.EntryNotFound; - }, - - .owner = &archive.file_access, - .cursor = 0, - }; - } - - /// - /// Attempts to initialize a new [Archive] with `cache_allocator` as the allocator for managing - /// the in-memory archive layout caches and `archive_file_access` as the currently open archive - /// file. 
- /// - /// **Note** that `archive_file_access` does nothing to manage the lifetime of the open file. - /// - pub fn init(cache_allocator: std.mem.Allocator, - archive_file_access: ona.io.FileAccess) InitError!Archive { - - return Archive{ - .index_cache = try IndexCache.init(cache_allocator), - .file_access = archive_file_access, - }; - } -}; - -/// -/// Handles the state of an opened archive entry. -/// -pub const Entry = struct { - owner: ?*ona.io.FileAccess, - cursor: u64, - header: Header, - - /// - /// An entry block of an Oar archive file. - /// - /// Typically, following the block in memory is the file data it holds the meta-information for. - /// - pub const Header = extern struct { - signature: [signature_magic.len]u8 = signature_magic, - revision: u8, - path: Path, - file_size: u64, - absolute_offset: u64, - padding: [232]u8 = std.mem.zeroes([232]u8), - - comptime { - const entry_size = @sizeOf(Header); - - if (entry_size != 512) - @compileError("Entry is " ++ - std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); + return null; } - /// - /// Magic identifier used to validate [Entry] data. - /// - pub const signature_magic = [3]u8{'o', 'a', 'r'}; - }; + return entry; + } + + /// + /// Magic identifier used to validate [Entry] data. + /// + pub const signature_magic = [3]u8{'o', 'a', 'r'}; }; /// -- 2.34.1 From 174b16199e6d893b3baefac7f8ecc243d5f032a8 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 19 Oct 2022 11:21:54 +0100 Subject: [PATCH 45/93] Split archive FS logic into its own file --- src/engine/sys.zig | 38 +++++++------------------------------- src/engine/sys/Archive.zig | 29 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 31 deletions(-) create mode 100644 src/engine/sys/Archive.zig diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 427ac7c..63a01c5 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -1,3 +1,5 @@ +const Archive = @import("./sys/Archive.zig"); + const ext = @cImport({ @cInclude("SDL2/SDL.h"); }); @@ -148,7 +150,7 @@ pub const AppContext = opaque { .user_path_prefix = user_path_prefix, .data_file_system = .{.archive = .{ - .index_cache = try FileSystem.ArchiveIndexCache.init(allocator), + .index_cache = try Archive.IndexCache.init(allocator), .file_access = data_archive_file_access, }}, @@ -269,33 +271,7 @@ pub const AppContext = opaque { /// pub const FileSystem = union(enum) { native: []const u8, - - archive: struct { - file_access: ona.io.FileAccess, - index_cache: ArchiveIndexCache, - - entry_table: [max_open_entries]ArchiveEntry = - std.mem.zeroes([max_open_entries]ArchiveEntry), - - const max_open_entries = 16; - }, - - /// - /// - /// - const ArchiveEntry = struct { - owner: ?*ona.io.FileAccess, - cursor: u64, - header: oar.Entry, - }; - - /// - /// - /// - const ArchiveIndexCache = ona.table.Hashed(oar.Path, u64, .{ - .equals = oar.Path.equals, - .hash = oar.Path.hash, - }); + archive: Archive, /// /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. 
@@ -379,9 +355,9 @@ pub const FileSystem = union(enum) { for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { - fn archiveEntryCast(context: *anyopaque) *ArchiveEntry { - return @ptrCast(*ArchiveEntry, @alignCast( - @alignOf(ArchiveEntry), context)); + fn archiveEntryCast(context: *anyopaque) *Archive.Entry { + return @ptrCast(*Archive.Entry, @alignCast( + @alignOf(Archive.Entry), context)); } fn close(context: *anyopaque) void { diff --git a/src/engine/sys/Archive.zig b/src/engine/sys/Archive.zig new file mode 100644 index 0000000..500ff4a --- /dev/null +++ b/src/engine/sys/Archive.zig @@ -0,0 +1,29 @@ +const oar = @import("oar"); +const ona = @import("ona"); +const std = @import("std"); + +file_access: ona.io.FileAccess, +index_cache: IndexCache, +entry_table: [max_open_entries]Entry = std.mem.zeroes([max_open_entries]Entry), + +/// +/// Hard limit on the maximum number of entries open at once. +/// +const max_open_entries = 16; + +/// +/// Stateful extension of an [oar.Entry]. +/// +pub const Entry = struct { + owner: ?*ona.io.FileAccess, + cursor: u64, + header: oar.Entry, +}; + +/// +/// Table cache for associating [oar.Path] values with offsets to entries in a given file. +/// +pub const IndexCache = ona.table.Hashed(oar.Path, u64, .{ + .equals = oar.Path.equals, + .hash = oar.Path.hash, +}); -- 2.34.1 From 1a28dc240488e531dca2937640bebd77d074a389 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 19 Oct 2022 14:16:05 +0100 Subject: [PATCH 46/93] Share Oar path type with file system --- src/engine/main.zig | 2 +- src/engine/sys.zig | 537 +++++++++++++++++++------------------------- src/oar/main.zig | 65 ++++++ 3 files changed, 293 insertions(+), 311 deletions(-) diff --git a/src/engine/main.zig b/src/engine/main.zig index 1dd0322..3202d11 100644 --- a/src/engine/main.zig +++ b/src/engine/main.zig @@ -15,7 +15,7 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { defer _ = gpa.deinit(); { - var file_access = try (try app.data().joinedPath(&.{"ona.lua"})).open(.readonly); + var file_access = try app.data().open(try sys.Path.joined(&.{"ona.lua"}), .readonly); defer file_access.close(); diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 63a01c5..0c628e2 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -274,305 +274,160 @@ pub const FileSystem = union(enum) { archive: Archive, /// - /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem]. + /// With files typically being backed by a block device, they can produce a variety of + /// errors - from physical to virtual errors - these are all encapsulated by the API as + /// general [OpenError.FileNotFound] errors. /// - pub const Path = struct { - file_system: *FileSystem, - path: oar.Path, + /// When a given [FileSystem] does not support a specified [OpenMode], + /// [OpenError.ModeUnsupported] is used to inform the consuming code that another [OpenMode] + /// should be tried or, if no mode other is suitable, that the resource is effectively + /// unavailable. + /// + /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] + /// is used to communicate this. + /// + pub const OpenError = error { + FileNotFound, + ModeUnsupported, + OutOfFiles, + }; - /// - /// With files typically being backed by a block device, they can produce a variety of - /// errors - from physical to virtual errors - these are all encapsulated by the API as - /// general [OpenError.FileNotFound] errors. 
- /// - /// When a given [FileSystem] does not support a specified [OpenMode], - /// [OpenError.ModeUnsupported] is used to inform the consuming code that another [OpenMode] - /// should be tried or, if no mode other is suitable, that the resource is effectively - /// unavailable. - /// - /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] - /// is used to communicate this. - /// - pub const OpenError = error { - FileNotFound, - ModeUnsupported, - OutOfFiles, - }; + /// + /// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, + /// disallowing write access. + /// + /// [OpenMode.overwrite] indicates that an empty file has been created or an existing file has + /// been completely overwritten into. + /// + /// [OpenMode.append] indicates that an existing file that has been opened for reading from and + /// writing to on the end of existing data. + /// + pub const OpenMode = enum { + readonly, + overwrite, + append, + }; - /// - /// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, - /// disallowing write access. - /// - /// [OpenMode.overwrite] indicates that an empty file has been created or an existing file - /// has been completely overwritten into. - /// - /// [OpenMode.append] indicates that an existing file that has been opened for reading from - /// and writing to on the end of existing data. - /// - pub const OpenMode = enum { - readonly, - overwrite, - append, - }; + /// + /// Attempts to open the file identified by `path` with `mode` as the mode for opening the file. + /// + /// Returns a [FileAccess] reference that provides access to the file referenced by `path`or a + /// [OpenError] if it failed. + /// + pub fn open(file_system: *FileSystem, path: Path, mode: OpenMode) OpenError!ona.io.FileAccess { + switch (file_system.*) { + .archive => |*archive| { + if (mode != .readonly) return error.ModeUnsupported; - /// - /// Returns `true` if the length of `path` is empty, otherwise `false`. - /// - pub fn isEmpty(path: Path) bool { - return (path.length == 0); - } - - /// - /// Returns `true` if `this` is equal to `that`, otherwise `false`. - /// - pub fn equals(this: Path, that: Path) bool { - return (this.file_system == that.file_system) and - std.mem.eql(u8, this.buffer[0 .. this.length], that.buffer[0 .. that.length]); - } - - /// - /// The maximum possible byte-length of a [Path]. - /// - /// Note that paths are encoded using UTF-8, meaning that a character may be bigger than one - /// byte. Because of this, it is not safe to asume that a path may hold [max] individual - /// characters. - /// - pub const max = 255; - - /// - /// Attempts to open the file identified by `path` with `mode` as the mode for opening the - /// file. - /// - /// Returns a [FileAccess] reference that provides access to the file referenced by `path` - /// or a [OpenError] if it failed. 
- /// - pub fn open(path: Path, mode: OpenMode) OpenError!ona.io.FileAccess { - switch (path.file_system.*) { - .archive => |*archive| { - if (mode != .readonly) return error.ModeUnsupported; - - const FileAccess = ona.io.FileAccess; - - for (archive.entry_table) |*entry| if (entry.owner == null) { - const Implementation = struct { - fn archiveEntryCast(context: *anyopaque) *Archive.Entry { - return @ptrCast(*Archive.Entry, @alignCast( - @alignOf(Archive.Entry), context)); - } - - fn close(context: *anyopaque) void { - archiveEntryCast(context).owner = null; - } - - fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - return archive_entry.cursor; - } - - fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - return archive_entry.header.file_size; - } - - fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - const archive_entry = archiveEntryCast(context); - - const file_access = archive_entry.owner orelse - return error.FileInaccessible; - - if (archive_entry.cursor >= archive_entry.header.file_size) - return error.FileInaccessible; - - try file_access.seek(archive_entry.header.absolute_offset); - - return file_access.read(buffer[0 .. std.math.min( - buffer.len, archive_entry.header.file_size)]); - } - - fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - archive_entry.cursor = cursor; - } - - fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - archive_entry.cursor = archive_entry.header.file_size; - } - - fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { - const math = std.math; - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - if (offset < 0) { - archive_entry.cursor = math.max(0, - archive_entry.cursor - math.absCast(offset)); - } else { - archive_entry.cursor += @intCast(u64, offset); - } - } - }; - - if (archive.index_cache.lookup(path.path)) |index| { - archive.file_access.seek(index) catch return error.FileNotFound; - - entry.* = .{ - .owner = &archive.file_access, - .cursor = 0, - - .header = (oar.Entry.next(archive.file_access) catch return error.FileNotFound) orelse { - // Remove cannot fail if lookup succeeded. 
- std.debug.assert(archive.index_cache.remove(path.path) != null); - - return error.FileNotFound; - }, - }; - } else { - while (oar.Entry.next(archive.file_access) catch return error.FileNotFound) |entry_header| { - if (entry.header.path.equals(path.path)) - entry.* = .{ - .owner = &archive.file_access, - .cursor = 0, - .header = entry_header, - }; - } - - return error.FileNotFound; - } - - return FileAccess{ - .context = entry, - - .implementation = &.{ - .close = Implementation.close, - .queryCursor = Implementation.queryCursor, - .queryLength = Implementation.queryLength, - .read = Implementation.read, - .seek = Implementation.seek, - .seekToEnd = Implementation.seekToEnd, - .skip = Implementation.skip, - }, - }; - }; - - return error.OutOfFiles; - }, - - .native => |native| { - if (native.len == 0) return error.FileNotFound; - - var path_buffer = std.mem.zeroes([4096]u8); - const seperator_length = @boolToInt(native[native.len - 1] != seperator); - - if ((native.len + seperator_length + path.path.length) >= - path_buffer.len) return error.FileNotFound; - - std.mem.copy(u8, path_buffer[0 ..], native); - - if (seperator_length != 0) path_buffer[native.len] = seperator; - - std.mem.copy(u8, path_buffer[native.len .. path_buffer. - len], path.path.buffer[0 .. path.path.length]); - - ext.SDL_ClearError(); - - const FileAccess = ona.io.FileAccess; + const FileAccess = ona.io.FileAccess; + for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { - fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { - return @ptrCast(*ext.SDL_RWops, @alignCast( - @alignOf(ext.SDL_RWops), context)); + fn archiveEntryCast(context: *anyopaque) *Archive.Entry { + return @ptrCast(*Archive.Entry, @alignCast( + @alignOf(Archive.Entry), context)); } fn close(context: *anyopaque) void { - ext.SDL_ClearError(); - - if (ext.SDL_RWclose(rwOpsCast(context)) != 0) - ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, ext.SDL_GetError()); + archiveEntryCast(context).owner = null; } fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - ext.SDL_ClearError(); + const archive_entry = archiveEntryCast(context); - const sought = ext.SDL_RWtell(rwOpsCast(context)); + if (archive_entry.owner == null) return error.FileInaccessible; - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); + return archive_entry.cursor; } fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - ext.SDL_ClearError(); + const archive_entry = archiveEntryCast(context); - const sought = ext.SDL_RWsize(rwOpsCast(context)); + if (archive_entry.owner == null) return error.FileInaccessible; - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); + return archive_entry.header.file_size; } fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - ext.SDL_ClearError(); + const archive_entry = archiveEntryCast(context); - const buffer_read = ext.SDL_RWread(rwOpsCast( - context), buffer.ptr, @sizeOf(u8), buffer.len); - - if ((buffer_read == 0) and (ext.SDL_GetError() != null)) + const file_access = archive_entry.owner orelse return error.FileInaccessible; - return buffer_read; + if (archive_entry.cursor >= archive_entry.header.file_size) + return error.FileInaccessible; + + try file_access.seek(archive_entry.header.absolute_offset); + + return file_access.read(buffer[0 .. 
std.math.min( + buffer.len, archive_entry.header.file_size)]); } fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - var to_seek = cursor; + const archive_entry = archiveEntryCast(context); - while (to_seek != 0) { - const math = std.math; - const sought = math.min(to_seek, math.maxInt(i64)); + if (archive_entry.owner == null) return error.FileInaccessible; - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(rwOpsCast(context), @intCast(i64, sought), - ext.RW_SEEK_CUR) < 0) return error.FileInaccessible; - - to_seek -= sought; - } + archive_entry.cursor = cursor; } fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - ext.SDL_ClearError(); + const archive_entry = archiveEntryCast(context); - if (ext.SDL_RWseek(rwOpsCast(context), 0, ext.RW_SEEK_END) < 0) - return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; + + archive_entry.cursor = archive_entry.header.file_size; } fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { - ext.SDL_ClearError(); + const math = std.math; + const archive_entry = archiveEntryCast(context); - if (ext.SDL_RWseek(rwOpsCast(context), offset, ext.RW_SEEK_SET) < 0) - return error.FileInaccessible; + if (archive_entry.owner == null) return error.FileInaccessible; + + if (offset < 0) { + archive_entry.cursor = math.max(0, + archive_entry.cursor - math.absCast(offset)); + } else { + archive_entry.cursor += @intCast(u64, offset); + } } }; + const Header = oar.Entry; + + if (archive.index_cache.lookup(path)) |index| { + archive.file_access.seek(index) catch return error.FileNotFound; + + entry.* = .{ + .owner = &archive.file_access, + .cursor = 0, + + .header = (Header.next(archive.file_access) catch + return error.FileNotFound) orelse { + + // Remove cannot fail if lookup succeeded. + std.debug.assert(archive.index_cache.remove(path) != null); + + return error.FileNotFound; + }, + }; + } else { + while (Header.next(archive.file_access) catch + return error.FileNotFound) |entry_header| { + + if (entry.header.path.equals(path)) entry.* = .{ + .owner = &archive.file_access, + .cursor = 0, + .header = entry_header, + }; + } + + return error.FileNotFound; + } + return FileAccess{ - .context = ext.SDL_RWFromFile(&path_buffer, switch (mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - }) orelse return error.FileNotFound, + .context = entry, .implementation = &.{ .close = Implementation.close, @@ -584,69 +439,126 @@ pub const FileSystem = union(enum) { .skip = Implementation.skip, }, }; - }, - } - } - - pub const seperator = '/'; - }; - - /// - /// [PathError.TooLong] occurs when creating a path that is greater than the maximum size **in - /// bytes**. - /// - pub const PathError = error { - TooLong, - }; - - /// - /// Attempts to create a [Path] with `file_system` as the file-system root and the path - /// components in `sequences` as a fully qualified path from the root. - /// - /// A [Path] value is returned containing the fully qualified path from the file-system root or - /// a [PathError] if it could not be created. 
- /// - pub fn joinedPath(file_system: *FileSystem, sequences: []const []const u8) PathError!Path { - var path = Path{ - .file_system = file_system, - .path = oar.Path.empty, - }; - - if (sequences.len != 0) { - const last_sequence_index = sequences.len - 1; - - for (sequences) |sequence, index| if (sequence.len != 0) { - var components = ona.io.Spliterator(u8){ - .source = sequence, - .delimiter = "/", }; - while (components.next()) |component| if (component.len != 0) { - for (component) |byte| { - if (path.path.length == Path.max) return error.TooLong; + return error.OutOfFiles; + }, - path.path.buffer[path.path.length] = byte; - path.path.length += 1; + .native => |native| { + if (native.len == 0) return error.FileNotFound; + + var path_buffer = std.mem.zeroes([4096]u8); + const seperator_length = @boolToInt(native[native.len - 1] != oar.Path.seperator); + + if ((native.len + seperator_length + path.length) >= + path_buffer.len) return error.FileNotFound; + + std.mem.copy(u8, path_buffer[0 ..], native); + + if (seperator_length != 0) path_buffer[native.len] = oar.Path.seperator; + + std.mem.copy(u8, path_buffer[native.len .. path_buffer. + len], path.buffer[0 .. path.length]); + + const FileAccess = ona.io.FileAccess; + + const Implementation = struct { + fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { + return @ptrCast(*ext.SDL_RWops, @alignCast( + @alignOf(ext.SDL_RWops), context)); } - if (components.hasNext()) { - if (path.path.length == Path.max) return error.TooLong; + fn close(context: *anyopaque) void { + ext.SDL_ClearError(); - path.path.buffer[path.path.length] = '/'; - path.path.length += 1; + if (ext.SDL_RWclose(rwOpsCast(context)) != 0) + ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, ext.SDL_GetError()); + } + + fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { + ext.SDL_ClearError(); + + const sought = ext.SDL_RWtell(rwOpsCast(context)); + + if (sought < 0) return error.FileInaccessible; + + return @intCast(u64, sought); + } + + fn queryLength(context: *anyopaque) FileAccess.Error!u64 { + ext.SDL_ClearError(); + + const sought = ext.SDL_RWsize(rwOpsCast(context)); + + if (sought < 0) return error.FileInaccessible; + + return @intCast(u64, sought); + } + + fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { + ext.SDL_ClearError(); + + const buffer_read = ext.SDL_RWread(rwOpsCast( + context), buffer.ptr, @sizeOf(u8), buffer.len); + + if ((buffer_read == 0) and (ext.SDL_GetError() != null)) + return error.FileInaccessible; + + return buffer_read; + } + + fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { + var to_seek = cursor; + + while (to_seek != 0) { + const math = std.math; + const sought = math.min(to_seek, math.maxInt(i64)); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), @intCast(i64, sought), + ext.RW_SEEK_CUR) < 0) return error.FileInaccessible; + + to_seek -= sought; + } + } + + fn seekToEnd(context: *anyopaque) FileAccess.Error!void { + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), 0, ext.RW_SEEK_END) < 0) + return error.FileInaccessible; + } + + fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rwOpsCast(context), offset, ext.RW_SEEK_SET) < 0) + return error.FileInaccessible; } }; - if (index < last_sequence_index) { - if (path.path.length == Path.max) return error.TooLong; + ext.SDL_ClearError(); - path.path.buffer[path.path.length] = '/'; - path.path.length += 1; - } - }; + return FileAccess{ + .context = 
ext.SDL_RWFromFile(&path_buffer, switch (mode) { + .readonly => "rb", + .overwrite => "wb", + .append => "ab", + }) orelse return error.FileNotFound, + + .implementation = &.{ + .close = Implementation.close, + .queryCursor = Implementation.queryCursor, + .queryLength = Implementation.queryLength, + .read = Implementation.read, + .seek = Implementation.seek, + .seekToEnd = Implementation.seekToEnd, + .skip = Implementation.skip, + }, + }; + }, } - - return path; } }; @@ -721,6 +633,11 @@ pub const Log = enum(u32) { } }; +/// +/// Path to a file on a [FileSystem]. +/// +pub const Path = oar.Path; + /// /// [RunError.InitFailure] occurs if a necessary resource fails to be acquired or allocated. /// @@ -774,7 +691,7 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); var cwd_file_system = FileSystem{.native ="./"}; - var data_access = try (try cwd_file_system.joinedPath(&.{"./data.oar"})).open(.readonly); + var data_access = try cwd_file_system.open(try Path.joined(&.{"./data.oar"}), .readonly); defer data_access.close(); diff --git a/src/oar/main.zig b/src/oar/main.zig index 63ba2e0..e11d69e 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -55,6 +55,14 @@ pub const Path = extern struct { buffer: [255]u8, length: u8, + /// + /// [Error.TooLong] occurs when creating a path that is greater than the maximum path size **in + /// bytes**. + /// + pub const Error = error { + TooLong, + }; + /// /// An empty [Path] with a length of `0`. /// @@ -74,6 +82,63 @@ pub const Path = extern struct { pub fn hash(path: Path) usize { return ona.io.hashBytes(path.buffer[0 .. path.length]); } + + /// + /// Attempts to create a [Path] with the path components in `sequences` as a fully qualified + /// path from root. + /// + /// A [Path] value is returned containing the fully qualified path from the file-system root or + /// a [Error] if it could not be created. + /// + pub fn joined(sequences: []const []const u8) Error!Path { + var path = empty; + + if (sequences.len != 0) { + const last_sequence_index = sequences.len - 1; + + for (sequences) |sequence, index| if (sequence.len != 0) { + var components = ona.io.Spliterator(u8){ + .source = sequence, + .delimiter = "/", + }; + + while (components.next()) |component| if (component.len != 0) { + for (component) |byte| { + if (path.length == max) return error.TooLong; + + path.buffer[path.length] = byte; + path.length += 1; + } + + if (components.hasNext()) { + if (path.length == max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + + if (index < last_sequence_index) { + if (path.length == max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + } + + return path; + } + + /// + /// Maximum number of **bytes** in a [Path]. + /// + pub const max = 255; + + /// + /// Textual separator between components of a [Path]. 
+ /// + pub const seperator = '/'; }; /// -- 2.34.1 From 32bb049f73971370c110d6995ba89e7b67445bbd Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 19 Oct 2022 23:58:56 +0100 Subject: [PATCH 47/93] Refactor SDL2 interface code --- src/engine/main.zig | 8 +- src/engine/sys.zig | 501 ++++++++++++++++--------------------- src/engine/sys/Archive.zig | 29 --- src/oar/main.zig | 2 +- 4 files changed, 223 insertions(+), 317 deletions(-) delete mode 100644 src/engine/sys/Archive.zig diff --git a/src/engine/main.zig b/src/engine/main.zig index 3202d11..d120a7b 100644 --- a/src/engine/main.zig +++ b/src/engine/main.zig @@ -6,16 +6,16 @@ const sys = @import("./sys.zig"); /// Starts the the game engine. /// pub fn main() anyerror!void { - return nosuspend await async sys.runGraphics(anyerror, run); + return nosuspend await async sys.display(anyerror, run); } -fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { +fn run(app: *sys.App, graphics: *sys.Graphics) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); { - var file_access = try app.data().open(try sys.Path.joined(&.{"ona.lua"}), .readonly); + var file_access = try app.data.open(try sys.Path.joined(&.{"ona.lua"}), .readonly); defer file_access.close(); @@ -27,7 +27,7 @@ fn run(app: *sys.AppContext, graphics: *sys.GraphicsContext) anyerror!void { if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; - sys.Log.debug.write(buffer); + app.log(.debug, buffer); } while (graphics.poll()) |_| { diff --git a/src/engine/sys.zig b/src/engine/sys.zig index 0c628e2..81cd0cc 100644 --- a/src/engine/sys.zig +++ b/src/engine/sys.zig @@ -1,5 +1,3 @@ -const Archive = @import("./sys/Archive.zig"); - const ext = @cImport({ @cInclude("SDL2/SDL.h"); }); @@ -9,222 +7,43 @@ const ona = @import("ona"); const std = @import("std"); /// -/// A thread-safe platform abstraction over multiplexing system I/O processing and event handling. +/// Thread-safe platform abstraction over multiplexing system I/O processing and event handling. /// -pub const AppContext = opaque { - /// - /// Linked list of asynchronous messages chained together to be processed by the work processor. - /// - const Message = struct { - next: ?*Message = null, - - kind: union(enum) { - quit, - - task: struct { - data: *anyopaque, - action: fn (*anyopaque) void, - frame: anyframe, - }, - }, - }; +pub const App = struct { + message_chain: ?*Message = null, + message_semaphore: *ext.SDL_sem, + message_mutex: *ext.SDL_mutex, + data: FileSystem, + user: FileSystem, /// - /// Internal state of the event loop hidden from the API consumer. + /// Enqueues `message` to the message chain in `app`. /// - const Implementation = struct { - user_path_prefix: [*]u8, - data_file_system: FileSystem, - user_file_system: FileSystem, - message_semaphore: *ext.SDL_sem, - message_mutex: *ext.SDL_mutex, - message_thread: ?*ext.SDL_Thread, - messages: ?*Message = null, + fn enqueue(app: *App, message: *Message) void { + { + // TODO: Error check these. + _ = ext.SDL_LockMutex(app.message_mutex); - /// - /// [StartError.OutOfSemaphores] indicates that the process has no more semaphores available - /// to it for use, meaning an [Implementation] may not be initialized at this time. - /// - /// [StartError.OutOfMutexes] indicates that the process has no more mutexes available to it - /// for use, meaning an [Implementation] may not be initialized at this time. 
- /// - /// [StartError.OutOfMemory] indicates that the process has no more memory available to it - /// for use, meaning an [Implementation] may not be initialized at this time. - /// - const InitError = error { - OutOfSemaphores, - OutOfMutexes, - OutOfMemory, - }; + defer _ = ext.SDL_UnlockMutex(app.message_mutex); - /// - /// [StartError.OutOfThreads] indicates that the process has no more threads available to it - /// to use, meaning that no asynchronous work may be started on an [Implementation] at this - /// time. - /// - /// [StartError.AlreadyStarted] is occurs when a request to start work processing happens on - /// an [Implementation] that is already processing work. - /// - const StartError = error { - OutOfThreads, - AlreadyStarted, - }; - - /// - /// Casts `app_context` to a [Implementation] reference. - /// - /// *Note* that if `app_context` does not have the same alignment as [Implementation], safety- - /// checked undefined behavior will occur. - /// - fn cast(app_context: *AppContext) *Implementation { - return @ptrCast(*Implementation, @alignCast(@alignOf(Implementation), app_context)); - } - - /// - /// Deinitializes the `implementation`, requesting any running asynchronous workload - /// processes quit and waits for them to do so before freeing any resources. - /// - fn deinit(implementation: *Implementation) void { - var message = Message{.kind = .quit}; - - implementation.enqueue(&message); - - { - var status = @as(c_int, 0); - - // SDL2 defines waiting on a null thread reference as a no-op. See - // https://wiki.libsdl.org/SDL_WaitThread for more information - ext.SDL_WaitThread(implementation.message_thread, &status); - - if (status != 0) { - // TODO: Error check this. - } - } - - implementation.data_file_system.archive.index_cache.deinit(); - ext.SDL_free(implementation.user_path_prefix); - ext.SDL_DestroyMutex(implementation.message_mutex); - ext.SDL_DestroySemaphore(implementation.message_semaphore); - } - - /// - /// Enqueues `message` to the message processor of `implementation` to be processed at a - /// later, non-deterministic point in time. - /// - fn enqueue(implementation: *Implementation, message: *Message) void { - { - // TODO: Error check these. - _ = ext.SDL_LockMutex(implementation.message_mutex); - - defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); - - if (implementation.messages) |messages| { - messages.next = message; - } else { - implementation.messages = message; - } - } - - // TODO: Error check this. - _ = ext.SDL_SemPost(implementation.message_semaphore); - } - - /// - /// Initializes a new [Implemenation] with `data_archive_file_access` as the data archive to - /// read from and `user_path_prefix` as the native writable user data directory. - /// - /// Returns the created [Implementation] value on success or [InitError] on failure. - /// - fn init(allocator: std.mem.Allocator, - data_archive_file_access: ona.io.FileAccess) InitError!Implementation { - - const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse - return error.OutOfMemory; - - return Implementation{ - .user_file_system = .{.native = - user_path_prefix[0 .. 
std.mem.len(user_path_prefix)]}, - - .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores, - .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes, - .user_path_prefix = user_path_prefix, - - .data_file_system = .{.archive = .{ - .index_cache = try Archive.IndexCache.init(allocator), - .file_access = data_archive_file_access, - }}, - - .message_thread = null, - }; - } - - /// - /// [FileSystemMessage] processing function used by a dedicated worker thread, where `data` - /// is a type-erased reference to a [EventLoop]. - /// - /// The processor returns `0` if it exited normally or any other value if an erroneous exit - /// occured. - /// - fn processTasks(userdata: ?*anyopaque) callconv(.C) c_int { - const implementation = Implementation.cast( - @ptrCast(*AppContext, userdata orelse unreachable)); - - while (true) { - // TODO: Error check these. - _ = ext.SDL_SemWait(implementation.message_semaphore); - _ = ext.SDL_LockMutex(implementation.message_mutex); - - defer _ = ext.SDL_UnlockMutex(implementation.message_mutex); - - while (implementation.messages) |messages| { - switch (messages.kind) { - .quit => { - return 0; - }, - - .task => |task| { - task.action(task.data); - - resume task.frame; - }, - } - - implementation.messages = messages.next; - } + if (app.message_chain) |message_chain| { + message_chain.next = message; + } else { + app.message_chain = message; } } - /// - /// Attempts to start the asynchronous worker thread of `implementation` if it hasn't been - /// already. - /// - /// [StartError] is returned on failure. - /// - fn start(implementation: *Implementation) StartError!void { - if (implementation.message_thread != null) return error.AlreadyStarted; - - implementation.message_thread = ext.SDL_CreateThread(processTasks, - "File System Worker", implementation) orelse { - - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, - "Failed to create file-system work processor"); - - return error.OutOfThreads; - }; - } - }; - - /// - /// Returns a reference to the currently loaded data file-system. - /// - pub fn data(app_context: *AppContext) *FileSystem { - return &Implementation.cast(app_context).data_file_system; + // TODO: Error check this. + _ = ext.SDL_SemPost(app.message_semaphore); } /// + /// Asynchronously executes `procedure` with `arguments` as an anonymous struct of its arguments + /// and `app` as its execution context. /// + /// Once the execution frame resumes, the value returned by executing `procedure` is returned. /// - pub fn schedule(app_context: *AppContext, procedure: anytype, + pub fn schedule(app: *App, procedure: anytype, arguments: anytype) ona.meta.FnReturn(@TypeOf(procedure)) { const Task = struct { @@ -246,7 +65,7 @@ pub const AppContext = opaque { .arguments = &arguments, }; - var message = AppContext.Message{ + var message = Message{ .kind = .{.task = .{ .data = &task, .action = Task.process, @@ -254,14 +73,22 @@ pub const AppContext = opaque { }}, }; - suspend Implementation.cast(app_context).enqueue(&message); + suspend app.enqueue(&message); } /// - /// Returns a reference to the currently loaded user file-system. + /// Asynchronously logs `info` with `logger` as the logging method and `app` as the execution + /// context. 
/// - pub fn user(app_context: *AppContext) *FileSystem { - return &Implementation.cast(app_context).user_file_system; + pub fn log(app: *App, logger: Logger, info: []const u8) void { + var message = Message{ + .kind = .{.log = .{ + .logger = logger, + .info = info, + }}, + }; + + app.enqueue(&message); } }; @@ -274,17 +101,48 @@ pub const FileSystem = union(enum) { archive: Archive, /// - /// With files typically being backed by a block device, they can produce a variety of - /// errors - from physical to virtual errors - these are all encapsulated by the API as - /// general [OpenError.FileNotFound] errors. + /// Archive file system information. + /// + const Archive = struct { + file_access: ona.io.FileAccess, + index_cache: IndexCache, + entry_table: [max_open_entries]Entry = std.mem.zeroes([max_open_entries]Entry), + + /// + /// Hard limit on the maximum number of entries open at once. + /// + const max_open_entries = 16; + + /// + /// Stateful extension of an [oar.Entry]. + /// + const Entry = struct { + owner: ?*ona.io.FileAccess, + cursor: u64, + header: oar.Entry, + }; + + /// + /// Table cache for associating [oar.Path] values with offsets to entries in a given file. + /// + const IndexCache = ona.table.Hashed(oar.Path, u64, .{ + .equals = oar.Path.equals, + .hash = oar.Path.hash, + }); + }; + + /// + /// With files typically being backed by a block device, they can produce a variety of errors - + /// from physical to virtual errors - these are all encapsulated by the API as general + /// [OpenError.FileNotFound] errors. /// /// When a given [FileSystem] does not support a specified [OpenMode], /// [OpenError.ModeUnsupported] is used to inform the consuming code that another [OpenMode] /// should be tried or, if no mode other is suitable, that the resource is effectively /// unavailable. /// - /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] - /// is used to communicate this. + /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] is + /// used to communicate this. /// pub const OpenError = error { FileNotFound, @@ -380,12 +238,13 @@ pub const FileSystem = union(enum) { } fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { - const math = std.math; const archive_entry = archiveEntryCast(context); if (archive_entry.owner == null) return error.FileInaccessible; if (offset < 0) { + const math = std.math; + archive_entry.cursor = math.max(0, archive_entry.cursor - math.absCast(offset)); } else { @@ -447,17 +306,18 @@ pub const FileSystem = union(enum) { .native => |native| { if (native.len == 0) return error.FileNotFound; - var path_buffer = std.mem.zeroes([4096]u8); + const mem = std.mem; + var path_buffer = mem.zeroes([4096]u8); const seperator_length = @boolToInt(native[native.len - 1] != oar.Path.seperator); if ((native.len + seperator_length + path.length) >= path_buffer.len) return error.FileNotFound; - std.mem.copy(u8, path_buffer[0 ..], native); + mem.copy(u8, &path_buffer, native); if (seperator_length != 0) path_buffer[native.len] = oar.Path.seperator; - std.mem.copy(u8, path_buffer[native.len .. path_buffer. + mem.copy(u8, path_buffer[native.len .. path_buffer. len], path.buffer[0 .. 
path.length]); const FileAccess = ona.io.FileAccess; @@ -565,7 +425,7 @@ pub const FileSystem = union(enum) { /// /// /// -pub const GraphicsContext = opaque { +pub const Graphics = opaque { /// /// /// @@ -587,8 +447,8 @@ pub const GraphicsContext = opaque { /// /// /// - pub fn poll(graphics_context: *GraphicsContext) ?*const Event { - _ = graphics_context; + pub fn poll(graphics: *Graphics) ?*const Event { + _ = graphics; return null; } @@ -596,41 +456,46 @@ pub const GraphicsContext = opaque { /// /// /// - pub fn present(graphics_context: *GraphicsContext) void { + pub fn present(graphics: *Graphics) void { // TODO: Implement; - _ = graphics_context; + _ = graphics; } }; /// -/// Returns a graphics runner that uses `Errors` as its error set. +/// [Logger.info] logs information that isn't necessarily an error but indicates something useful to +/// be logged. /// -pub fn GraphicsRunner(comptime Errors: type) type { - return fn (*AppContext, *GraphicsContext) callconv(.Async) Errors!void; -} - +/// [Logger.debug] logs information only when the engine is in debug mode. /// -/// [Log.info] represents a log message which is purely informative and does not indicate any kind -/// of issue. +/// [Logger.warning] logs information to indicate a non-critical error has occured. /// -/// [Log.debug] represents a log message which is purely for debugging purposes and will only occurs -/// in debug builds. -/// -/// [Log.warning] represents a log message which is a warning about a issue that does not break -/// anything important but is not ideal. -/// -pub const Log = enum(u32) { +pub const Logger = enum(u32) { info = ext.SDL_LOG_PRIORITY_INFO, debug = ext.SDL_LOG_PRIORITY_DEBUG, warning = ext.SDL_LOG_PRIORITY_WARN, +}; - /// - /// Writes `utf8_message` as the log kind identified by `log`. - /// - pub fn write(log: Log, utf8_message: []const u8) void { - ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, - @enumToInt(log), "%.*s", utf8_message.len, utf8_message.ptr); - } +/// +/// Linked list of asynchronous messages chained together to be processed by the work processor. +/// +pub const Message = struct { + next: ?*Message = null, + + kind: union(enum) { + quit, + + log: struct { + logger: Logger, + info: []const u8, + }, + + task: struct { + data: *anyopaque, + action: fn (*anyopaque) void, + frame: anyframe, + }, + }, }; /// @@ -639,13 +504,10 @@ pub const Log = enum(u32) { pub const Path = oar.Path; /// -/// [RunError.InitFailure] occurs if a necessary resource fails to be acquired or allocated. -/// -/// [RunError.AlreadyRunning] occurs if a runner has already been started. +/// [RunError.InitFailure] occurs when the runtime fails to initialize. /// pub const RunError = error { InitFailure, - AlreadyRunning, }; /// @@ -654,8 +516,76 @@ pub const RunError = error { /// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if /// the underlying runtime fails and is logged. 
/// -pub fn runGraphics(comptime Error: anytype, - comptime run: GraphicsRunner(Error)) (RunError || Error)!void { +pub fn display(comptime Error: anytype, + comptime run: fn (*App, *Graphics) callconv(.Async) Error!void) (RunError || Error)!void { + + var cwd = FileSystem{.native = "./"}; + const user_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse return error.InitFailure; + + defer ext.SDL_free(user_prefix); + + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + + defer if (gpa.deinit()) + ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, "Runtime allocator leaked memory"); + + var app = App{ + .user = .{.native = std.mem.sliceTo(user_prefix, 0)}, + + .message_semaphore = ext.SDL_CreateSemaphore(0) orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create message semaphore"); + + return error.InitFailure; + }, + + .message_mutex = ext.SDL_CreateMutex() orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create message mutex"); + + return error.InitFailure; + }, + + .data = .{.archive = .{ + .index_cache = FileSystem.Archive.IndexCache.init(gpa.allocator()) catch + return error.InitFailure, + + .file_access = cwd.open(try Path.joined(&.{"./data.oar"}), .readonly) catch { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load ./data.oar"); + + return error.InitFailure; + }, + }}, + }; + + defer { + app.data.archive.file_access.close(); + app.data.archive.index_cache.deinit(); + ext.SDL_DestroySemaphore(app.message_semaphore); + ext.SDL_DestroyMutex(app.message_mutex); + } + + const message_thread = ext.SDL_CreateThread(processMessages, "Message Processor", &app) orelse { + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create message processor"); + + return error.InitFailure; + }; + + defer { + var message = Message{.kind = .quit}; + + app.enqueue(&message); + + { + var status = std.mem.zeroes(c_int); + + // SDL2 defines waiting on a null thread reference as a no-op. See + // https://wiki.libsdl.org/SDL_WaitThread for more information + ext.SDL_WaitThread(message_thread, &status); + + if (status != 0) { + // TODO: Error check this. + } + } + } if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to initialize runtime"); @@ -690,42 +620,47 @@ pub fn runGraphics(comptime Error: anytype, defer ext.SDL_DestroyRenderer(renderer); - var cwd_file_system = FileSystem{.native ="./"}; - var data_access = try cwd_file_system.open(try Path.joined(&.{"./data.oar"}), .readonly); - - defer data_access.close(); - - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - defer _ = gpa.deinit(); - - var app_context = AppContext.Implementation.init(gpa.allocator(), data_access) catch |err| { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { - error.OutOfMemory => "Failed to allocate necessary memory", - error.OutOfMutexes => "Failed to create file-system work lock", - error.OutOfSemaphores => "Failed to create file-system work scheduler", - }); - - return error.InitFailure; - }; - - defer app_context.deinit(); - - app_context.start() catch |err| { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) { - // Not possible for it to have already been started. 
- error.AlreadyStarted => unreachable, - error.OutOfThreads => "Failed to start file-system work processor", - }); - - return error.InitFailure; - }; - - var graphics_context = GraphicsContext.Implementation{ + var graphics = Graphics.Implementation{ .event = .{ }, }; - return run(@ptrCast(*AppContext, &app_context), @ptrCast(*GraphicsContext, &graphics_context)); + return run(@ptrCast(*App, &app), @ptrCast(*Graphics, &graphics)); +} + +/// +/// [FileSystemMessage] processing function used by a dedicated worker thread, where `data` +/// is a type-erased reference to a [EventLoop]. +/// +/// The processor returns `0` if it exited normally or any other value if an erroneous exit +/// occured. +/// +pub fn processMessages(userdata: ?*anyopaque) callconv(.C) c_int { + const app = @ptrCast(*App, @alignCast(@alignOf(App), userdata orelse unreachable)); + + while (true) { + // TODO: Error check these. + _ = ext.SDL_SemWait(app.message_semaphore); + _ = ext.SDL_LockMutex(app.message_mutex); + + defer _ = ext.SDL_UnlockMutex(app.message_mutex); + + while (app.message_chain) |message| { + switch (message.kind) { + .quit => return 0, + + .log => |log| ext.SDL_LogMessage(ext.SDL_LOG_CATEGORY_APPLICATION, + @enumToInt(log.logger), "%.*s", log.info.len, log.info.ptr), + + .task => |task| { + task.action(task.data); + + resume task.frame; + }, + } + + app.message_chain = message.next; + } + } } diff --git a/src/engine/sys/Archive.zig b/src/engine/sys/Archive.zig deleted file mode 100644 index 500ff4a..0000000 --- a/src/engine/sys/Archive.zig +++ /dev/null @@ -1,29 +0,0 @@ -const oar = @import("oar"); -const ona = @import("ona"); -const std = @import("std"); - -file_access: ona.io.FileAccess, -index_cache: IndexCache, -entry_table: [max_open_entries]Entry = std.mem.zeroes([max_open_entries]Entry), - -/// -/// Hard limit on the maximum number of entries open at once. -/// -const max_open_entries = 16; - -/// -/// Stateful extension of an [oar.Entry]. -/// -pub const Entry = struct { - owner: ?*ona.io.FileAccess, - cursor: u64, - header: oar.Entry, -}; - -/// -/// Table cache for associating [oar.Path] values with offsets to entries in a given file. -/// -pub const IndexCache = ona.table.Hashed(oar.Path, u64, .{ - .equals = oar.Path.equals, - .hash = oar.Path.hash, -}); diff --git a/src/oar/main.zig b/src/oar/main.zig index e11d69e..85ee82c 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -30,7 +30,7 @@ pub const Entry = extern struct { const origin = try file_access.queryCursor(); if (((try file_access.read(std.mem.asBytes(&entry))) != @sizeOf(Entry)) and - ona.io.equalsBytes(entry.signature[0 ..], signature_magic[0 ..])) { + ona.io.equalsBytes(&entry.signature, &signature_magic)) { try file_access.seek(origin); -- 2.34.1 From ef2c6c3a3c937d00db8c474e759be800bdb7ae92 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 20 Oct 2022 22:30:48 +0100 Subject: [PATCH 48/93] Extract Ona IO writer into reusable "Function" type --- src/ona/fmt.zig | 70 ++++++++++++++++++++++ src/ona/io.zig | 146 ++++++---------------------------------------- src/ona/meta.zig | 51 ++++++++++++++++ src/ona/stack.zig | 5 +- 4 files changed, 142 insertions(+), 130 deletions(-) create mode 100644 src/ona/fmt.zig diff --git a/src/ona/fmt.zig b/src/ona/fmt.zig new file mode 100644 index 0000000..43a0cba --- /dev/null +++ b/src/ona/fmt.zig @@ -0,0 +1,70 @@ +const io = @import("io.zig"); + +/// +/// Number formatting modes supported by [writeInt]. 
+/// +pub const Radix = enum { + binary, + tinary, + quaternary, + quinary, + senary, + septenary, + octal, + nonary, + decimal, + undecimal, + duodecimal, + tridecimal, + tetradecimal, + pentadecimal, + hexadecimal, +}; + +/// +/// Writes `value` as a ASCII / UTF-8 encoded integer to `writer`, returning `true` if the full +/// sequence was successfully written, otherwise `false`. +/// +/// The `radix` argument identifies which base system to format `value` as. +/// +pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) bool { + const Int = @TypeOf(value); + const type_info = @typeInfo(Int); + + switch (type_info) { + .Int => { + if (value == 0) return writer.writeByte('0'); + + // TODO: Unhardcode this as it will break with large ints. + var buffer = std.mem.zeroes([28]u8); + var buffer_count = @as(usize, 0); + var n1 = value; + + if ((type_info.Int.signedness == .signed) and (value < 0)) { + // Negative value. + n1 = -value; + buffer[0] = '-'; + buffer_count += 1; + } + + while (n1 != 0) { + const base = @enumToInt(radix); + + buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); + n1 = (n1 / base); + buffer_count += 1; + } + + for (buffer[0 .. (buffer_count / 2)]) |_, i| + std.mem.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); + + return (writer.call(.{buffer[0 .. buffer_count]}) == buffer_count); + }, + + // Cast comptime int into known-size integer and try again. + .ComptimeInt => return writer. + writeInt(radix, @intCast(std.math.IntFittingRange(value, value), value)), + + else => @compileError("value must be of type int"), + } +} diff --git a/src/ona/io.zig b/src/ona/io.zig index 83a7de3..3ad3701 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -1,3 +1,4 @@ +const meta = @import("./meta.zig"); const stack = @import("./stack.zig"); const std = @import("std"); @@ -170,121 +171,9 @@ test "Spliterating text" { } /// -/// Opaque interface to a "writable" resource, such as a block device, memory buffer, or network -/// socket. +/// Opaque interface to a "writable" resource like a block device, memory buffer, or network socket. /// -pub const Writer = struct { - context: *anyopaque, - writeContext: fn (*anyopaque, []const u8) usize, - - /// - /// Radices supported by [writeInt]. - /// - pub const Radix = enum { - binary, - tinary, - quaternary, - quinary, - senary, - septenary, - octal, - nonary, - decimal, - undecimal, - duodecimal, - tridecimal, - tetradecimal, - pentadecimal, - hexadecimal, - }; - - /// - /// Wraps and returns a reference to `write_context` of type `WriteContext` and its associated - /// `writeContext` writing operation in a [Writer]. - /// - pub fn wrap( - comptime WriteContext: type, - write_context: *WriteContext, - comptime writeContext: fn (*WriteContext, []const u8) usize - ) Writer { - return .{ - .context = write_context, - - .writeContext = struct { - fn write(context: *anyopaque, buffer: []const u8) usize { - return writeContext(@ptrCast(*WriteContext, - @alignCast(@alignOf(WriteContext), context)), buffer); - } - }.write, - }; - } - - /// - /// Attempts to write `buffer` to `writer`, returning the number of bytes from `buffer` that - /// were successfully written. - /// - pub fn write(writer: Writer, buffer: []const u8) usize { - return writer.writeContext(writer.context, buffer); - } - - /// - /// Writes the singular `byte` to `writer`, returning `true` if it was successfully written, - /// otherwise `false`. 
- /// - pub fn writeByte(writer: Writer, byte: u8) bool { - return (writer.writeContext(writer.context, - @ptrCast([*]const u8, &byte)[0 .. 1]) != 0); - } - - /// - /// Writes `value` as a ASCII / UTF-8 encoded integer to `writer`, returning `true` if the full - /// sequence was successfully written, otherwise `false`. - /// - /// The `radix` argument identifies which base system to encode `value` as, with `10` being - /// decimal, `16` being hexadecimal, `8` being octal`, so on and so forth. - /// - pub fn writeInt(writer: Writer, radix: Radix, value: anytype) bool { - const Int = @TypeOf(value); - const type_info = @typeInfo(Int); - - switch (type_info) { - .Int => { - if (value == 0) return writer.writeByte('0'); - - // TODO: Unhardcode this as it will break with large ints. - var buffer = std.mem.zeroes([28]u8); - var buffer_count = @as(usize, 0); - var n1 = value; - - if ((type_info.Int.signedness == .signed) and (value < 0)) { - // Negative value. - n1 = -value; - buffer[0] = '-'; - buffer_count += 1; - } - - while (n1 != 0) { - const base = @enumToInt(radix); - - buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); - n1 = (n1 / base); - buffer_count += 1; - } - - for (buffer[0 .. (buffer_count / 2)]) |_, i| - std.mem.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); - - return (writer.write(buffer[0 .. buffer_count]) == buffer_count); - }, - - // Cast comptime int into known-size integer and try again. - .ComptimeInt => return writer. - writeInt(radix, @intCast(std.math.IntFittingRange(value, value), value)), - - else => @compileError("value must be of type int"), - } - } -}; +pub const Writer = meta.Function([]const u8, usize); /// /// Returns `true` if `this_bytes` is the same length and contains the same data as `that_bytes`, @@ -323,21 +212,19 @@ test "Hashing bytes" { try testing.expect(hashBytes(bytes_sequence) != hashBytes(&.{69, 42})); } +var null_context: usize = undefined; + /// /// Writer that silently throws consumed data away and never fails. /// /// This is commonly used for testing or redirected otherwise unwanted output data that can't not be /// sent somewhere for whatever reason. /// -pub const null_writer = Writer{ - .context = undefined, - - .writeContext = struct { - fn write(_: *anyopaque, buffer: []const u8) usize { - return buffer.len; - } - }.write, -}; +pub const null_writer = Writer.wrap(&null_context, struct { + fn write(_: *@TypeOf(null_context), buffer: []const u8) usize { + return buffer.len; + } +}.write); test "Null writing" { const testing = std.testing; @@ -345,9 +232,14 @@ test "Null writing" { { const sequence = "foo"; - try testing.expectEqual(null_writer.write(sequence), sequence.len); + try testing.expectEqual(null_writer.apply(sequence), sequence.len); } - - try testing.expect(null_writer.writeByte(0)); - try testing.expect(null_writer.writeInt(.decimal, 420)); +} + +/// +/// Writes the singular `byte` to `writer`, returning `true` if it was successfully written, +/// otherwise `false`. +/// +pub fn writeByte(writer: Writer, byte: u8) bool { + return (writer.call(.{@ptrCast([*]const u8, &byte)[0 .. 1]}) != 0); } diff --git a/src/ona/meta.zig b/src/ona/meta.zig index 9fa2c48..d555eca 100644 --- a/src/ona/meta.zig +++ b/src/ona/meta.zig @@ -8,3 +8,54 @@ pub fn FnReturn(comptime Fn: type) type { return type_info.Fn.return_type orelse void; } + +/// +/// Returns single-input closure type where `Input` is the input type and `Output` is the output +/// type. 
+/// +pub fn Function(comptime Input: type, comptime Output: type) type { + return struct { + context: *anyopaque, + contextualApply: fn (*anyopaque, Input) Output, + + /// + /// Function type. + /// + const Self = @This(); + + /// + /// Applies `input` to `self`, producing a result according to the type-erased + /// implementation. + /// + pub fn apply(self: Self, input: Input) Output { + return self.contextualApply(self.context, input); + } + + /// + /// Creates a new [Self] by wrapping `concrete_context` as a pointer to the implementation + /// and `contextualApply` as the behavior executed when [apply] is called. + /// + /// The newly created [Self] is returned. + /// + pub fn wrap( + concrete_context: anytype, + comptime contextualApply: fn (@TypeOf(concrete_context), Input) Output + ) Self { + const ConcreteContext = @TypeOf(concrete_context); + + if (@typeInfo(ConcreteContext) != .Pointer) + @compileError("`concrete_context` must be a pointer type"); + + return .{ + .context = concrete_context, + + .contextualApply = struct { + fn call(erased_context: *anyopaque, input: Input) Output { + return contextualApply(@ptrCast(ConcreteContext, @alignCast( + @alignOf(ConcreteContext), erased_context)), input); + } + }.call, + }; + } + }; +} diff --git a/src/ona/stack.zig b/src/ona/stack.zig index 9ebf9bc..617dc7c 100755 --- a/src/ona/stack.zig +++ b/src/ona/stack.zig @@ -23,7 +23,7 @@ pub fn Fixed(comptime Element: type) type { if (Element != u8) @compileError("Cannot coerce fixed stack of type " ++ @typeName(Element) ++ " into a Writer"); - return io.Writer.wrap(Self, self, struct { + return io.Writer.wrap(self, struct { fn write(stack: *Self, buffer: []const u8) usize { stack.pushAll(buffer) catch |err| switch (err) { error.OutOfMemory => return 0, @@ -112,6 +112,5 @@ test "Fixed stack manipulation" { const writer = stack.writer(); - try testing.expectEqual(writer.write(&.{0, 0, 0, 0}), 4); - try testing.expectEqual(writer.writeByte(0), false); + try testing.expectEqual(writer.apply(&.{0, 0, 0, 0}), 4); } -- 2.34.1 From 461d9b7cf72d4d3742e31bd2b6aa02cd2cba742f Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 22 Oct 2022 23:20:33 +0100 Subject: [PATCH 49/93] Modify Function type implementation to have customisable capture sizes --- src/ona/io.zig | 27 ++++++++++++----------- src/ona/meta.zig | 56 ++++++++++++++++++++++++++--------------------- src/ona/stack.zig | 7 ++---- 3 files changed, 47 insertions(+), 43 deletions(-) diff --git a/src/ona/io.zig b/src/ona/io.zig index 3ad3701..7714365 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -171,9 +171,10 @@ test "Spliterating text" { } /// -/// Opaque interface to a "writable" resource like a block device, memory buffer, or network socket. +/// Closure that captures a reference to writable resources like block devices, memory buffers, +/// network sockets, and more. /// -pub const Writer = meta.Function([]const u8, usize); +pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); /// /// Returns `true` if `this_bytes` is the same length and contains the same data as `that_bytes`, @@ -212,19 +213,19 @@ test "Hashing bytes" { try testing.expect(hashBytes(bytes_sequence) != hashBytes(&.{69, 42})); } -var null_context: usize = undefined; - /// -/// Writer that silently throws consumed data away and never fails. +/// Returns a [Writer] that silently consumes all given data without failure and throws it away. 
/// -/// This is commonly used for testing or redirected otherwise unwanted output data that can't not be +/// This is commonly used for testing or redirected otherwise unwanted output data that has to be /// sent somewhere for whatever reason. /// -pub const null_writer = Writer.wrap(&null_context, struct { - fn write(_: *@TypeOf(null_context), buffer: []const u8) usize { - return buffer.len; - } -}.write); +pub fn nullWriter() Writer { + return Writer.capture(std.mem.zeroes(usize), struct { + fn write(_: usize, buffer: []const u8) usize { + return buffer.len; + } + }.write); +} test "Null writing" { const testing = std.testing; @@ -232,7 +233,7 @@ test "Null writing" { { const sequence = "foo"; - try testing.expectEqual(null_writer.apply(sequence), sequence.len); + try testing.expectEqual(nullWriter().apply(sequence), sequence.len); } } @@ -241,5 +242,5 @@ test "Null writing" { /// otherwise `false`. /// pub fn writeByte(writer: Writer, byte: u8) bool { - return (writer.call(.{@ptrCast([*]const u8, &byte)[0 .. 1]}) != 0); + return (writer.apply(std.mem.asBytes(&byte)) != 0); } diff --git a/src/ona/meta.zig b/src/ona/meta.zig index d555eca..22b38eb 100644 --- a/src/ona/meta.zig +++ b/src/ona/meta.zig @@ -1,3 +1,5 @@ +const std = @import("std"); + /// /// Returns the return type of the function type `Fn`. /// @@ -10,13 +12,14 @@ pub fn FnReturn(comptime Fn: type) type { } /// -/// Returns single-input closure type where `Input` is the input type and `Output` is the output -/// type. +/// Returns a single-input single-output closure type where `Input` represents the input type, +/// `Output` represents the output type, and `captures_size` represents the size of the closure +/// context. /// -pub fn Function(comptime Input: type, comptime Output: type) type { +pub fn Function(comptime captures_size: usize, comptime Input: type, comptime Output: type) type { return struct { - context: *anyopaque, - contextualApply: fn (*anyopaque, Input) Output, + applyErased: fn (*anyopaque, Input) Output, + context: [captures_size]u8, /// /// Function type. @@ -24,38 +27,41 @@ pub fn Function(comptime Input: type, comptime Output: type) type { const Self = @This(); /// - /// Applies `input` to `self`, producing a result according to the type-erased - /// implementation. + /// Applies `input` to `self`, producing a result according to the current context data. /// - pub fn apply(self: Self, input: Input) Output { - return self.contextualApply(self.context, input); + pub fn apply(self: *Self, input: Input) Output { + return self.applyErased(&self.context, input); } /// - /// Creates a new [Self] by wrapping `concrete_context` as a pointer to the implementation - /// and `contextualApply` as the behavior executed when [apply] is called. + /// Creates a new [Self] by capturing the `captures` value as the context and `call` as the + /// as the behavior executed when [apply] or [applyErased] is called. /// /// The newly created [Self] is returned. 
/// - pub fn wrap( - concrete_context: anytype, - comptime contextualApply: fn (@TypeOf(concrete_context), Input) Output - ) Self { - const ConcreteContext = @TypeOf(concrete_context); + pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), Input) Output) Self { + const Captures = @TypeOf(captures); - if (@typeInfo(ConcreteContext) != .Pointer) - @compileError("`concrete_context` must be a pointer type"); + if (@sizeOf(Captures) > captures_size) + @compileError("`captures` must be smaller than or equal to " ++ + std.fmt.comptimePrint("{d}", .{captures_size}) ++ " bytes"); - return .{ - .context = concrete_context, + const captures_align = @alignOf(Captures); - .contextualApply = struct { - fn call(erased_context: *anyopaque, input: Input) Output { - return contextualApply(@ptrCast(ConcreteContext, @alignCast( - @alignOf(ConcreteContext), erased_context)), input); + var function = Self{ + .context = undefined, + + .applyErased = struct { + fn do(erased: *anyopaque, input: Input) Output { + return call(@ptrCast(*Captures, @alignCast( + captures_align, erased)).*, input); } - }.call, + }.do, }; + + @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; + + return function; } }; } diff --git a/src/ona/stack.zig b/src/ona/stack.zig index 617dc7c..77a4e01 100755 --- a/src/ona/stack.zig +++ b/src/ona/stack.zig @@ -23,7 +23,7 @@ pub fn Fixed(comptime Element: type) type { if (Element != u8) @compileError("Cannot coerce fixed stack of type " ++ @typeName(Element) ++ " into a Writer"); - return io.Writer.wrap(self, struct { + return io.Writer.capture(self, struct { fn write(stack: *Self, buffer: []const u8) usize { stack.pushAll(buffer) catch |err| switch (err) { error.OutOfMemory => return 0, @@ -109,8 +109,5 @@ test "Fixed stack manipulation" { stack.clear(); try testing.expectEqual(stack.count(), 0); - - const writer = stack.writer(); - - try testing.expectEqual(writer.apply(&.{0, 0, 0, 0}), 4); + try testing.expectEqual(stack.writer().apply(&.{0, 0, 0, 0}), 4); } -- 2.34.1 From b698f18c4d1504296d2155425284db75f24e7c78 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 22 Oct 2022 23:21:09 +0100 Subject: [PATCH 50/93] Temporarily hardcode path to test binary --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 4476fb8..fb959e1 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -14,7 +14,7 @@ "name": "Test", "type": "gdb", "request": "launch", - "target": "$(find zig-cache -name test) src/main.zig", + "target": "${workspaceFolder}/zig-cache/o/2c2b6e0f85a1dcd7caa00765b05ec679/test", "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", -- 2.34.1 From ccb96875b1b23d274ca585f39260dbfb1200568b Mon Sep 17 00:00:00 2001 From: kayomn Date: Sat, 22 Oct 2022 23:34:30 +0100 Subject: [PATCH 51/93] Add "Reader" IO closure type for reading resources --- src/ona/io.zig | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/ona/io.zig b/src/ona/io.zig index 7714365..c939f49 100644 --- a/src/ona/io.zig +++ b/src/ona/io.zig @@ -98,6 +98,12 @@ pub const FileAccess = struct { } }; +/// +/// Closure that captures a reference to readable resources like block devices, memory buffers, +/// network sockets, and more. +/// +pub const Reader = meta.Function(@sizeOf(usize), []u8, usize); + /// /// Returns a state machine for lazily computing all `Element` components of a given source input /// that match a delimiting pattern. 
@@ -238,9 +244,9 @@ test "Null writing" { } /// -/// Writes the singular `byte` to `writer`, returning `true` if it was successfully written, +/// Applies the singular `byte` to `writer`, returning `true` if it was successfully written, /// otherwise `false`. /// -pub fn writeByte(writer: Writer, byte: u8) bool { +pub fn writeByte(writer: *Writer, byte: u8) bool { return (writer.apply(std.mem.asBytes(&byte)) != 0); } -- 2.34.1 From 813df95e020b56369785b965b0de49c1d06b5740 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 23 Oct 2022 18:11:02 +0100 Subject: [PATCH 52/93] Tidy up codebase structure --- .vscode/launch.json | 11 +++++++- .vscode/tasks.json | 5 ++-- build.zig | 12 ++++----- src/{ona => core}/fmt.zig | 0 src/{ona => core}/io.zig | 8 +++--- src/core/main.zig | 27 +++++++++++++++++++ src/{ona => core}/meta.zig | 0 src/{ona => core}/stack.zig | 0 src/{ona => core}/table.zig | 0 src/engine/main.zig | 40 ---------------------------- src/oar/main.zig | 53 ++++++++++++++++++++++++++++++------- src/ona/main.zig | 49 +++++++++++++++++++++------------- src/{engine => ona}/sys.zig | 18 ++++++------- 13 files changed, 133 insertions(+), 90 deletions(-) rename src/{ona => core}/fmt.zig (100%) rename src/{ona => core}/io.zig (96%) create mode 100644 src/core/main.zig rename src/{ona => core}/meta.zig (100%) rename src/{ona => core}/stack.zig (100%) rename src/{ona => core}/table.zig (100%) delete mode 100644 src/engine/main.zig rename src/{engine => ona}/sys.zig (97%) diff --git a/.vscode/launch.json b/.vscode/launch.json index fb959e1..3d0579a 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -2,7 +2,7 @@ "version": "0.2.0", "configurations": [ { - "name": "Debug", + "name": "Ona", "type": "gdb", "request": "launch", "target": "${workspaceFolder}/zig-out/bin/ona", @@ -10,6 +10,15 @@ "valuesFormatting": "parseText", "preLaunchTask": "Build Debug", }, + { + "name": "Oar", + "type": "gdb", + "request": "launch", + "target": "${workspaceFolder}/zig-out/bin/oar", + "cwd": "${workspaceRoot}", + "valuesFormatting": "parseText", + "preLaunchTask": "Build Debug", + }, { "name": "Test", "type": "gdb", diff --git a/.vscode/tasks.json b/.vscode/tasks.json index b3326fe..b5d8c38 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -41,11 +41,12 @@ "revealProblems": "onProblem", }, }, + { - "label": "Build Test", + "label": "Build Tests", "type": "shell", "command": "zig build test", - "group": "test" + "group": "build", }, ], } diff --git a/build.zig b/build.zig index 89dc34a..ce64b2f 100644 --- a/build.zig +++ b/build.zig @@ -6,14 +6,14 @@ const std = @import("std"); pub fn build(builder: *std.build.Builder) void { const target = builder.standardTargetOptions(.{}); const mode = builder.standardReleaseOptions(); - const ona_pkg = projectPkg("ona", &.{}); + const core_pkg = projectPkg("core", &.{}); - // Engine executable. + // Ona executable. 
{ - const exe = builder.addExecutable("engine", "./src/engine/main.zig"); + const exe = builder.addExecutable("ona", "./src/ona/main.zig"); - exe.addPackage(projectPkg("oar", &.{ona_pkg})); - exe.addPackage(ona_pkg); + exe.addPackage(projectPkg("oar", &.{core_pkg})); + exe.addPackage(core_pkg); exe.setTarget(target); exe.setBuildMode(mode); exe.install(); @@ -26,7 +26,7 @@ pub fn build(builder: *std.build.Builder) void { { const exe = builder.addExecutable("oar", "./src/oar/main.zig"); - exe.addPackage(ona_pkg); + exe.addPackage(core_pkg); exe.setTarget(target); exe.setBuildMode(mode); exe.install(); diff --git a/src/ona/fmt.zig b/src/core/fmt.zig similarity index 100% rename from src/ona/fmt.zig rename to src/core/fmt.zig diff --git a/src/ona/io.zig b/src/core/io.zig similarity index 96% rename from src/ona/io.zig rename to src/core/io.zig index c939f49..770e77f 100644 --- a/src/ona/io.zig +++ b/src/core/io.zig @@ -183,11 +183,11 @@ test "Spliterating text" { pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); /// -/// Returns `true` if `this_bytes` is the same length and contains the same data as `that_bytes`, -/// otherwise `false`. +/// Returns `true` if `this` is the same length and contains the same data as `that`, otherwise +/// `false`. /// -pub fn equalsBytes(this_bytes: []const u8, that_bytes: []const u8) bool { - return std.mem.eql(u8, this_bytes, that_bytes); +pub fn equalsBytes(this: []const u8, that: []const u8) bool { + return std.mem.eql(u8, this, that); } test "Equivalence of bytes" { diff --git a/src/core/main.zig b/src/core/main.zig new file mode 100644 index 0000000..0aa7691 --- /dev/null +++ b/src/core/main.zig @@ -0,0 +1,27 @@ + +/// +/// Platform-agnostic input and output interfaces for working with memory, files, and networks. +/// +pub const io = @import("./io.zig"); + +/// +/// Metaprogramming introspection utilities +/// +pub const meta = @import("./meta.zig"); + +/// +/// Sequential last-in first-out data structures. +/// +pub const stack = @import("./stack.zig"); + +/// +/// Unordered key-value association data structures. +/// +pub const table = @import("./table.zig"); + +test { + _ = io; + _ = meta; + _ = stack; + _ = table; +} diff --git a/src/ona/meta.zig b/src/core/meta.zig similarity index 100% rename from src/ona/meta.zig rename to src/core/meta.zig diff --git a/src/ona/stack.zig b/src/core/stack.zig similarity index 100% rename from src/ona/stack.zig rename to src/core/stack.zig diff --git a/src/ona/table.zig b/src/core/table.zig similarity index 100% rename from src/ona/table.zig rename to src/core/table.zig diff --git a/src/engine/main.zig b/src/engine/main.zig deleted file mode 100644 index d120a7b..0000000 --- a/src/engine/main.zig +++ /dev/null @@ -1,40 +0,0 @@ -const ona = @import("ona"); -const std = @import("std"); -const sys = @import("./sys.zig"); - -/// -/// Starts the the game engine. 
-/// -pub fn main() anyerror!void { - return nosuspend await async sys.display(anyerror, run); -} - -fn run(app: *sys.App, graphics: *sys.Graphics) anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - defer _ = gpa.deinit(); - - { - var file_access = try app.data.open(try sys.Path.joined(&.{"ona.lua"}), .readonly); - - defer file_access.close(); - - const file_size = try file_access.queryLength(); - const allocator = gpa.allocator(); - const buffer = try allocator.alloc(u8, file_size); - - defer allocator.free(buffer); - - if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; - - app.log(.debug, buffer); - } - - while (graphics.poll()) |_| { - graphics.present(); - } -} - -test { - _ = sys; -} diff --git a/src/oar/main.zig b/src/oar/main.zig index 85ee82c..9573ba7 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -1,4 +1,4 @@ -const ona = @import("ona"); +const core = @import("core"); const std = @import("std"); /// @@ -23,14 +23,18 @@ pub const Entry = extern struct { } /// + /// Attempts to read the next [Entry] from `file_access`. /// + /// Returns the read [Entry], `null` if there is no more to read, or a + /// [core.io.FileAccess.Error] if it failed. /// - pub fn next(file_access: ona.io.FileAccess) ona.io.FileAccess.Error!?Entry { - var entry = std.mem.zeroes(Entry); + pub fn next(file_access: core.io.FileAccess) core.io.FileAccess.Error!?Entry { + const mem = std.mem; + var entry = mem.zeroes(Entry); const origin = try file_access.queryCursor(); - if (((try file_access.read(std.mem.asBytes(&entry))) != @sizeOf(Entry)) and - ona.io.equalsBytes(&entry.signature, &signature_magic)) { + if (((try file_access.read(mem.asBytes(&entry))) != @sizeOf(Entry)) and + core.io.equalsBytes(&entry.signature, &signature_magic)) { try file_access.seek(origin); @@ -72,7 +76,7 @@ pub const Path = extern struct { /// Returns `true` if `this_path` is equal to `that_path, otherwise `false`. /// pub fn equals(this_path: Path, that_path: Path) bool { - return ona.io.equalsBytes(this_path.buffer[0 ..this_path. + return core.io.equalsBytes(this_path.buffer[0 ..this_path. length], that_path.buffer[0 .. that_path.length]); } @@ -80,7 +84,7 @@ pub const Path = extern struct { /// Returns the hash of the text in `path`. /// pub fn hash(path: Path) usize { - return ona.io.hashBytes(path.buffer[0 .. path.length]); + return core.io.hashBytes(path.buffer[0 .. path.length]); } /// @@ -97,7 +101,7 @@ pub const Path = extern struct { const last_sequence_index = sequences.len - 1; for (sequences) |sequence, index| if (sequence.len != 0) { - var components = ona.io.Spliterator(u8){ + var components = core.io.Spliterator(u8){ .source = sequence, .delimiter = "/", }; @@ -144,8 +148,37 @@ pub const Path = extern struct { /// /// Starts the **O**na **Ar**chive packer utility. /// -pub fn main() u8 { - // TODO: Implement. 
+pub fn main() !u8 { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + + defer _ = gpa.deinit(); + + const process = std.process; + const allocator = gpa.allocator(); + const args = try process.argsAlloc(allocator); + + defer process.argsFree(allocator, args); + + const outWriter = std.io.getStdOut().writer(); + + if (args.len > 1) { + const command = args[1]; + const io = core.io; + + if (io.equalsBytes(command, "pack")) { + return 0; + } + + if (io.equalsBytes(command, "unpack")) { + return 0; + } + + try outWriter.print("Unknown command: {s}", .{command}); + + return 1; + } + + try outWriter.print("{s}", .{args[0]}); return 0; } diff --git a/src/ona/main.zig b/src/ona/main.zig index 0aa7691..d38b13d 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -1,27 +1,40 @@ +const core = @import("core"); +const std = @import("std"); +const sys = @import("./sys.zig"); /// -/// Platform-agnostic input and output interfaces for working with memory, files, and networks. +/// Starts the the game engine. /// -pub const io = @import("./io.zig"); +pub fn main() anyerror!void { + return nosuspend await async sys.display(anyerror, run); +} -/// -/// Metaprogramming introspection utilities -/// -pub const meta = @import("./meta.zig"); +fn run(app: *sys.App, graphics: *sys.Graphics) anyerror!void { + var gpa = std.heap.GeneralPurposeAllocator(.{}){}; -/// -/// Sequential last-in first-out data structures. -/// -pub const stack = @import("./stack.zig"); + defer _ = gpa.deinit(); -/// -/// Unordered key-value association data structures. -/// -pub const table = @import("./table.zig"); + { + var file_access = try app.data.open(try sys.Path.joined(&.{"ona.lua"}), .readonly); + + defer file_access.close(); + + const file_size = try file_access.queryLength(); + const allocator = gpa.allocator(); + const buffer = try allocator.alloc(u8, file_size); + + defer allocator.free(buffer); + + if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; + + app.log(.debug, buffer); + } + + while (graphics.poll()) |_| { + graphics.present(); + } +} test { - _ = io; - _ = meta; - _ = stack; - _ = table; + _ = sys; } diff --git a/src/engine/sys.zig b/src/ona/sys.zig similarity index 97% rename from src/engine/sys.zig rename to src/ona/sys.zig index 81cd0cc..9b04750 100644 --- a/src/engine/sys.zig +++ b/src/ona/sys.zig @@ -2,8 +2,8 @@ const ext = @cImport({ @cInclude("SDL2/SDL.h"); }); +const core = @import("core"); const oar = @import("oar"); -const ona = @import("ona"); const std = @import("std"); /// @@ -44,12 +44,12 @@ pub const App = struct { /// Once the execution frame resumes, the value returned by executing `procedure` is returned. /// pub fn schedule(app: *App, procedure: anytype, - arguments: anytype) ona.meta.FnReturn(@TypeOf(procedure)) { + arguments: anytype) core.meta.FnReturn(@TypeOf(procedure)) { const Task = struct { procedure: @TypeOf(procedure), arguments: *@TypeOf(arguments), - result: ona.meta.FnReturn(@TypeOf(procedure)), + result: core.meta.FnReturn(@TypeOf(procedure)), const Task = @This(); @@ -104,7 +104,7 @@ pub const FileSystem = union(enum) { /// Archive file system information. /// const Archive = struct { - file_access: ona.io.FileAccess, + file_access: core.io.FileAccess, index_cache: IndexCache, entry_table: [max_open_entries]Entry = std.mem.zeroes([max_open_entries]Entry), @@ -117,7 +117,7 @@ pub const FileSystem = union(enum) { /// Stateful extension of an [oar.Entry]. 
/// const Entry = struct { - owner: ?*ona.io.FileAccess, + owner: ?*core.io.FileAccess, cursor: u64, header: oar.Entry, }; @@ -125,7 +125,7 @@ pub const FileSystem = union(enum) { /// /// Table cache for associating [oar.Path] values with offsets to entries in a given file. /// - const IndexCache = ona.table.Hashed(oar.Path, u64, .{ + const IndexCache = core.table.Hashed(oar.Path, u64, .{ .equals = oar.Path.equals, .hash = oar.Path.hash, }); @@ -172,12 +172,12 @@ pub const FileSystem = union(enum) { /// Returns a [FileAccess] reference that provides access to the file referenced by `path`or a /// [OpenError] if it failed. /// - pub fn open(file_system: *FileSystem, path: Path, mode: OpenMode) OpenError!ona.io.FileAccess { + pub fn open(file_system: *FileSystem, path: Path, mode: OpenMode) OpenError!core.io.FileAccess { switch (file_system.*) { .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; - const FileAccess = ona.io.FileAccess; + const FileAccess = core.io.FileAccess; for (archive.entry_table) |*entry| if (entry.owner == null) { const Implementation = struct { @@ -320,7 +320,7 @@ pub const FileSystem = union(enum) { mem.copy(u8, path_buffer[native.len .. path_buffer. len], path.buffer[0 .. path.length]); - const FileAccess = ona.io.FileAccess; + const FileAccess = core.io.FileAccess; const Implementation = struct { fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { -- 2.34.1 From 16a9d62933b3ceb4371bc716abb07bfc20330470 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 23 Oct 2022 18:15:25 +0100 Subject: [PATCH 53/93] Fix typo in VS Code debug configuration --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 3d0579a..68e2574 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -27,7 +27,7 @@ "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", - "preLaunchTask": "Build Test", + "preLaunchTask": "Build Tests", }, ] } -- 2.34.1 From 196a3d1200a59ba59e313db7ac89ae2ed30065e4 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 23 Oct 2022 18:15:48 +0100 Subject: [PATCH 54/93] Fix typo in tests root --- src/tests.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests.zig b/src/tests.zig index 0444b9e..25c718f 100644 --- a/src/tests.zig +++ b/src/tests.zig @@ -1,5 +1,5 @@ test { - _ = @import("./engine/main.zig"); + _ = @import("./core/main.zig"); _ = @import("./oar/main.zig"); _ = @import("./ona/main.zig"); } -- 2.34.1 From f5fd24fb765df938816063acf2190cfe8f049b26 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 23 Oct 2022 18:16:10 +0100 Subject: [PATCH 55/93] Clarify "meta" submodule doc comment and tidy up Function type --- src/core/main.zig | 2 +- src/core/meta.zig | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/src/core/main.zig b/src/core/main.zig index 0aa7691..602ea7b 100644 --- a/src/core/main.zig +++ b/src/core/main.zig @@ -5,7 +5,7 @@ pub const io = @import("./io.zig"); /// -/// Metaprogramming introspection utilities +/// Metaprogramming introspection and generation utilities. 
/// pub const meta = @import("./meta.zig"); diff --git a/src/core/meta.zig b/src/core/meta.zig index 22b38eb..884c4a4 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -12,13 +12,12 @@ pub fn FnReturn(comptime Fn: type) type { } /// -/// Returns a single-input single-output closure type where `Input` represents the input type, -/// `Output` represents the output type, and `captures_size` represents the size of the closure -/// context. +/// Returns a single-input single-output closure type where `In` represents the input type, `Out` +/// represents the output type, and `captures_size` represents the size of the closure context. /// -pub fn Function(comptime captures_size: usize, comptime Input: type, comptime Output: type) type { +pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: type) type { return struct { - applyErased: fn (*anyopaque, Input) Output, + applyErased: fn (*anyopaque, In) Out, context: [captures_size]u8, /// @@ -29,7 +28,7 @@ pub fn Function(comptime captures_size: usize, comptime Input: type, comptime Ou /// /// Applies `input` to `self`, producing a result according to the current context data. /// - pub fn apply(self: *Self, input: Input) Output { + pub fn apply(self: *Self, input: In) Out { return self.applyErased(&self.context, input); } @@ -39,7 +38,7 @@ pub fn Function(comptime captures_size: usize, comptime Input: type, comptime Ou /// /// The newly created [Self] is returned. /// - pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), Input) Output) Self { + pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), In) Out) Self { const Captures = @TypeOf(captures); if (@sizeOf(Captures) > captures_size) @@ -52,7 +51,7 @@ pub fn Function(comptime captures_size: usize, comptime Input: type, comptime Ou .context = undefined, .applyErased = struct { - fn do(erased: *anyopaque, input: Input) Output { + fn do(erased: *anyopaque, input: In) Out { return call(@ptrCast(*Captures, @alignCast( captures_align, erased)).*, input); } -- 2.34.1 From da7d9cfcc09c560bc4c61861d97e4bce833cfa67 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 24 Oct 2022 01:02:07 +0100 Subject: [PATCH 56/93] Add additional closures and uses --- src/core/io.zig | 21 +++++++++++++---- src/core/meta.zig | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 4 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 770e77f..4418c63 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -2,6 +2,11 @@ const meta = @import("./meta.zig"); const stack = @import("./stack.zig"); const std = @import("std"); +/// +/// Closure for allocating, reallocating, and deallocating dynamic memory resources through itself. +/// +pub const Allocator = meta.BiFunction(56, ?[]u8, usize, AllocationError![]u8); + /// /// File-system agnostic abstraction for manipulating a file. /// @@ -186,16 +191,24 @@ pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); /// Returns `true` if `this` is the same length and contains the same data as `that`, otherwise /// `false`. 
/// -pub fn equalsBytes(this: []const u8, that: []const u8) bool { - return std.mem.eql(u8, this, that); +pub fn equals(comptime Element: type, this: []const Element, that: []const Element) bool { + if (this.len != that.len) return false; + + { + var i = std.mem.zeroes(usize); + + while (i < this.len) : (i += 1) if (this[i] != that[i]) return false; + } + + return true; } test "Equivalence of bytes" { const bytes_sequence = &.{69, 42, 0}; const testing = std.testing; - try testing.expect(equalsBytes(bytes_sequence, bytes_sequence)); - try testing.expect(!equalsBytes(bytes_sequence, &.{69, 42})); + try testing.expect(equals(u8, bytes_sequence, bytes_sequence)); + try testing.expect(!equals(u8, bytes_sequence, &.{69, 42})); } /// diff --git a/src/core/meta.zig b/src/core/meta.zig index 884c4a4..582176b 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -11,6 +11,63 @@ pub fn FnReturn(comptime Fn: type) type { return type_info.Fn.return_type orelse void; } +/// +/// Returns a double-input single-output closure type where `A` represents the first input type, `B` +/// represents the second, and `Out` represents the output type, and `captures_size` represents the +/// size of the closure context. +/// +pub fn BiFunction(comptime captures_size: usize, comptime A: type, + comptime B: type, comptime Out: type) type { + + return struct { + applyErased: fn (*anyopaque, A, B) Out, + context: [captures_size]u8, + + /// + /// Function type. + /// + const Self = @This(); + + /// + /// Applies `a` and `b` to `self`, producing a result according to the current context data. + /// + pub fn apply(self: *Self, a: A, b: B) Out { + return self.applyErased(&self.context, a, b); + } + + /// + /// Creates a new [Self] by capturing the `captures` value as the context and `call` as the + /// as the behavior executed when [apply] or [applyErased] is called. + /// + /// The newly created [Self] is returned. + /// + pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), A, B) Out) Self { + const Captures = @TypeOf(captures); + + if (@sizeOf(Captures) > captures_size) + @compileError("`captures` must be smaller than or equal to " ++ + std.fmt.comptimePrint("{d}", .{captures_size}) ++ " bytes"); + + const captures_align = @alignOf(Captures); + + var function = Self{ + .context = undefined, + + .applyErased = struct { + fn do(erased: *anyopaque, a: A, b: B) Out { + return call(@ptrCast(*Captures, @alignCast( + captures_align, erased)).*, a, b); + } + }.do, + }; + + @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; + + return function; + } + }; +} + /// /// Returns a single-input single-output closure type where `In` represents the input type, `Out` /// represents the output type, and `captures_size` represents the size of the closure context. 
-- 2.34.1 From 989de70461c9cea0e40de6837f4fdfbf91de10e4 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 24 Oct 2022 13:58:27 +0100 Subject: [PATCH 57/93] Fix closure types not working with void captures --- src/core/meta.zig | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/core/meta.zig b/src/core/meta.zig index 582176b..f76f362 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -48,20 +48,20 @@ pub fn BiFunction(comptime captures_size: usize, comptime A: type, @compileError("`captures` must be smaller than or equal to " ++ std.fmt.comptimePrint("{d}", .{captures_size}) ++ " bytes"); - const captures_align = @alignOf(Captures); - var function = Self{ .context = undefined, .applyErased = struct { - fn do(erased: *anyopaque, a: A, b: B) Out { - return call(@ptrCast(*Captures, @alignCast( - captures_align, erased)).*, a, b); + fn applyErased(erased: *anyopaque, a: A, b: B) Out { + return call(if (Captures == void) {} else @ptrCast(*Captures, + @alignCast(@alignOf(Captures), erased)).*, a, b); } - }.do, + }.applyErased, }; - @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; + if (captures != {}) { + @ptrCast(*Captures, @alignCast(@alignOf(Captures), &function.context)).* = captures; + } return function; } @@ -108,11 +108,11 @@ pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: .context = undefined, .applyErased = struct { - fn do(erased: *anyopaque, input: In) Out { - return call(@ptrCast(*Captures, @alignCast( - captures_align, erased)).*, input); + fn applyErased(erased: *anyopaque, input: In) Out { + return call(if (Captures == void) {} else @ptrCast(*Captures, + @alignCast(@alignOf(Captures), erased)).*, input); } - }.do, + }.applyErased, }; @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; -- 2.34.1 From d578bb422e5018c7ec9d1b984232365fbdf3c784 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 24 Oct 2022 13:59:37 +0100 Subject: [PATCH 58/93] Change Allocator to use interface style --- src/core/io.zig | 14 +++----------- src/core/table.zig | 39 +++++++++++++++++++++++++++------------ 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 4418c63..c04fab1 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -3,9 +3,9 @@ const stack = @import("./stack.zig"); const std = @import("std"); /// -/// Closure for allocating, reallocating, and deallocating dynamic memory resources through itself. /// -pub const Allocator = meta.BiFunction(56, ?[]u8, usize, AllocationError![]u8); +/// +pub const Allocator = std.mem.Allocator; /// /// File-system agnostic abstraction for manipulating a file. @@ -203,7 +203,7 @@ pub fn equals(comptime Element: type, this: []const Element, that: []const Eleme return true; } -test "Equivalence of bytes" { +test "Memory buffers equal" { const bytes_sequence = &.{69, 42, 0}; const testing = std.testing; @@ -255,11 +255,3 @@ test "Null writing" { try testing.expectEqual(nullWriter().apply(sequence), sequence.len); } } - -/// -/// Applies the singular `byte` to `writer`, returning `true` if it was successfully written, -/// otherwise `false`. 
-/// -pub fn writeByte(writer: *Writer, byte: u8) bool { - return (writer.apply(std.mem.asBytes(&byte)) != 0); -} diff --git a/src/core/table.zig b/src/core/table.zig index b844766..ddf60b7 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -1,4 +1,4 @@ -const std = @import("std"); +const io = @import("./io.zig"); /// /// Returns a hash-backed table type of `Value`s indexed by `Key` and using `key_context` as the key @@ -7,7 +7,7 @@ const std = @import("std"); pub fn Hashed(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type { - const Allocator = std.mem.Allocator; + const Allocator = io.Allocator; return struct { allocator: Allocator, @@ -44,11 +44,13 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// /// Initializes a [Self] using `allocator` as the memory allocation strategy. /// - /// Returns a new [Self] value or an [Allocator.Error] if initializing failed. + /// Returns a new [Self] value or an [io.Allocator.Error] if initializing failed. /// pub fn init(allocator: Allocator) Allocator.Error!Self { + const initial_capacity = 4; + return Self{ - .buckets = try allocator.alloc(Bucket, 4), + .buckets = try allocator.alloc(Bucket, initial_capacity), .filled = 0, .allocator = allocator, .load_limit = 0.75, @@ -158,7 +160,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key /// already present. /// -pub const InsertError = std.mem.Allocator.Error || error { +pub const InsertError = io.Allocator.Error || error { KeyExists, }; @@ -173,14 +175,27 @@ pub fn KeyContext(comptime Key: type) type { }; } -test "Hashed table manipulation with bytes context" { - const testing = std.testing; - const io = @import("./io.zig"); +/// +/// A [KeyContext] for dealing with string literal (i.e. []const u8) values. +/// +/// **Note** that, while lightweight, this context should only be considered safe to use with string +/// literals or variables pointing to string literals - as the [KeyContext] does not take ownership +/// of its keys beyond copying the reference. 
+/// +pub const string_literal_context = KeyContext([]const u8){ + .hash = io.hashBytes, - var table = try Hashed([]const u8, u32, .{ - .equals = io.equalsBytes, - .hash = io.hashBytes, - }).init(testing.allocator); + .equals = struct { + fn stringsEqual(this: []const u8, that: []const u8) bool { + return io.equals(u8, this, that); + } + }.stringsEqual, +}; + +test "Hash table manipulation with string literal context" { + const testing = @import("std").testing; + + var table = try Hashed([]const u8, u32, string_literal_context).init(testing.allocator); defer table.deinit(); -- 2.34.1 From e24868406cb848a826e9dd796bd7e6bc2d1c0ced Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 24 Oct 2022 14:01:27 +0100 Subject: [PATCH 59/93] Remove default build task from VS Code task configuration --- .vscode/tasks.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/.vscode/tasks.json b/.vscode/tasks.json index b5d8c38..0f3ff97 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -25,12 +25,7 @@ "label": "Build Debug", "type": "shell", "command": "zig build", - - "group": { - "kind": "build", - "isDefault": true - }, - + "group": "build", "presentation": { "echo": true, "reveal": "always", -- 2.34.1 From a2abd9d42d9a5a603ac1332ad2680f3b7dd05e13 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 00:39:43 +0100 Subject: [PATCH 60/93] Fix tests not depending on core lib module --- build.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/build.zig b/build.zig index ce64b2f..5cfee8e 100644 --- a/build.zig +++ b/build.zig @@ -36,6 +36,7 @@ pub fn build(builder: *std.build.Builder) void { { const tests = builder.addTestExe("test", "./src/tests.zig"); + tests.addPackage(core_pkg); tests.setTarget(target); tests.setBuildMode(mode); builder.step("test", "Run unit tests").dependOn(&tests.step); -- 2.34.1 From 90f503f7c0d7c479ee93c1364259d10e344c5312 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 00:40:14 +0100 Subject: [PATCH 61/93] More work on OAR packing utility --- src/core/io.zig | 19 ++++++++++++++- src/oar/main.zig | 62 ++++++++++++++++++++++++++++-------------------- 2 files changed, 54 insertions(+), 27 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index c04fab1..c6dd1cb 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -187,6 +187,23 @@ test "Spliterating text" { /// pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); +/// +/// Returns `true` if `elements` starts with the characters in `with`, otherwise `false`. +/// +pub fn begins(comptime Element: type, elements: []const Element, with: []const Element) bool { + if (elements.len < with.len) return false; + + return equals(Element, elements[0 .. with.len], with); +} + +test "Check memory begins with" { + const bytes_sequence = &.{69, 42}; + const testing = std.testing; + + try testing.expect(begins(u8, &.{69, 42, 0, 89}, bytes_sequence)); + try testing.expect(!begins(u8, &.{69, 89, 42, 0}, bytes_sequence)); +} + /// /// Returns `true` if `this` is the same length and contains the same data as `that`, otherwise /// `false`. 
@@ -203,7 +220,7 @@ pub fn equals(comptime Element: type, this: []const Element, that: []const Eleme return true; } -test "Memory buffers equal" { +test "Check memory is equals" { const bytes_sequence = &.{69, 42, 0}; const testing = std.testing; diff --git a/src/oar/main.zig b/src/oar/main.zig index 9573ba7..99066ae 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -76,7 +76,7 @@ pub const Path = extern struct { /// Returns `true` if `this_path` is equal to `that_path, otherwise `false`. /// pub fn equals(this_path: Path, that_path: Path) bool { - return core.io.equalsBytes(this_path.buffer[0 ..this_path. + return core.io.equals(u8, this_path.buffer[0 ..this_path. length], that_path.buffer[0 .. that_path.length]); } @@ -145,44 +145,54 @@ pub const Path = extern struct { pub const seperator = '/'; }; +test "Path" { + const testing = std.testing; + const empty_path = Path.empty; + + try testing.expectEqual(empty_path.length, 0); + try testing.expect(empty_path.equals(Path.empty)); + + const joined_component_path = try Path.joined(&.{"path", "to/my", "/file"}); + const joined_normalized_path = try Path.joined(&.{"path/to/my/file"}); + + try testing.expectEqual(empty_path.length, joined_normalized_path.length); + try testing.expect(joined_component_path.equals(joined_normalized_path)); +} + /// /// Starts the **O**na **Ar**chive packer utility. /// -pub fn main() !u8 { +pub fn main() u8 { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); + defer std.debug.assert(!gpa.deinit()); - const process = std.process; const allocator = gpa.allocator(); - const args = try process.argsAlloc(allocator); + const out_writer = std.io.getStdOut().writer(); + const process = std.process; + + const args = process.argsAlloc(allocator) catch { + try out_writer.print("Failed to allocate args memory", .{}); + + return 1; + }; defer process.argsFree(allocator, args); - const outWriter = std.io.getStdOut().writer(); + if (args.len < 2) { + try out_writer.print("Usage: oar [OPTION]... 
[FILE]...", .{}); + try out_writer.print("Options and arguments", .{}); - if (args.len > 1) { - const command = args[1]; - const io = core.io; - - if (io.equalsBytes(command, "pack")) { - return 0; - } - - if (io.equalsBytes(command, "unpack")) { - return 0; - } - - try outWriter.print("Unknown command: {s}", .{command}); - - return 1; + return 0; } - try outWriter.print("{s}", .{args[0]}); + const arg = std.mem.sliceTo(args[1], 0); + + if (core.io.begins(arg, "--create")) { + return 0; + } + + try out_writer.print("Unrecognized command-line option `{s}`", .{arg}); return 0; } - -test { - -} -- 2.34.1 From 7c5dcc1cdef10e1c1b9ead9277efc55e0d694980 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 01:02:01 +0100 Subject: [PATCH 62/93] Fix typo in OAR unit test --- src/oar/main.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oar/main.zig b/src/oar/main.zig index 99066ae..cd40b67 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -155,7 +155,7 @@ test "Path" { const joined_component_path = try Path.joined(&.{"path", "to/my", "/file"}); const joined_normalized_path = try Path.joined(&.{"path/to/my/file"}); - try testing.expectEqual(empty_path.length, joined_normalized_path.length); + try testing.expectEqual(joined_component_path.length, joined_normalized_path.length); try testing.expect(joined_component_path.equals(joined_normalized_path)); } -- 2.34.1 From 8ab80f07b860b875a1dbd66b0a6f3ecb5b421163 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 01:02:18 +0100 Subject: [PATCH 63/93] Add install step to tests in build script --- .vscode/launch.json | 2 +- build.zig | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 68e2574..ad7692e 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -23,7 +23,7 @@ "name": "Test", "type": "gdb", "request": "launch", - "target": "${workspaceFolder}/zig-cache/o/2c2b6e0f85a1dcd7caa00765b05ec679/test", + "target": "${workspaceFolder}/zig-cache/o/acfc38b60cc59a70f8386ac73748d92a/test", "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", diff --git a/build.zig b/build.zig index 5cfee8e..924d402 100644 --- a/build.zig +++ b/build.zig @@ -39,6 +39,7 @@ pub fn build(builder: *std.build.Builder) void { tests.addPackage(core_pkg); tests.setTarget(target); tests.setBuildMode(mode); + tests.install(); builder.step("test", "Run unit tests").dependOn(&tests.step); } } -- 2.34.1 From bc5b69ac050e5a04ec91fafb70c299f71ac484b4 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 13:19:56 +0100 Subject: [PATCH 64/93] Add test install location to VS code debug config --- .vscode/launch.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index ad7692e..be3d58d 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -23,7 +23,7 @@ "name": "Test", "type": "gdb", "request": "launch", - "target": "${workspaceFolder}/zig-cache/o/acfc38b60cc59a70f8386ac73748d92a/test", + "target": "${workspaceFolder}/zig-out/bin/test", "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", -- 2.34.1 From 32b18e4ebfd64ef769a00acbb9073dbc3ac1022f Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 25 Oct 2022 17:33:00 +0100 Subject: [PATCH 65/93] Plan out archive API and use in Oar tool program --- src/oar/main.zig | 61 +++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 6 deletions(-) diff --git a/src/oar/main.zig 
b/src/oar/main.zig index cd40b67..15f5a77 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -1,6 +1,13 @@ const core = @import("core"); const std = @import("std"); +/// +/// +/// +pub const Archive = struct { + +}; + /// /// An entry block of an Oar archive file. /// @@ -34,7 +41,7 @@ pub const Entry = extern struct { const origin = try file_access.queryCursor(); if (((try file_access.read(mem.asBytes(&entry))) != @sizeOf(Entry)) and - core.io.equalsBytes(&entry.signature, &signature_magic)) { + core.io.equals(u8, &entry.signature, &signature_magic)) { try file_access.seek(origin); @@ -172,7 +179,7 @@ pub fn main() u8 { const process = std.process; const args = process.argsAlloc(allocator) catch { - try out_writer.print("Failed to allocate args memory", .{}); + out_writer.print("Failed to allocate args memory\n", .{}) catch undefined; return 1; }; @@ -180,19 +187,61 @@ pub fn main() u8 { defer process.argsFree(allocator, args); if (args.len < 2) { - try out_writer.print("Usage: oar [OPTION]... [FILE]...", .{}); - try out_writer.print("Options and arguments", .{}); + out_writer.print("Usage: oar [OPTION]... [FILE]...\n", .{}) catch undefined; + out_writer.print("Options and arguments\n", .{}) catch undefined; return 0; } const arg = std.mem.sliceTo(args[1], 0); - if (core.io.begins(arg, "--create")) { + if (core.io.equals(u8, arg, "--create")) { + if (args.len < 3) { + out_writer.print("Expected output file specified after `--create`\n", .{}) catch undefined; + + return 1; + } + + var archive = Archive.init(allocator, Path.joined(&.{args[2]})) catch { + out_writer.print("Failed to initialize archive for create\n", .{}) catch undefined; + + return 1; + }; + + defer archive.deinit(); + + for (args[3 .. ]) |input_file_path| { + const file = std.fs.cwd().openFile(input_file_path) catch { + out_writer.print("Failed to open {s}\n", .{input_file_path}) catch undefined; + + return 1; + }; + + defer file.close(); + + var entry = archive.open(Path.joined(&.{input_file_path})) catch { + out_writer.print("Failed to open {s}\n", .{input_file_path}) catch undefined; + + return 1; + }; + + defer archive.close(entry); + + var copy_buffer = std.mem.zeroes([4096]u8); + + while (true) { + const read = try file.read(©_buffer); + + if (read == 0) break; + + try entry.write(copy_buffer[read ..]); + } + } + return 0; } - try out_writer.print("Unrecognized command-line option `{s}`", .{arg}); + out_writer.print("Unrecognized command-line option `{s}`\n", .{arg}) catch undefined; return 0; } -- 2.34.1 From 7599ce61f2c68a2ef730fa565632847a0d747e99 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 26 Oct 2022 18:09:59 +0100 Subject: [PATCH 66/93] Initial work merging Oar into Ona itself --- src/core/io.zig | 32 +++++++++++ src/oar/main.zig | 6 +++ src/ona/oar.zig | 138 +++++++++++++++++++++++++++++++++++++++++++++++ src/ona/sys.zig | 35 ++---------- 4 files changed, 179 insertions(+), 32 deletions(-) create mode 100644 src/ona/oar.zig diff --git a/src/core/io.zig b/src/core/io.zig index c6dd1cb..48dee34 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -204,6 +204,38 @@ test "Check memory begins with" { try testing.expect(!begins(u8, &.{69, 89, 42, 0}, bytes_sequence)); } +/// +/// Returns a sliced reference of the raw bytes in `pointer`. +/// +/// **Note** that passing a slice will convert it to a byte slice. 
+/// +pub fn bytes(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { + .Pointer => |info| if (info.is_const) []const u8 else []u8, + else => @compileError("`pointer` must be a pointer type"), +} { + const Pointer = @TypeOf(pointer); + const pointer_info = @typeInfo(Pointer).Pointer; + + switch (pointer_info.size) { + .Many => @compileError("`pointer` cannot be an unbound pointer type"), + .C => @compileError("`pointer` cannot be a C-style pointer"), + + .One => return @ptrCast(if (pointer_info.is_const) [*]const u8 + else [*]u8, pointer)[0 .. @sizeOf(Pointer)], + + .Slice => return @ptrCast(if (pointer_info.is_const) [*]const u8 else + [*]u8, pointer.ptr)[0 .. (@sizeOf(Pointer) * pointer.len)], + } +} + +test "Bytes of types" { + const testing = std.testing; + + var foo: u32 = 10; + + testing.expectEqual(bytes(&foo), 0x0a); +} + /// /// Returns `true` if `this` is the same length and contains the same data as `that`, otherwise /// `false`. diff --git a/src/oar/main.zig b/src/oar/main.zig index 15f5a77..bc1d5ea 100644 --- a/src/oar/main.zig +++ b/src/oar/main.zig @@ -5,7 +5,13 @@ const std = @import("std"); /// /// pub const Archive = struct { + pub fn deinit(archive: *Archive) { + } + + pub fn init(file_system: *const sys.FileSystem, file_path: sys.Path) { + + } }; /// diff --git a/src/ona/oar.zig b/src/ona/oar.zig new file mode 100644 index 0000000..d24da0d --- /dev/null +++ b/src/ona/oar.zig @@ -0,0 +1,138 @@ +const core = @import("./core.zig"); +const sys = @import("./sys.zig"); + +/// +/// +/// +pub const Archive = struct { + file_system: *const sys.FileSystem, + archive_path: sys.Path, + + /// + /// + /// + const Header = extern struct { + signature: [signature_magic.len]u8, + revision: u8, + entry_offset: u64, + padding: [500]u8 = std.mem.zeroes([500]u8), + + /// + /// Magic identifier used to validate [Entry] data. + /// + const signature_magic = [3]u8{'o', 'a', 'r'}; + + comptime { + const size = @sizeOf(@This()); + + if (size != 512) + @compileError("Header is " ++ + std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); + } + }; + + /// + /// An entry block of an Oar archive file. + /// + /// Typically, following the block in memory is the file data it holds the meta-information for. + /// + const Entry = extern struct { + signature: [signature_magic.len]u8 = signature_magic, + revision: u8, + path: Path, + file_size: u64, + absolute_offset: u64, + padding: [232]u8 = std.mem.zeroes([232]u8), + + comptime { + const entry_size = @sizeOf(Entry); + + if (entry_size != 512) + @compileError("Entry is " ++ + std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); + } + }; + + /// + /// + /// + pub const OpenError = error { + FileNotFound, + EntryNotFound, + UnsupportedArchive, + }; + + /// + /// + /// + pub fn open(archive: Archive, entry_path: Path) OpenError!EntryAccess { + const file_access = try archive.file_system.open(entry_path, .readonly); + + errdefer file_access.close(); + + var header = std.mem.zeroes(Header); + const header_size = @sizeOf(Header); + const io = core.io; + + // Validate header. + if ((try file_access.read(io.bytes(&header)) != header_size) or header + (!core.io.equals(u8, &header.signature, &Header.signature_magic)) or + (header.revision != revision) or + (header.entry_offset <= header_size)) return error.UnsupportedArchive; + + // Go to file table. + try file_access.seek(header.entry_offset); + + // Read file table. 
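// Note on the table scan that follows: entries are read in batches of eight
// 512-byte blocks into `entry_buffer`, each batch is compared linearly
// against `entry_path`, and scanning stops on the first path match or once a
// read no longer divides evenly into whole Entry blocks.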
+ var entry_buffer = std.mem.zeroes([8]Entry); + var bytes_read = try file_access.read(io.bytes(&entry_buffer)); + + while (@mod(bytes_read, @sizeOf(Entry)) == 0) { + for (entry_buffer[0 .. (bytes_read / @sizeOf(Entry))]) |entry| { + if (entry.path.equals(entry_path)) { + file_access.seek(entry.file_offset); + + return Entry{ + .file_access = file_access, + }; + } + } + + bytes_read = try file_access.read(io.bytes(&entry_buffer)); + } + + var head = std.mem.zeroes(usize); + var tail = (header.file_count - 1); + + while (head <= tail) { + const midpoint = (head + (tail - head) / 2); + + const comparison = entry_path.compare(arr[m])); + + if (comparison == 0) return midpoint; + + if (comparison > 0) { + // If x greater, ignore left half + head = (midpoint + 1); + } else { + // If x is smaller, ignore right half + tail = (midpoint - 1); + } + } + + return error.EntryNotFound; + } +}; + +pub const EntryAccess = struct { + file_access: FileAccess, + + pub fn close(entry: Entry) void { + entry.file_access.close(); + } +}; + +/// +/// +/// +const revision = 0; diff --git a/src/ona/sys.zig b/src/ona/sys.zig index 9b04750..63f3ed3 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -100,37 +100,6 @@ pub const FileSystem = union(enum) { native: []const u8, archive: Archive, - /// - /// Archive file system information. - /// - const Archive = struct { - file_access: core.io.FileAccess, - index_cache: IndexCache, - entry_table: [max_open_entries]Entry = std.mem.zeroes([max_open_entries]Entry), - - /// - /// Hard limit on the maximum number of entries open at once. - /// - const max_open_entries = 16; - - /// - /// Stateful extension of an [oar.Entry]. - /// - const Entry = struct { - owner: ?*core.io.FileAccess, - cursor: u64, - header: oar.Entry, - }; - - /// - /// Table cache for associating [oar.Path] values with offsets to entries in a given file. - /// - const IndexCache = core.table.Hashed(oar.Path, u64, .{ - .equals = oar.Path.equals, - .hash = oar.Path.hash, - }); - }; - /// /// With files typically being backed by a block device, they can produce a variety of errors - /// from physical to virtual errors - these are all encapsulated by the API as general @@ -172,7 +141,9 @@ pub const FileSystem = union(enum) { /// Returns a [FileAccess] reference that provides access to the file referenced by `path`or a /// [OpenError] if it failed. /// - pub fn open(file_system: *FileSystem, path: Path, mode: OpenMode) OpenError!core.io.FileAccess { + pub fn open(file_system: *const FileSystem, path: Path, + mode: OpenMode) OpenError!core.io.FileAccess { + switch (file_system.*) { .archive => |*archive| { if (mode != .readonly) return error.ModeUnsupported; -- 2.34.1 From b4816a34f6dfc6d5f80e9243170c19c9a67ff972 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 27 Oct 2022 16:51:11 +0100 Subject: [PATCH 67/93] Add additional memory utils to core module --- src/core/io.zig | 138 ++++++++++++------------------------------------ 1 file changed, 34 insertions(+), 104 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 48dee34..11c3fa1 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -7,102 +7,6 @@ const std = @import("std"); /// pub const Allocator = std.mem.Allocator; -/// -/// File-system agnostic abstraction for manipulating a file. -/// -pub const FileAccess = struct { - context: *anyopaque, - implementation: *const Implementation, - - /// - /// Provides a set of implementation-specific behaviors to a [FileAccess] instance. 
- /// - pub const Implementation = struct { - close: fn (*anyopaque) void, - queryCursor: fn (*anyopaque) Error!u64, - queryLength: fn (*anyopaque) Error!u64, - read: fn (*anyopaque, []u8) Error!usize, - seek: fn (*anyopaque, u64) Error!void, - seekToEnd: fn (*anyopaque) Error!void, - skip: fn (*anyopaque, i64) Error!void, - }; - - /// - /// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer - /// pointing to a file or the file becomming invalid for whatever reason. - /// - pub const Error = error { - FileInaccessible, - }; - - /// - /// Close the file referenced by `file_access` on the main thread, invalidating the reference to - /// it and releasing any associated resources. - /// - /// Freeing an invalid `file_access` has no effect on the file and logs a warning over the - /// wasted effort. - /// - pub fn close(file_access: FileAccess) void { - return file_access.implementation.close(file_access.context); - } - - /// - /// Attempts to query the current cursor position for the file referenced by `file_access`. - /// - /// Returns the number of bytes into the file that the cursor is relative to its beginning or a - /// [Error] on failure. - /// - pub fn queryCursor(file_access: FileAccess) Error!u64 { - return file_access.implementation.queryCursor(file_access.context); - } - - /// - /// Attempts to query the current length for the file referenced by `file_access`. - /// - /// Returns the current length of the file at the time of the operation or a [Error] if the file - /// failed to be queried. - /// - pub fn queryLength(file_access: FileAccess) Error!u64 { - return file_access.implementation.queryLength(file_access.context); - } - - /// - /// Attempts to read `file_access` from the its current position into `buffer`. - /// - /// Returns the number of bytes that were available to be read, otherwise an [Error] on failure. - /// - pub fn read(file_access: FileAccess, buffer: []u8) Error!usize { - return file_access.implementation.read(file_access.context, buffer); - } - - /// - /// Attempts to seek `file_access` from the beginning of the file to `cursor` bytes. - /// - /// Returns [Error] on failure. - /// - pub fn seek(file_access: FileAccess, cursor: u64) Error!void { - return file_access.implementation.seek(file_access.context, cursor); - } - - /// - /// Attempts to seek `file_access` to the end of the file. - /// - /// Returns [Error] on failure. - /// - pub fn seekToEnd(file_access: FileAccess) Error!void { - return file_access.implementation.seekToEnd(file_access.context); - } - - /// - /// Attempts to seek `file_access` by `offset` from the current file position. - /// - /// Returns [Error] on failure; - /// - pub fn skip(file_access: FileAccess, offset: i64) Error!void { - return file_access.implementation.skip(file_access.context, offset); - } -}; - /// /// Closure that captures a reference to readable resources like block devices, memory buffers, /// network sockets, and more. 
@@ -161,7 +65,7 @@ test "Spliterating text" { var index = @as(usize, 0); while (spliterator.next()) |split| : (index += 1) { - try testing.expect(std.mem.eql(u8, split, components[index])); + try testing.expect(equals(u8, split, components[index])); } } @@ -176,7 +80,7 @@ test "Spliterating text" { var index = @as(usize, 0); while (spliterator.next()) |split| : (index += 1) { - try testing.expect(std.mem.eql(u8, split, components[index])); + try testing.expect(equals(u8, split, components[index])); } } } @@ -209,7 +113,7 @@ test "Check memory begins with" { /// /// **Note** that passing a slice will convert it to a byte slice. /// -pub fn bytes(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { +pub fn bytesOf(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { .Pointer => |info| if (info.is_const) []const u8 else []u8, else => @compileError("`pointer` must be a pointer type"), } { @@ -233,7 +137,35 @@ test "Bytes of types" { var foo: u32 = 10; - testing.expectEqual(bytes(&foo), 0x0a); + testing.expectEqual(bytesOf(&foo), 0x0a); +} + +/// +/// Compares `this` to `that`, returning the difference between the first byte deviation in the two +/// sequences, otherwise `0` if they are identical. +/// +pub fn compareBytes(this: []const u8, that: []const u8) isize { + var cursor: usize = 0; + + while (cursor != this.len) : (cursor += 1) { + const this_byte = this[cursor]; + + if (cursor != that.len) return this_byte; + + const that_byte = that[cursor]; + + if (this_byte != that_byte) return (this_byte - that_byte); + } + + return 0; +} + +test "Compare bytes" { + const testing = std.testing; + + try testing.expectEquals(compareBytes(&.{69, 42, 0}, &.{69, 42, 0}), 0); + try testing.expectEquals(compareBytes(&.{69, 42, 0}, &.{69, 42}), 42); + try testing.expectEquals(compareBytes(&.{69, 42}, &.{69, 42, 0}), -42); } /// @@ -243,11 +175,9 @@ test "Bytes of types" { pub fn equals(comptime Element: type, this: []const Element, that: []const Element) bool { if (this.len != that.len) return false; - { - var i = std.mem.zeroes(usize); + var i = std.mem.zeroes(usize); - while (i < this.len) : (i += 1) if (this[i] != that[i]) return false; - } + while (i < this.len) : (i += 1) if (this[i] != that[i]) return false; return true; } -- 2.34.1 From 5a913ef0f8098083075b23b5bca0a1f072229035 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 27 Oct 2022 16:51:34 +0100 Subject: [PATCH 68/93] Remove Oar as a dedicated module --- .vscode/launch.json | 10 +- src/oar/main.zig | 253 -------------------------------------------- 2 files changed, 1 insertion(+), 262 deletions(-) delete mode 100644 src/oar/main.zig diff --git a/.vscode/launch.json b/.vscode/launch.json index be3d58d..4c87ee2 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -10,15 +10,7 @@ "valuesFormatting": "parseText", "preLaunchTask": "Build Debug", }, - { - "name": "Oar", - "type": "gdb", - "request": "launch", - "target": "${workspaceFolder}/zig-out/bin/oar", - "cwd": "${workspaceRoot}", - "valuesFormatting": "parseText", - "preLaunchTask": "Build Debug", - }, + { "name": "Test", "type": "gdb", diff --git a/src/oar/main.zig b/src/oar/main.zig deleted file mode 100644 index bc1d5ea..0000000 --- a/src/oar/main.zig +++ /dev/null @@ -1,253 +0,0 @@ -const core = @import("core"); -const std = @import("std"); - -/// -/// -/// -pub const Archive = struct { - pub fn deinit(archive: *Archive) { - - } - - pub fn init(file_system: *const sys.FileSystem, file_path: sys.Path) { - - } -}; - -/// -/// An entry block of an Oar archive file. 
-/// -/// Typically, following the block in memory is the file data it holds the meta-information for. -/// -pub const Entry = extern struct { - signature: [signature_magic.len]u8 = signature_magic, - revision: u8, - path: Path, - file_size: u64, - absolute_offset: u64, - padding: [232]u8 = std.mem.zeroes([232]u8), - - comptime { - const entry_size = @sizeOf(Entry); - - if (entry_size != 512) - @compileError("Entry is " ++ - std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); - } - - /// - /// Attempts to read the next [Entry] from `file_access`. - /// - /// Returns the read [Entry], `null` if there is no more to read, or a - /// [core.io.FileAccess.Error] if it failed. - /// - pub fn next(file_access: core.io.FileAccess) core.io.FileAccess.Error!?Entry { - const mem = std.mem; - var entry = mem.zeroes(Entry); - const origin = try file_access.queryCursor(); - - if (((try file_access.read(mem.asBytes(&entry))) != @sizeOf(Entry)) and - core.io.equals(u8, &entry.signature, &signature_magic)) { - - try file_access.seek(origin); - - return null; - } - - return entry; - } - - /// - /// Magic identifier used to validate [Entry] data. - /// - pub const signature_magic = [3]u8{'o', 'a', 'r'}; -}; - -/// -/// Unique identifier pointing to an entry within an archive. -/// -/// A path does not do any verification that the given entry pointed to actually exists. -/// -pub const Path = extern struct { - buffer: [255]u8, - length: u8, - - /// - /// [Error.TooLong] occurs when creating a path that is greater than the maximum path size **in - /// bytes**. - /// - pub const Error = error { - TooLong, - }; - - /// - /// An empty [Path] with a length of `0`. - /// - pub const empty = std.mem.zeroes(Path); - - /// - /// Returns `true` if `this_path` is equal to `that_path, otherwise `false`. - /// - pub fn equals(this_path: Path, that_path: Path) bool { - return core.io.equals(u8, this_path.buffer[0 ..this_path. - length], that_path.buffer[0 .. that_path.length]); - } - - /// - /// Returns the hash of the text in `path`. - /// - pub fn hash(path: Path) usize { - return core.io.hashBytes(path.buffer[0 .. path.length]); - } - - /// - /// Attempts to create a [Path] with the path components in `sequences` as a fully qualified - /// path from root. - /// - /// A [Path] value is returned containing the fully qualified path from the file-system root or - /// a [Error] if it could not be created. - /// - pub fn joined(sequences: []const []const u8) Error!Path { - var path = empty; - - if (sequences.len != 0) { - const last_sequence_index = sequences.len - 1; - - for (sequences) |sequence, index| if (sequence.len != 0) { - var components = core.io.Spliterator(u8){ - .source = sequence, - .delimiter = "/", - }; - - while (components.next()) |component| if (component.len != 0) { - for (component) |byte| { - if (path.length == max) return error.TooLong; - - path.buffer[path.length] = byte; - path.length += 1; - } - - if (components.hasNext()) { - if (path.length == max) return error.TooLong; - - path.buffer[path.length] = '/'; - path.length += 1; - } - }; - - if (index < last_sequence_index) { - if (path.length == max) return error.TooLong; - - path.buffer[path.length] = '/'; - path.length += 1; - } - }; - } - - return path; - } - - /// - /// Maximum number of **bytes** in a [Path]. - /// - pub const max = 255; - - /// - /// Textual separator between components of a [Path]. 
- /// - pub const seperator = '/'; -}; - -test "Path" { - const testing = std.testing; - const empty_path = Path.empty; - - try testing.expectEqual(empty_path.length, 0); - try testing.expect(empty_path.equals(Path.empty)); - - const joined_component_path = try Path.joined(&.{"path", "to/my", "/file"}); - const joined_normalized_path = try Path.joined(&.{"path/to/my/file"}); - - try testing.expectEqual(joined_component_path.length, joined_normalized_path.length); - try testing.expect(joined_component_path.equals(joined_normalized_path)); -} - -/// -/// Starts the **O**na **Ar**chive packer utility. -/// -pub fn main() u8 { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - defer std.debug.assert(!gpa.deinit()); - - const allocator = gpa.allocator(); - const out_writer = std.io.getStdOut().writer(); - const process = std.process; - - const args = process.argsAlloc(allocator) catch { - out_writer.print("Failed to allocate args memory\n", .{}) catch undefined; - - return 1; - }; - - defer process.argsFree(allocator, args); - - if (args.len < 2) { - out_writer.print("Usage: oar [OPTION]... [FILE]...\n", .{}) catch undefined; - out_writer.print("Options and arguments\n", .{}) catch undefined; - - return 0; - } - - const arg = std.mem.sliceTo(args[1], 0); - - if (core.io.equals(u8, arg, "--create")) { - if (args.len < 3) { - out_writer.print("Expected output file specified after `--create`\n", .{}) catch undefined; - - return 1; - } - - var archive = Archive.init(allocator, Path.joined(&.{args[2]})) catch { - out_writer.print("Failed to initialize archive for create\n", .{}) catch undefined; - - return 1; - }; - - defer archive.deinit(); - - for (args[3 .. ]) |input_file_path| { - const file = std.fs.cwd().openFile(input_file_path) catch { - out_writer.print("Failed to open {s}\n", .{input_file_path}) catch undefined; - - return 1; - }; - - defer file.close(); - - var entry = archive.open(Path.joined(&.{input_file_path})) catch { - out_writer.print("Failed to open {s}\n", .{input_file_path}) catch undefined; - - return 1; - }; - - defer archive.close(entry); - - var copy_buffer = std.mem.zeroes([4096]u8); - - while (true) { - const read = try file.read(©_buffer); - - if (read == 0) break; - - try entry.write(copy_buffer[read ..]); - } - } - - return 0; - } - - out_writer.print("Unrecognized command-line option `{s}`\n", .{arg}) catch undefined; - - return 0; -} -- 2.34.1 From 14397b05e3807ea37d521941e0833e89ee646d54 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 27 Oct 2022 16:51:49 +0100 Subject: [PATCH 69/93] Update build script --- build.zig | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/build.zig b/build.zig index 924d402..9db4b88 100644 --- a/build.zig +++ b/build.zig @@ -12,7 +12,6 @@ pub fn build(builder: *std.build.Builder) void { { const exe = builder.addExecutable("ona", "./src/ona/main.zig"); - exe.addPackage(projectPkg("oar", &.{core_pkg})); exe.addPackage(core_pkg); exe.setTarget(target); exe.setBuildMode(mode); @@ -22,16 +21,6 @@ pub fn build(builder: *std.build.Builder) void { exe.linkLibC(); } - // Oar executable. - { - const exe = builder.addExecutable("oar", "./src/oar/main.zig"); - - exe.addPackage(core_pkg); - exe.setTarget(target); - exe.setBuildMode(mode); - exe.install(); - } - // Tests executable. 
{ const tests = builder.addTestExe("test", "./src/tests.zig"); -- 2.34.1 From 35de8cf2ae266b4d6a4622010ad80ca20abb6b35 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 30 Oct 2022 22:06:56 +0000 Subject: [PATCH 70/93] Tidy up build configurations --- .vscode/launch.json | 2 +- .vscode/tasks.json | 9 +-------- build.zig | 17 ++++++++--------- 3 files changed, 10 insertions(+), 18 deletions(-) diff --git a/.vscode/launch.json b/.vscode/launch.json index 4c87ee2..9782360 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -19,7 +19,7 @@ "arguments": "main.zig", "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", - "preLaunchTask": "Build Tests", + "preLaunchTask": "Build Debug", }, ] } diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 0f3ff97..f2f1dbc 100755 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -28,7 +28,7 @@ "group": "build", "presentation": { "echo": true, - "reveal": "always", + "reveal": "silent", "focus": true, "panel": "shared", "showReuseMessage": true, @@ -36,12 +36,5 @@ "revealProblems": "onProblem", }, }, - - { - "label": "Build Tests", - "type": "shell", - "command": "zig build test", - "group": "build", - }, ], } diff --git a/build.zig b/build.zig index 9db4b88..6dd3828 100644 --- a/build.zig +++ b/build.zig @@ -10,15 +10,15 @@ pub fn build(builder: *std.build.Builder) void { // Ona executable. { - const exe = builder.addExecutable("ona", "./src/ona/main.zig"); + const ona = builder.addExecutable("ona", "./src/ona/main.zig"); - exe.addPackage(core_pkg); - exe.setTarget(target); - exe.setBuildMode(mode); - exe.install(); - exe.addIncludeDir("./ext"); - exe.linkSystemLibrary("SDL2"); - exe.linkLibC(); + ona.addPackage(core_pkg); + ona.setTarget(target); + ona.setBuildMode(mode); + ona.install(); + ona.addIncludeDir("./ext"); + ona.linkSystemLibrary("SDL2"); + ona.linkLibC(); } // Tests executable. @@ -29,7 +29,6 @@ pub fn build(builder: *std.build.Builder) void { tests.setTarget(target); tests.setBuildMode(mode); tests.install(); - builder.step("test", "Run unit tests").dependOn(&tests.step); } } -- 2.34.1 From a07b56d2d5f6bc4391e26fbf657bcc51d89066ba Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 30 Oct 2022 22:07:09 +0000 Subject: [PATCH 71/93] Update tests file --- src/tests.zig | 1 - 1 file changed, 1 deletion(-) diff --git a/src/tests.zig b/src/tests.zig index 25c718f..f2feb81 100644 --- a/src/tests.zig +++ b/src/tests.zig @@ -1,5 +1,4 @@ test { _ = @import("./core/main.zig"); - _ = @import("./oar/main.zig"); _ = @import("./ona/main.zig"); } -- 2.34.1 From ac95993a4bbd63da4ce74451860e2889c1090339 Mon Sep 17 00:00:00 2001 From: kayomn Date: Sun, 30 Oct 2022 22:07:36 +0000 Subject: [PATCH 72/93] Tidy up core package code and comments --- src/core/fmt.zig | 70 ------------- src/core/io.zig | 230 ++++++++++++++++++++++++++++++------------- src/core/main.zig | 22 ++++- src/core/math.zig | 26 +++++ src/core/meta.zig | 87 +++------------- src/core/stack.zig | 132 +++++++++++++++---------- src/core/testing.zig | 18 ++++ src/core/unicode.zig | 84 ++++++++++++++++ 8 files changed, 407 insertions(+), 262 deletions(-) delete mode 100644 src/core/fmt.zig create mode 100644 src/core/math.zig create mode 100644 src/core/testing.zig create mode 100644 src/core/unicode.zig diff --git a/src/core/fmt.zig b/src/core/fmt.zig deleted file mode 100644 index 43a0cba..0000000 --- a/src/core/fmt.zig +++ /dev/null @@ -1,70 +0,0 @@ -const io = @import("io.zig"); - -/// -/// Number formatting modes supported by [writeInt]. 
-/// -pub const Radix = enum { - binary, - tinary, - quaternary, - quinary, - senary, - septenary, - octal, - nonary, - decimal, - undecimal, - duodecimal, - tridecimal, - tetradecimal, - pentadecimal, - hexadecimal, -}; - -/// -/// Writes `value` as a ASCII / UTF-8 encoded integer to `writer`, returning `true` if the full -/// sequence was successfully written, otherwise `false`. -/// -/// The `radix` argument identifies which base system to format `value` as. -/// -pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) bool { - const Int = @TypeOf(value); - const type_info = @typeInfo(Int); - - switch (type_info) { - .Int => { - if (value == 0) return writer.writeByte('0'); - - // TODO: Unhardcode this as it will break with large ints. - var buffer = std.mem.zeroes([28]u8); - var buffer_count = @as(usize, 0); - var n1 = value; - - if ((type_info.Int.signedness == .signed) and (value < 0)) { - // Negative value. - n1 = -value; - buffer[0] = '-'; - buffer_count += 1; - } - - while (n1 != 0) { - const base = @enumToInt(radix); - - buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); - n1 = (n1 / base); - buffer_count += 1; - } - - for (buffer[0 .. (buffer_count / 2)]) |_, i| - std.mem.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); - - return (writer.call(.{buffer[0 .. buffer_count]}) == buffer_count); - }, - - // Cast comptime int into known-size integer and try again. - .ComptimeInt => return writer. - writeInt(radix, @intCast(std.math.IntFittingRange(value, value), value)), - - else => @compileError("value must be of type int"), - } -} diff --git a/src/core/io.zig b/src/core/io.zig index 11c3fa1..495fd7d 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -1,11 +1,12 @@ +const math = @import("./math.zig"); const meta = @import("./meta.zig"); const stack = @import("./stack.zig"); -const std = @import("std"); +const testing = @import("./testing.zig"); /// /// /// -pub const Allocator = std.mem.Allocator; +pub const Allocator = @import("std").mem.Allocator; /// /// Closure that captures a reference to readable resources like block devices, memory buffers, @@ -31,6 +32,26 @@ pub fn Spliterator(comptime Element: type) type { return (self.source.len != 0); } + test "Check has data" { + var empty_spliterator = Spliterator(u8){ + .source = "", + .delimiter = "/", + }; + + try testing.expect(!empty_spliterator.hasNext()); + + var stateful_spliterator = Spliterator(u8){ + .source = "data", + .delimiter = "/", + }; + + try testing.expect(stateful_spliterator.hasNext()); + + _ = try stateful_spliterator.next(); + + try testing.expect(!stateful_spliterator.hasNext()); + } + /// /// Iterates on `self` and returns the next view of [Spliterator.source] that matches /// [Spliterator.delimiter], or `null` if there is no more data to be processed. @@ -38,7 +59,11 @@ pub fn Spliterator(comptime Element: type) type { pub fn next(self: *Self) ?[]const Element { if (!self.hasNext()) return null; - if (std.mem.indexOfPos(Element, self.source, 0, self.delimiter)) |index| { + if (findFirstOf(Element, self.source, self.delimiter, struct { + fn testEquality(this: Element, that: Element) bool { + return this == that; + } + }.testEquality)) |index| { defer self.source = self.source[(index + self.delimiter.len) .. self.source.len]; return self.source[0 .. index]; @@ -48,43 +73,43 @@ pub fn Spliterator(comptime Element: type) type { return self.source; } + + test "Iterate through data" { + // Single-character delimiter. 
+ { + var spliterator = Spliterator(u8){ + .source = "single.character.separated.hello.world", + .delimiter = ".", + }; + + const components = [_][]const u8{"single", + "character", "separated", "hello", "world"}; + + var index = @as(usize, 0); + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(equals(u8, split, components[index])); + } + } + + // Multi-character delimiter. + { + var spliterator = Spliterator(u8){ + .source = "finding a needle in a needle stack", + .delimiter = "needle", + }; + + const components = [_][]const u8{"finding a ", " in a ", " stack"}; + var index = @as(usize, 0); + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(equals(u8, split, components[index])); + } + } + } }; } -test "Spliterating text" { - const testing = std.testing; - - // Single-character delimiter. - { - var spliterator = Spliterator(u8){ - .source = "single.character.separated.hello.world", - .delimiter = ".", - }; - - const components = [_][]const u8{"single", "character", "separated", "hello", "world"}; - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(equals(u8, split, components[index])); - } - } - - // Multi-character delimiter. - { - var spliterator = Spliterator(u8){ - .source = "finding a needle in a needle stack", - .delimiter = "needle", - }; - - const components = [_][]const u8{"finding a ", " in a ", " stack"}; - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(equals(u8, split, components[index])); - } - } -} - /// /// Closure that captures a reference to writable resources like block devices, memory buffers, /// network sockets, and more. @@ -102,7 +127,6 @@ pub fn begins(comptime Element: type, elements: []const Element, with: []const E test "Check memory begins with" { const bytes_sequence = &.{69, 42}; - const testing = std.testing; try testing.expect(begins(u8, &.{69, 42, 0, 89}, bytes_sequence)); try testing.expect(!begins(u8, &.{69, 89, 42, 0}, bytes_sequence)); @@ -133,11 +157,9 @@ pub fn bytesOf(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { } test "Bytes of types" { - const testing = std.testing; - var foo: u32 = 10; - testing.expectEqual(bytesOf(&foo), 0x0a); + try testing.expect(bytesOf(&foo)[0] == 0x0a); } /// @@ -145,27 +167,38 @@ test "Bytes of types" { /// sequences, otherwise `0` if they are identical. 
/// pub fn compareBytes(this: []const u8, that: []const u8) isize { - var cursor: usize = 0; + const range = math.min(usize, this.len, that.len); + var index: usize = 0; - while (cursor != this.len) : (cursor += 1) { - const this_byte = this[cursor]; + while (index < range) : (index += 1) { + const difference = (this[index] - that[index]); - if (cursor != that.len) return this_byte; - - const that_byte = that[cursor]; - - if (this_byte != that_byte) return (this_byte - that_byte); + if (difference != 0) return difference; } - return 0; + return (@intCast(isize, this.len) - @intCast(isize, that.len)); } test "Compare bytes" { - const testing = std.testing; + try testing.expect(compareBytes(&.{69, 42, 0}, &.{69, 42, 0}) == 0); + try testing.expect(compareBytes(&.{69, 42, 11}, &.{69, 42}) == 1); + try testing.expect(compareBytes(&.{69, 42}, &.{69, 42, 11}) == -1); +} - try testing.expectEquals(compareBytes(&.{69, 42, 0}, &.{69, 42, 0}), 0); - try testing.expectEquals(compareBytes(&.{69, 42, 0}, &.{69, 42}), 42); - try testing.expectEquals(compareBytes(&.{69, 42}, &.{69, 42, 0}), -42); +/// +/// Copies the contents of `source` into `target` +/// +pub fn copy(comptime Element: type, target: []Element, source: []const Element) void { + for (source) |element, index| target[index] = element; +} + +test "Copy data" { + var buffer = [_]u32{0} ** 20; + const data = [_]u32{3, 20, 8000}; + + copy(u32, &buffer, &data); + + for (data) |datum, index| try testing.expect(buffer[index] == datum); } /// @@ -175,21 +208,72 @@ test "Compare bytes" { pub fn equals(comptime Element: type, this: []const Element, that: []const Element) bool { if (this.len != that.len) return false; - var i = std.mem.zeroes(usize); + var index: usize = 0; - while (i < this.len) : (i += 1) if (this[i] != that[i]) return false; + while (index < this.len) : (index += 1) if (this[index] != that[index]) return false; return true; } -test "Check memory is equals" { +test "Check memory is equal" { const bytes_sequence = &.{69, 42, 0}; - const testing = std.testing; try testing.expect(equals(u8, bytes_sequence, bytes_sequence)); try testing.expect(!equals(u8, bytes_sequence, &.{69, 42})); } +/// +/// Searches for the first instance of an `Element` equal to `needle` in `haystack`, returning its +/// index or `null` if nothing was found. +/// +pub fn findFirst(comptime Element: type, haystack: []const Element, + needle: Element, comptime testEquality: fn (Element, Element) bool) ?usize { + + for (haystack) |element, index| if (testEquality(element, needle)) return index; + + return null; +} + +test "Find first of element" { + const haystack = &.{"", "", "foo"}; + + const testEquality = struct { + fn testEquality(this: []const u8, that: []const u8) bool { + return equals(u8, this, that); + } + }.testEquality; + + try testing.expect(findFirst([]const u8, haystack, "foo", testEquality).? == 2); + try testing.expect(findFirst([]const u8, haystack, "bar", testEquality) == null); +} + +/// +/// Searches for the first instance of an `Element` sequence equal to the contents of `needle` in +/// `haystack`, returning the starting index or `null` if nothing was found. 
+/// +pub fn findFirstOf(comptime Element: type, haystack: []const Element, + needle: []const Element, comptime testEquality: fn (Element, Element) bool) ?usize { + + var cursor: usize = 0; + const end = (haystack.len - needle.len); + + walk_haystack: while (cursor <= end) : (cursor += 1) { + const range = (cursor + needle.len); + var index = cursor; + + while (index < range) : (index += 1) + if (testEquality(haystack[index], needle[index])) continue: walk_haystack; + + return cursor; + } + + return null; +} + +test "Find first of sequence" { + +} + /// /// Returns a deterministic hash code compiled from each byte in `bytes`. /// @@ -205,12 +289,30 @@ pub fn hashBytes(bytes: []const u8) usize { test "Hashing bytes" { const bytes_sequence = &.{69, 42, 0}; - const testing = std.testing; try testing.expect(hashBytes(bytes_sequence) == hashBytes(bytes_sequence)); try testing.expect(hashBytes(bytes_sequence) != hashBytes(&.{69, 42})); } +/// +/// Swaps the `Data` in `this` with `that`. +/// +pub fn swap(comptime Data: type, this: *Data, that: *Data) void { + const temp = this.*; + this.* = that.*; + that.* = temp; +} + +test "Data swapping" { + var a: u64 = 0; + var b: u64 = 1; + + swap(u64, &a, &b); + + try testing.expect(a == 1); + try testing.expect(b == 0); +} + /// /// Returns a [Writer] that silently consumes all given data without failure and throws it away. /// @@ -218,7 +320,7 @@ test "Hashing bytes" { /// sent somewhere for whatever reason. /// pub fn nullWriter() Writer { - return Writer.capture(std.mem.zeroes(usize), struct { + return Writer.capture(@as(usize, 0), struct { fn write(_: usize, buffer: []const u8) usize { return buffer.len; } @@ -226,11 +328,7 @@ pub fn nullWriter() Writer { } test "Null writing" { - const testing = std.testing; + const sequence = "foo"; - { - const sequence = "foo"; - - try testing.expectEqual(nullWriter().apply(sequence), sequence.len); - } + try testing.expect(nullWriter().call(sequence) == sequence.len); } diff --git a/src/core/main.zig b/src/core/main.zig index 602ea7b..785b8e5 100644 --- a/src/core/main.zig +++ b/src/core/main.zig @@ -5,12 +5,17 @@ pub const io = @import("./io.zig"); /// -/// Metaprogramming introspection and generation utilities. +/// Math types and functions with a focus on graphics-specific linear algebra. +/// +pub const math = @import("./math.zig"); + +/// +/// Metaprogramming introspection and generation. /// pub const meta = @import("./meta.zig"); /// -/// Sequential last-in first-out data structures. +/// Sequential, last-in first-out data structures. /// pub const stack = @import("./stack.zig"); @@ -19,9 +24,22 @@ pub const stack = @import("./stack.zig"); /// pub const table = @import("./table.zig"); +/// +/// Unit testing suite utilities. +/// +pub const testing = @import("./testing.zig"); + +/// +/// Unicode-encoded string analysis and processing with a focus on UTF-8 encoded text. +/// +pub const unicode = @import("./unicode.zig"); + test { _ = io; + _ = math; _ = meta; _ = stack; _ = table; + _ = testing; + _ = unicode; } diff --git a/src/core/math.zig b/src/core/math.zig new file mode 100644 index 0000000..e6f9959 --- /dev/null +++ b/src/core/math.zig @@ -0,0 +1,26 @@ + +pub const IntFittingRange = @import("std").math.IntFittingRange; + +/// +/// Returns the maximum value of `Integer`. 
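// A few concrete values implied by the definitions below, given here for
// reference rather than taken from the patch: maxInt(u8) == 255,
// maxInt(i8) == 127, maxInt(u0) == 0, and min(usize, 3, 5) == 3.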
+/// +pub fn maxInt(comptime Integer: type) Integer { + return switch (@typeInfo(Integer)) { + .Int => |info| if (info.bits == 0) 0 else + ((1 << (info.bits - @boolToInt(info.signedness == .signed))) - 1), + + else => @compileError("`" ++ @typeName(Integer) ++ "` must be an int"), + }; +} + +/// +/// Returns the lowest `Number` value between `this` and `that`. +/// +pub fn min(comptime Number: type, this: Number, that: Number) Number { + return switch (@typeInfo(Number)) { + .Int, .Float, .ComptimeInt, .ComptimeFloat => if (this < that) this else that, + + else => @compileError("`" ++ @typeName(Number) ++ + "` must be an int, float, comptime_int, or comptime_float"), + }; +} diff --git a/src/core/meta.zig b/src/core/meta.zig index f76f362..e7be22d 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -1,5 +1,3 @@ -const std = @import("std"); - /// /// Returns the return type of the function type `Fn`. /// @@ -11,70 +9,13 @@ pub fn FnReturn(comptime Fn: type) type { return type_info.Fn.return_type orelse void; } -/// -/// Returns a double-input single-output closure type where `A` represents the first input type, `B` -/// represents the second, and `Out` represents the output type, and `captures_size` represents the -/// size of the closure context. -/// -pub fn BiFunction(comptime captures_size: usize, comptime A: type, - comptime B: type, comptime Out: type) type { - - return struct { - applyErased: fn (*anyopaque, A, B) Out, - context: [captures_size]u8, - - /// - /// Function type. - /// - const Self = @This(); - - /// - /// Applies `a` and `b` to `self`, producing a result according to the current context data. - /// - pub fn apply(self: *Self, a: A, b: B) Out { - return self.applyErased(&self.context, a, b); - } - - /// - /// Creates a new [Self] by capturing the `captures` value as the context and `call` as the - /// as the behavior executed when [apply] or [applyErased] is called. - /// - /// The newly created [Self] is returned. - /// - pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), A, B) Out) Self { - const Captures = @TypeOf(captures); - - if (@sizeOf(Captures) > captures_size) - @compileError("`captures` must be smaller than or equal to " ++ - std.fmt.comptimePrint("{d}", .{captures_size}) ++ " bytes"); - - var function = Self{ - .context = undefined, - - .applyErased = struct { - fn applyErased(erased: *anyopaque, a: A, b: B) Out { - return call(if (Captures == void) {} else @ptrCast(*Captures, - @alignCast(@alignOf(Captures), erased)).*, a, b); - } - }.applyErased, - }; - - if (captures != {}) { - @ptrCast(*Captures, @alignCast(@alignOf(Captures), &function.context)).* = captures; - } - - return function; - } - }; -} - /// /// Returns a single-input single-output closure type where `In` represents the input type, `Out` /// represents the output type, and `captures_size` represents the size of the closure context. /// pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: type) type { return struct { - applyErased: fn (*anyopaque, In) Out, + callErased: fn (*anyopaque, In) Out, context: [captures_size]u8, /// @@ -83,39 +24,39 @@ pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: const Self = @This(); /// - /// Applies `input` to `self`, producing a result according to the current context data. + /// Invokes `self` with `input`, producing a result according to the current context data. 
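// A minimal capture-and-call sketch; the multiplier closure and its names are
// illustrative only and assume a `captures_size` of at least @sizeOf(usize):
//
//     var doubler = Function(@sizeOf(usize), u32, u32).capture(@as(usize, 2), struct {
//         fn invoke(factor: usize, value: u32) u32 {
//             return value * @intCast(u32, factor);
//         }
//     }.invoke);
//
//     // doubler.call(21) == 42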
/// - pub fn apply(self: *Self, input: In) Out { - return self.applyErased(&self.context, input); + pub fn call(self: *Self, input: In) Out { + return self.callErased(&self.context, input); } /// - /// Creates a new [Self] by capturing the `captures` value as the context and `call` as the - /// as the behavior executed when [apply] or [applyErased] is called. + /// Creates a new [Self] by capturing the `captures` value as the context and `invoke` as + /// the as the behavior executed when [call] or [callErased] is called. /// /// The newly created [Self] is returned. /// - pub fn capture(captures: anytype, comptime call: fn (@TypeOf(captures), In) Out) Self { + pub fn capture(captures: anytype, comptime invoke: fn (@TypeOf(captures), In) Out) Self { const Captures = @TypeOf(captures); if (@sizeOf(Captures) > captures_size) - @compileError("`captures` must be smaller than or equal to " ++ - std.fmt.comptimePrint("{d}", .{captures_size}) ++ " bytes"); + @compileError("`captures` exceeds the size limit of the capture context"); const captures_align = @alignOf(Captures); var function = Self{ .context = undefined, - .applyErased = struct { - fn applyErased(erased: *anyopaque, input: In) Out { - return call(if (Captures == void) {} else @ptrCast(*Captures, + .callErased = struct { + fn callErased(erased: *anyopaque, input: In) Out { + return invoke(if (Captures == void) {} else @ptrCast(*Captures, @alignCast(@alignOf(Captures), erased)).*, input); } - }.applyErased, + }.callErased, }; - @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; + if (Captures != void) + @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; return function; } diff --git a/src/core/stack.zig b/src/core/stack.zig index 77a4e01..6caa0fb 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -1,5 +1,5 @@ const io = @import("./io.zig"); -const std = @import("std"); +const testing = @import("./testing.zig"); /// /// Returns a fixed-size stack type of `Element`s. @@ -15,42 +15,20 @@ pub fn Fixed(comptime Element: type) type { const Self = @This(); /// - /// Wraps `self` and returns it in a [io.Writer] value. - /// - /// Note that this will raise a compilation error if [Element] is not `u8`. - /// - pub fn writer(self: *Self) io.Writer { - if (Element != u8) @compileError("Cannot coerce fixed stack of type " ++ - @typeName(Element) ++ " into a Writer"); - - return io.Writer.capture(self, struct { - fn write(stack: *Self, buffer: []const u8) usize { - stack.pushAll(buffer) catch |err| switch (err) { - error.OutOfMemory => return 0, - }; - - return buffer.len; - } - }.write); - } - - /// - /// Clears all elements from `self`. + /// Resets the number of filled items to `0`, otherwise leaving the actual memory contents + /// of the buffer untouched until it is later overwritten by following operations on it. /// pub fn clear(self: *Self) void { self.filled = 0; } /// - /// Counts and returns the number of pushed elements in `self`. + /// If `self` is filled with at least `1` value, it is decremented by `1`, otherwise leaving + /// the actual memory contents of the buffer untouched until it is later overwritten by + /// following operations on it. /// - pub fn count(self: Self) usize { - return self.filled; - } - - /// - /// Attempts to pop the tail-end of `self`, returning the element value or `null` if the - /// stack is empty. 
+ /// The value of the element removed from the list is returned if something existed to be + /// popped, otherwise `null` if it contained no elements. /// pub fn pop(self: *Self) ?Element { if (self.filled == 0) return null; @@ -74,40 +52,92 @@ pub fn Fixed(comptime Element: type) type { /// Attempts to push all of `elements` into `self`, returning a [FixedPushError] if it /// failed. /// - pub fn pushAll(self: *Self, elements: []const u8) PushError!void { + pub fn pushAll(self: *Self, elements: []const Element) PushError!void { const filled = (self.filled + elements.len); if (filled > self.buffer.len) return error.OutOfMemory; - std.mem.copy(u8, self.buffer[self.filled ..], elements); + io.copy(Element, self.buffer[self.filled ..], elements); self.filled = filled; } }; } +test "Fixed stack of string literals" { + const default_value = ""; + var buffer = [_][]const u8{default_value} ** 4; + var shopping_list = Fixed([]const u8){.buffer = &buffer}; + + // Pop empty stack. + { + try testing.expect(shopping_list.pop() == null); + try testing.expect(shopping_list.filled == 0); + try testing.expect(shopping_list.buffer.ptr == &buffer); + try testing.expect(shopping_list.buffer.len == buffer.len); + + for (shopping_list.buffer) |item| + try testing.expect(io.equals(u8, item, default_value)); + } + + // Push single element. + { + try shopping_list.push("milk"); + try testing.expect(shopping_list.filled == 1); + try testing.expect(shopping_list.buffer.ptr == &buffer); + try testing.expect(shopping_list.buffer.len == buffer.len); + try testing.expect(io.equals(u8, shopping_list.buffer[0], "milk")); + + for (shopping_list.buffer[1 ..]) |item| + try testing.expect(io.equals(u8, item, default_value)); + + // TODO: Test stack overflow. + } + + // Pop single element. + { + try testing.expect(io.equals(u8, shopping_list.pop().?, "milk")); + try testing.expect(shopping_list.filled == 0); + try testing.expect(shopping_list.buffer.ptr == &buffer); + try testing.expect(shopping_list.buffer.len == buffer.len); + try testing.expect(io.equals(u8, shopping_list.buffer[0], "milk")); + + for (shopping_list.buffer[1 ..]) |item| + try testing.expect(io.equals(u8, item, default_value)); + } + + // TODO: Multiple elements. + // TODO: Clear elements. +} + /// /// Potential errors that may occur while trying to push one or more elements into a stack. /// -pub const PushError = std.mem.Allocator.Error; +pub const PushError = io.Allocator.Error; -test "Fixed stack manipulation" { - const testing = std.testing; - var buffer = std.mem.zeroes([4]u8); - var stack = Fixed(u8){.buffer = &buffer}; +/// +/// Returns an [io.Writer] wrapping `fixed_stack`. +/// +/// Writing to the returned [io.Writer] will push values to the underlying [Fixed] stack instance +/// referenced by `fixed_stack` until it is full. 
+/// +pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer { + return io.Writer.capture(fixed_stack, struct { + fn write(stack: *Fixed(u8), buffer: []const u8) usize { + stack.pushAll(buffer) catch |err| switch (err) { + error.OutOfMemory => return 0, + }; - try testing.expectEqual(stack.count(), 0); - try testing.expectEqual(stack.pop(), null); - try stack.push(69); - try testing.expectEqual(stack.count(), 1); - try testing.expectEqual(stack.pop(), 69); - try stack.pushAll(&.{42, 10, 95, 0}); - try testing.expectEqual(stack.count(), 4); - try testing.expectError(PushError.OutOfMemory, stack.push(1)); - try testing.expectError(PushError.OutOfMemory, stack.pushAll(&.{1, 11, 11})); - - stack.clear(); - - try testing.expectEqual(stack.count(), 0); - try testing.expectEqual(stack.writer().apply(&.{0, 0, 0, 0}), 4); + return buffer.len; + } + }.write); +} + +test "Fixed writer" { + var buffer = [_]u8{0} ** 4; + var sequence_stack = Fixed(u8){.buffer = &buffer}; + const sequence_data = [_]u8{8, 16, 32, 64}; + + try testing.expect(fixedWriter(&sequence_stack).call(&sequence_data) == sequence_data.len); + try testing.expect(io.equals(u8, sequence_stack.buffer, &sequence_data)); } diff --git a/src/core/testing.zig b/src/core/testing.zig new file mode 100644 index 0000000..1881378 --- /dev/null +++ b/src/core/testing.zig @@ -0,0 +1,18 @@ +/// +/// [TestError.UnexpectedResult] occurs when a conditional that should have been `true` was actually +/// `false`. +/// +pub const TestError = error { + UnexpectedResult, +}; + +/// +/// Returns a [TestError] if `ok` is false. +/// +pub fn expect(ok: bool) TestError!void { + if (!ok) return error.UnexpectedResult; +} + +// TODO: Implement tests. + +pub const expectError = @import("std").testing.expectError; diff --git a/src/core/unicode.zig b/src/core/unicode.zig new file mode 100644 index 0000000..6e89678 --- /dev/null +++ b/src/core/unicode.zig @@ -0,0 +1,84 @@ +const io = @import("./io.zig"); +const math = @import("./math.zig"); + +/// +/// [PrintError.WriteFailure] occurs when the underlying [io.Writer] implementation failed to write +/// the entirety of a the requested print operation. +/// +pub const PrintError = error { + WriteFailure, +}; + +/// +/// Number formatting modes supported by [printInt]. +/// +pub const Radix = enum { + binary, + tinary, + quaternary, + quinary, + senary, + septenary, + octal, + nonary, + decimal, + undecimal, + duodecimal, + tridecimal, + tetradecimal, + pentadecimal, + hexadecimal, +}; + +/// +/// Writes `value` as a ASCII / UTF-8 encoded integer to `writer`, returning `true` if the full +/// sequence was successfully written, otherwise `false`. +/// +/// The `radix` argument identifies which base system to format `value` as. +/// +pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void { + const Int = @TypeOf(value); + + switch (@typeInfo(Int)) { + .Int => |int_info| { + if (value == 0) return writer.apply("0"); + + const base = @enumToInt(radix); + const is_signed = (int_info.signedness == .signed); + + var buffer = [_]u8{0} ** (math.ceil(math.log(math. + maxInt(Int), base)) + @boolToInt(is_signed)); + + var buffer_count: usize = 0; + var n1 = value; + + if (is_signed and (value < 0)) { + // Negative value. + n1 = -value; + buffer[0] = '-'; + buffer_count += 1; + } + + while (n1 != 0) { + buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); + n1 = (n1 / base); + buffer_count += 1; + } + + for (buffer[0 .. 
(buffer_count / 2)]) |_, i| + io.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); + + if (writer.call(buffer[0 .. buffer_count]) != buffer_count) return error.WriteFailure; + }, + + // Cast comptime int into known-size integer and try again. + .ComptimeInt => return printInt(writer, radix, + @intCast(math.IntFittingRange(value, value), value)), + + else => @compileError("`value` must be of type int or comptime_int"), + } +} + +test "Print 64-bit signed integer" { + // TODO: implement. +} -- 2.34.1 From f3426ef69188b981e9f183aa9b32ca6a7a7b393f Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 15:07:40 +0000 Subject: [PATCH 73/93] Change maxInt to return a comptime_int --- src/core/math.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/math.zig b/src/core/math.zig index e6f9959..6cb3c23 100644 --- a/src/core/math.zig +++ b/src/core/math.zig @@ -4,7 +4,7 @@ pub const IntFittingRange = @import("std").math.IntFittingRange; /// /// Returns the maximum value of `Integer`. /// -pub fn maxInt(comptime Integer: type) Integer { +pub fn maxInt(comptime Integer: type) comptime_int { return switch (@typeInfo(Integer)) { .Int => |info| if (info.bits == 0) 0 else ((1 << (info.bits - @boolToInt(info.signedness == .signed))) - 1), -- 2.34.1 From e95c754d62f126742fd9be48d6eb34d187e63c04 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 15:08:10 +0000 Subject: [PATCH 74/93] Refactor Oar implementation --- src/ona/main.zig | 8 +- src/ona/oar.zig | 168 ++++++++--------- src/ona/sys.zig | 467 +++++++++++++++++++++-------------------------- 3 files changed, 282 insertions(+), 361 deletions(-) diff --git a/src/ona/main.zig b/src/ona/main.zig index d38b13d..e8e8718 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -15,17 +15,17 @@ fn run(app: *sys.App, graphics: *sys.Graphics) anyerror!void { defer _ = gpa.deinit(); { - var file_access = try app.data.open(try sys.Path.joined(&.{"ona.lua"}), .readonly); + var file_reader = try app.data.openRead(try sys.Path.from(&.{"ona.lua"})); - defer file_access.close(); + defer file_reader.close(); - const file_size = try file_access.queryLength(); + const file_size = try file_reader.size(); const allocator = gpa.allocator(); const buffer = try allocator.alloc(u8, file_size); defer allocator.free(buffer); - if ((try file_access.read(buffer)) != file_size) return error.ScriptLoadFailure; + if ((try file_reader.read(0, buffer)) != file_size) return error.ScriptLoadFailure; app.log(.debug, buffer); } diff --git a/src/ona/oar.zig b/src/ona/oar.zig index d24da0d..16feb84 100644 --- a/src/ona/oar.zig +++ b/src/ona/oar.zig @@ -1,138 +1,116 @@ -const core = @import("./core.zig"); +const core = @import("core"); const sys = @import("./sys.zig"); /// +/// Metadata of an Oar archive entry. 
+/// +const Block = extern struct { + signature: [signature_magic.len]u8 = signature_magic, + revision: u8 = 0, + path: sys.Path = sys.Path.empty, + data_size: u64 = 0, + data_head: u64 = 0, + padding: [232]u8 = [_]u8{0} ** 232, + + comptime { + const entry_size = @sizeOf(@This()); + + if (entry_size != 512) @compileError("EntryBlock is greater than 512 bytes"); + } +}; + /// /// -pub const Archive = struct { - file_system: *const sys.FileSystem, - archive_path: sys.Path, +/// +pub const Entry = struct { + head: u64, + size: u64, /// /// /// - const Header = extern struct { - signature: [signature_magic.len]u8, - revision: u8, - entry_offset: u64, - padding: [500]u8 = std.mem.zeroes([500]u8), - - /// - /// Magic identifier used to validate [Entry] data. - /// - const signature_magic = [3]u8{'o', 'a', 'r'}; - - comptime { - const size = @sizeOf(@This()); - - if (size != 512) - @compileError("Header is " ++ - std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); - } - }; - - /// - /// An entry block of an Oar archive file. - /// - /// Typically, following the block in memory is the file data it holds the meta-information for. - /// - const Entry = extern struct { - signature: [signature_magic.len]u8 = signature_magic, - revision: u8, - path: Path, - file_size: u64, - absolute_offset: u64, - padding: [232]u8 = std.mem.zeroes([232]u8), - - comptime { - const entry_size = @sizeOf(Entry); - - if (entry_size != 512) - @compileError("Entry is " ++ - std.fmt.comptimePrint("{d}", .{entry_size}) ++ " bytes"); - } - }; - - /// - /// - /// - pub const OpenError = error { - FileNotFound, + pub const FindError = error { EntryNotFound, - UnsupportedArchive, + ArchiveUnsupported, }; /// /// /// - pub fn open(archive: Archive, entry_path: Path) OpenError!EntryAccess { - const file_access = try archive.file_system.open(entry_path, .readonly); - - errdefer file_access.close(); - - var header = std.mem.zeroes(Header); + pub fn find(archive_file: *sys.ReadableFile, entry_path: sys.Path) FindError!Entry { + var header = Header{}; const header_size = @sizeOf(Header); const io = core.io; - // Validate header. - if ((try file_access.read(io.bytes(&header)) != header_size) or header - (!core.io.equals(u8, &header.signature, &Header.signature_magic)) or - (header.revision != revision) or - (header.entry_offset <= header_size)) return error.UnsupportedArchive; - - // Go to file table. - try file_access.seek(header.entry_offset); + if (((archive_file.read(0, io.bytesOf(&header)) catch + return error.ArchiveUnsupported) != header_size) or + (!io.equals(u8, &header.signature, &signature_magic)) or + (header.revision != revision_magic) or + (header.entry_head <= header_size)) return error.ArchiveUnsupported; // Read file table. - var entry_buffer = std.mem.zeroes([8]Entry); - var bytes_read = try file_access.read(io.bytes(&entry_buffer)); - - while (@mod(bytes_read, @sizeOf(Entry)) == 0) { - for (entry_buffer[0 .. 
(bytes_read / @sizeOf(Entry))]) |entry| { - if (entry.path.equals(entry_path)) { - file_access.seek(entry.file_offset); - - return Entry{ - .file_access = file_access, - }; - } - } - - bytes_read = try file_access.read(io.bytes(&entry_buffer)); - } - - var head = std.mem.zeroes(usize); - var tail = (header.file_count - 1); + var head: usize = 0; + var tail: usize = (header.entry_count - 1); + const block_size = @sizeOf(Block); while (head <= tail) { + var block = Block{}; const midpoint = (head + (tail - head) / 2); - const comparison = entry_path.compare(arr[m])); + if ((archive_file.read(header.entry_head + (block_size * midpoint), io.bytesOf(&block)) + catch return error.ArchiveUnsupported) != block_size) return error.EntryNotFound; - if (comparison == 0) return midpoint; + const comparison = entry_path.compare(block.path); + + if (comparison == 0) return Entry{ + .head = block.data_head, + .size = block.data_size, + }; if (comparison > 0) { - // If x greater, ignore left half head = (midpoint + 1); } else { - // If x is smaller, ignore right half tail = (midpoint - 1); } } return error.EntryNotFound; } -}; -pub const EntryAccess = struct { - file_access: FileAccess, + /// + /// + /// + pub fn read(entry: Entry, archive_file: *sys.ReadableFile, + offset: u64, buffer: []u8) sys.FileError!usize { - pub fn close(entry: Entry) void { - entry.file_access.close(); + return archive_file.read(entry.head + offset, + buffer[0 .. core.math.min(usize, buffer.len, entry.size)]); } }; /// /// /// -const revision = 0; +const Header = extern struct { + signature: [signature_magic.len]u8 = signature_magic, + revision: u8 = revision_magic, + entry_count: u32 = 0, + entry_head: u64 = 0, + padding: [496]u8 = [_]u8{0} ** 496, + + comptime { + const size = @sizeOf(@This()); + + if (size != 512) @compileError("Header is greater than 512 bytes"); + } +}; + +/// +/// +/// +const revision_magic = 0; + +/// +/// Magic identifier used to validate [Entry] data. 
+/// +const signature_magic = [3]u8{'o', 'a', 'r'}; diff --git a/src/ona/sys.zig b/src/ona/sys.zig index 63f3ed3..27bbae2 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -3,7 +3,7 @@ const ext = @cImport({ }); const core = @import("core"); -const oar = @import("oar"); +const oar = @import("./oar.zig"); const std = @import("std"); /// @@ -92,13 +92,93 @@ pub const App = struct { } }; +/// +/// +/// +pub const ReadableFile = opaque { + /// + /// + /// + pub fn close(readable_file: *ReadableFile) void { + if (ext.SDL_RWclose(readable_file.rwOpsCast()) != 0) + return ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, + "Attempt to close an invalid file reference"); + } + + /// + /// + /// + pub fn read(readable_file: *ReadableFile, offset: u64, buffer: []u8) FileError!u64 { + const rw_ops = readable_file.rwOpsCast(); + + { + ext.SDL_ClearError(); + + var sought = core.math.min(u64, offset, core.math.maxInt(i64)); + + if (ext.SDL_RWseek(rw_ops, @intCast(i64, sought), ext.RW_SEEK_SET) < 0) + return error.FileInaccessible; + + var to_seek = offset - sought; + + while (to_seek != 0) { + sought = core.math.min(u64, to_seek, core.math.maxInt(i64)); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(rw_ops, @intCast(i64, sought), ext.RW_SEEK_CUR) < 0) + return error.FileInaccessible; + + to_seek -= sought; + } + } + + ext.SDL_ClearError(); + + const buffer_read = ext.SDL_RWread(rw_ops, buffer.ptr, @sizeOf(u8), buffer.len); + + if ((buffer_read == 0) and (ext.SDL_GetError() != null)) + return error.FileInaccessible; + + return buffer_read; + } + + /// + /// + /// + pub fn rwOpsCast(readable_file: *ReadableFile) *ext.SDL_RWops { + return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), readable_file)); + } + + /// + /// + /// + pub fn size(readable_file: *ReadableFile) FileError!u64 { + ext.SDL_ClearError(); + + const byte_size = ext.SDL_RWsize(readable_file.rwOpsCast()); + + if (byte_size < 0) return error.FileInaccessible; + + return @intCast(u64, byte_size); + } +}; + +/// +/// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer pointing +/// to a file or the file becomming invalid for whatever reason. +/// +pub const FileError = error { + FileInaccessible, +}; + /// /// Platform-agnostic mechanism for working with an abstraction of the underlying file-system(s) /// available to the application in a sandboxed environment. /// pub const FileSystem = union(enum) { native: []const u8, - archive: Archive, + archive_file: *ReadableFile, /// /// With files typically being backed by a block device, they can produce a variety of errors - @@ -119,275 +199,45 @@ pub const FileSystem = union(enum) { OutOfFiles, }; - /// - /// [OpenMode.readonly] indicates that an existing file is opened in a read-only state, - /// disallowing write access. - /// - /// [OpenMode.overwrite] indicates that an empty file has been created or an existing file has - /// been completely overwritten into. - /// - /// [OpenMode.append] indicates that an existing file that has been opened for reading from and - /// writing to on the end of existing data. - /// - pub const OpenMode = enum { - readonly, - overwrite, - append, - }; - /// /// Attempts to open the file identified by `path` with `mode` as the mode for opening the file. /// - /// Returns a [FileAccess] reference that provides access to the file referenced by `path`or a + /// Returns a [ReadableFile] reference that provides access to the file referenced by `path`or a /// [OpenError] if it failed. 
/// - pub fn open(file_system: *const FileSystem, path: Path, - mode: OpenMode) OpenError!core.io.FileAccess { - + pub fn openRead(file_system: *const FileSystem, path: Path) OpenError!*ReadableFile { switch (file_system.*) { - .archive => |*archive| { - if (mode != .readonly) return error.ModeUnsupported; + .archive_file => |archive_file| { + const entry = oar.Entry.find(archive_file, path) catch return error.FileNotFound; - const FileAccess = core.io.FileAccess; + _ = entry; + // TODO: Alloc file context. - for (archive.entry_table) |*entry| if (entry.owner == null) { - const Implementation = struct { - fn archiveEntryCast(context: *anyopaque) *Archive.Entry { - return @ptrCast(*Archive.Entry, @alignCast( - @alignOf(Archive.Entry), context)); - } - - fn close(context: *anyopaque) void { - archiveEntryCast(context).owner = null; - } - - fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - return archive_entry.cursor; - } - - fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - return archive_entry.header.file_size; - } - - fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - const archive_entry = archiveEntryCast(context); - - const file_access = archive_entry.owner orelse - return error.FileInaccessible; - - if (archive_entry.cursor >= archive_entry.header.file_size) - return error.FileInaccessible; - - try file_access.seek(archive_entry.header.absolute_offset); - - return file_access.read(buffer[0 .. std.math.min( - buffer.len, archive_entry.header.file_size)]); - } - - fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - archive_entry.cursor = cursor; - } - - fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - archive_entry.cursor = archive_entry.header.file_size; - } - - fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { - const archive_entry = archiveEntryCast(context); - - if (archive_entry.owner == null) return error.FileInaccessible; - - if (offset < 0) { - const math = std.math; - - archive_entry.cursor = math.max(0, - archive_entry.cursor - math.absCast(offset)); - } else { - archive_entry.cursor += @intCast(u64, offset); - } - } - }; - - const Header = oar.Entry; - - if (archive.index_cache.lookup(path)) |index| { - archive.file_access.seek(index) catch return error.FileNotFound; - - entry.* = .{ - .owner = &archive.file_access, - .cursor = 0, - - .header = (Header.next(archive.file_access) catch - return error.FileNotFound) orelse { - - // Remove cannot fail if lookup succeeded. 
- std.debug.assert(archive.index_cache.remove(path) != null); - - return error.FileNotFound; - }, - }; - } else { - while (Header.next(archive.file_access) catch - return error.FileNotFound) |entry_header| { - - if (entry.header.path.equals(path)) entry.* = .{ - .owner = &archive.file_access, - .cursor = 0, - .header = entry_header, - }; - } - - return error.FileNotFound; - } - - return FileAccess{ - .context = entry, - - .implementation = &.{ - .close = Implementation.close, - .queryCursor = Implementation.queryCursor, - .queryLength = Implementation.queryLength, - .read = Implementation.read, - .seek = Implementation.seek, - .seekToEnd = Implementation.seekToEnd, - .skip = Implementation.skip, - }, - }; - }; - - return error.OutOfFiles; + return error.FileNotFound; }, .native => |native| { if (native.len == 0) return error.FileNotFound; - const mem = std.mem; - var path_buffer = mem.zeroes([4096]u8); - const seperator_length = @boolToInt(native[native.len - 1] != oar.Path.seperator); + var path_buffer = [_]u8{0} ** 4096; + const seperator_length = @boolToInt(native[native.len - 1] != Path.seperator); - if ((native.len + seperator_length + path.length) >= - path_buffer.len) return error.FileNotFound; + if ((native.len + seperator_length + path.length) >= path_buffer.len) + return error.FileNotFound; - mem.copy(u8, &path_buffer, native); + const io = core.io; - if (seperator_length != 0) path_buffer[native.len] = oar.Path.seperator; + io.copy(u8, &path_buffer, native); - mem.copy(u8, path_buffer[native.len .. path_buffer. - len], path.buffer[0 .. path.length]); + if (seperator_length != 0) path_buffer[native.len] = Path.seperator; - const FileAccess = core.io.FileAccess; - - const Implementation = struct { - fn rwOpsCast(context: *anyopaque) *ext.SDL_RWops { - return @ptrCast(*ext.SDL_RWops, @alignCast( - @alignOf(ext.SDL_RWops), context)); - } - - fn close(context: *anyopaque) void { - ext.SDL_ClearError(); - - if (ext.SDL_RWclose(rwOpsCast(context)) != 0) - ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, ext.SDL_GetError()); - } - - fn queryCursor(context: *anyopaque) FileAccess.Error!u64 { - ext.SDL_ClearError(); - - const sought = ext.SDL_RWtell(rwOpsCast(context)); - - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); - } - - fn queryLength(context: *anyopaque) FileAccess.Error!u64 { - ext.SDL_ClearError(); - - const sought = ext.SDL_RWsize(rwOpsCast(context)); - - if (sought < 0) return error.FileInaccessible; - - return @intCast(u64, sought); - } - - fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize { - ext.SDL_ClearError(); - - const buffer_read = ext.SDL_RWread(rwOpsCast( - context), buffer.ptr, @sizeOf(u8), buffer.len); - - if ((buffer_read == 0) and (ext.SDL_GetError() != null)) - return error.FileInaccessible; - - return buffer_read; - } - - fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void { - var to_seek = cursor; - - while (to_seek != 0) { - const math = std.math; - const sought = math.min(to_seek, math.maxInt(i64)); - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(rwOpsCast(context), @intCast(i64, sought), - ext.RW_SEEK_CUR) < 0) return error.FileInaccessible; - - to_seek -= sought; - } - } - - fn seekToEnd(context: *anyopaque) FileAccess.Error!void { - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(rwOpsCast(context), 0, ext.RW_SEEK_END) < 0) - return error.FileInaccessible; - } - - fn skip(context: *anyopaque, offset: i64) FileAccess.Error!void { - ext.SDL_ClearError(); - - if 
(ext.SDL_RWseek(rwOpsCast(context), offset, ext.RW_SEEK_SET) < 0) - return error.FileInaccessible; - } - }; + io.copy(u8, path_buffer[native.len .. path_buffer.len], + path.buffer[0 .. path.length]); ext.SDL_ClearError(); - return FileAccess{ - .context = ext.SDL_RWFromFile(&path_buffer, switch (mode) { - .readonly => "rb", - .overwrite => "wb", - .append => "ab", - }) orelse return error.FileNotFound, - - .implementation = &.{ - .close = Implementation.close, - .queryCursor = Implementation.queryCursor, - .queryLength = Implementation.queryLength, - .read = Implementation.read, - .seek = Implementation.seek, - .seekToEnd = Implementation.seekToEnd, - .skip = Implementation.skip, - }, - }; + return @ptrCast(*ReadableFile, ext.SDL_RWFromFile(&path_buffer, "rb") + orelse return error.FileNotFound); }, } } @@ -472,7 +322,102 @@ pub const Message = struct { /// /// Path to a file on a [FileSystem]. /// -pub const Path = oar.Path; +pub const Path = extern struct { + buffer: [255]u8, + length: u8, + + /// + /// [Error.TooLong] occurs when creating a path that is greater than the maximum path size **in + /// bytes**. + /// + pub const FromError = error { + TooLong, + }; + + /// + /// An empty [Path] with a length of `0`. + /// + pub const empty = Path{ + .buffer = [_]u8{0} ** 255, + .length = 0, + }; + + /// + /// + /// + pub fn compare(this: Path, that: Path) isize { + return core.io.compareBytes(this.buffer[0 ..this.length], that.buffer[0 .. that.length]); + } + + /// + /// Returns `true` if `this` is equal to `that`, otherwise `false`. + /// + pub fn equals(this: Path, that: Path) bool { + return core.io.equals(u8, this.buffer[0 ..this.length], that.buffer[0 .. that.length]); + } + + /// + /// Attempts to create a [Path] with the path components in `sequences` as a fully qualified + /// path from root. + /// + /// A [Path] value is returned containing the fully qualified path from the file-system root or + /// a [FromError] if it could not be created. + /// + pub fn from(sequences: []const []const u8) FromError!Path { + var path = empty; + + if (sequences.len != 0) { + const last_sequence_index = sequences.len - 1; + + for (sequences) |sequence, index| if (sequence.len != 0) { + var components = core.io.Spliterator(u8){ + .source = sequence, + .delimiter = "/", + }; + + while (components.next()) |component| if (component.len != 0) { + for (component) |byte| { + if (path.length == max) return error.TooLong; + path.buffer[path.length] = byte; + path.length += 1; + } + + if (components.hasNext()) { + if (path.length == max) return error.TooLong; + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + + if (index < last_sequence_index) { + if (path.length == max) return error.TooLong; + + path.buffer[path.length] = '/'; + path.length += 1; + } + }; + } + + return path; + } + + /// + /// Returns the hash of the text in `path`. + /// + pub fn hash(path: Path) usize { + return core.io.hashBytes(path.buffer[0 .. path.length]); + } + + /// + /// Maximum number of **bytes** in a [Path]. + /// + pub const max = 255; + + /// + /// Textual separator between components of a [Path]. + /// + pub const seperator = '/'; +}; /// /// [RunError.InitFailure] occurs when the runtime fails to initialize. 
@@ -504,32 +449,30 @@ pub fn display(comptime Error: anytype, .user = .{.native = std.mem.sliceTo(user_prefix, 0)}, .message_semaphore = ext.SDL_CreateSemaphore(0) orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create message semaphore"); + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to create message semaphore"); return error.InitFailure; }, .message_mutex = ext.SDL_CreateMutex() orelse { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to create message mutex"); + ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, + "Failed to create message mutex"); return error.InitFailure; }, - .data = .{.archive = .{ - .index_cache = FileSystem.Archive.IndexCache.init(gpa.allocator()) catch - return error.InitFailure, - - .file_access = cwd.open(try Path.joined(&.{"./data.oar"}), .readonly) catch { + .data = .{ + .archive_file = cwd.openRead(try Path.from(&.{"./data.oar"})) catch { ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load ./data.oar"); return error.InitFailure; }, - }}, + }, }; defer { - app.data.archive.file_access.close(); - app.data.archive.index_cache.deinit(); + app.data.archive_file.close(); ext.SDL_DestroySemaphore(app.message_semaphore); ext.SDL_DestroyMutex(app.message_mutex); } @@ -546,7 +489,7 @@ pub fn display(comptime Error: anytype, app.enqueue(&message); { - var status = std.mem.zeroes(c_int); + var status: c_int = 0; // SDL2 defines waiting on a null thread reference as a no-op. See // https://wiki.libsdl.org/SDL_WaitThread for more information -- 2.34.1 From 1773a04e5202c6202047a9e7ca4ce0f03cb398c1 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 15:11:24 +0000 Subject: [PATCH 75/93] Update drone CI script --- .drone.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.drone.yml b/.drone.yml index f9941e1..91d2809 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,5 +5,5 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: - - zig build test - - $(find zig-cache -name test) main.zig + - zig build + - ./zig-out/test main.zig -- 2.34.1 From b9c41971ee5c71282ec9105acf458e4159198ded Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 15:14:53 +0000 Subject: [PATCH 76/93] Add Musl to drone CI script --- .drone.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.drone.yml b/.drone.yml index 91d2809..75c6506 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,5 +5,6 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: + - apk add musl - zig build - ./zig-out/test main.zig -- 2.34.1 From 342d296bfd1ea1c5de98c7d6eb83864f79910cde Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 15:26:18 +0000 Subject: [PATCH 77/93] Add libc-dev dependency in Drone CI script --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 75c6506..c36148f 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,6 +5,6 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: - - apk add musl + - apk add libc-dev - zig build - ./zig-out/test main.zig -- 2.34.1 From a401de3bae322d718ed9faa0ec9e7e3d454f9885 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 16:07:13 +0000 Subject: [PATCH 78/93] Add Alpine build-base dependency to Drone CI --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index c36148f..9383551 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,6 +5,6 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: - - 
apk add libc-dev + - apk --no-cache add build-base - zig build - ./zig-out/test main.zig -- 2.34.1 From f8bfecc983b5df553ae4ac97052926c6b1f9036e Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 16:09:04 +0000 Subject: [PATCH 79/93] Add Alpine SDL2 to Drone CI dependencies --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 9383551..2c98056 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,6 +5,6 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: - - apk --no-cache add build-base + - apk --no-cache add build-base sdl2 - zig build - ./zig-out/test main.zig -- 2.34.1 From 40f2e869ac309da6144482e89734aa7f77a961b2 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 16:10:15 +0000 Subject: [PATCH 80/93] Fix incorrect Alpine SDL2 dependency being used in Drone CI --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 2c98056..28f4aa5 100644 --- a/.drone.yml +++ b/.drone.yml @@ -5,6 +5,6 @@ steps: - name: build & test image: euantorano/zig:0.9.1 commands: - - apk --no-cache add build-base sdl2 + - apk --no-cache add build-base sdl2-dev - zig build - ./zig-out/test main.zig -- 2.34.1 From 595ad83cddc0c446abfbbe7983572df73746ab76 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 16:11:44 +0000 Subject: [PATCH 81/93] Fix incorrect test binary path in Drone CI --- .drone.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.drone.yml b/.drone.yml index 28f4aa5..d5426c7 100644 --- a/.drone.yml +++ b/.drone.yml @@ -7,4 +7,4 @@ steps: commands: - apk --no-cache add build-base sdl2-dev - zig build - - ./zig-out/test main.zig + - ./zig-out/bin/test main.zig -- 2.34.1 From dd81ae76ec9da2bfc02bf0ab9d088348eda60e30 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 22:22:29 +0000 Subject: [PATCH 82/93] Fix formatting of system-level code in Ona module --- src/ona/sys.zig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ona/sys.zig b/src/ona/sys.zig index 27bbae2..38422fe 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -378,12 +378,14 @@ pub const Path = extern struct { while (components.next()) |component| if (component.len != 0) { for (component) |byte| { if (path.length == max) return error.TooLong; + path.buffer[path.length] = byte; path.length += 1; } if (components.hasNext()) { if (path.length == max) return error.TooLong; + path.buffer[path.length] = '/'; path.length += 1; } -- 2.34.1 From 14e5e18228a1013cee42ac3a51e5800f93e91ba5 Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 22:25:19 +0000 Subject: [PATCH 83/93] Fix implementation of findFirstOf and Spliterator --- src/core/io.zig | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 495fd7d..ad814dc 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -63,10 +63,12 @@ pub fn Spliterator(comptime Element: type) type { fn testEquality(this: Element, that: Element) bool { return this == that; } - }.testEquality)) |index| { - defer self.source = self.source[(index + self.delimiter.len) .. self.source.len]; + }.testEquality)) |head| { + const tail = (head + self.delimiter.len); - return self.source[0 .. index]; + defer self.source = self.source[tail .. self.source.len]; + + return self.source[0 .. (tail - 1)]; } defer self.source = self.source[self.source.len .. 
self.source.len]; @@ -254,17 +256,15 @@ test "Find first of element" { pub fn findFirstOf(comptime Element: type, haystack: []const Element, needle: []const Element, comptime testEquality: fn (Element, Element) bool) ?usize { - var cursor: usize = 0; - const end = (haystack.len - needle.len); + var head: usize = 0; + const tail = (haystack.len - needle.len); - walk_haystack: while (cursor <= end) : (cursor += 1) { - const range = (cursor + needle.len); - var index = cursor; + walk_haystack: while (head <= tail) : (head += 1) { + for (needle) |element, index| { + if (!testEquality(haystack[head + index], element)) continue: walk_haystack; + } - while (index < range) : (index += 1) - if (testEquality(haystack[index], needle[index])) continue: walk_haystack; - - return cursor; + return head; } return null; -- 2.34.1 From cfd3e85021125abb0503d904e51895952811b22a Mon Sep 17 00:00:00 2001 From: kayomn Date: Tue, 1 Nov 2022 22:57:25 +0000 Subject: [PATCH 84/93] Tidy up IO code --- src/core/io.zig | 158 +++++++++++++++++++++++++----------------------- 1 file changed, 82 insertions(+), 76 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index ad814dc..5102493 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -32,26 +32,6 @@ pub fn Spliterator(comptime Element: type) type { return (self.source.len != 0); } - test "Check has data" { - var empty_spliterator = Spliterator(u8){ - .source = "", - .delimiter = "/", - }; - - try testing.expect(!empty_spliterator.hasNext()); - - var stateful_spliterator = Spliterator(u8){ - .source = "data", - .delimiter = "/", - }; - - try testing.expect(stateful_spliterator.hasNext()); - - _ = try stateful_spliterator.next(); - - try testing.expect(!stateful_spliterator.hasNext()); - } - /// /// Iterates on `self` and returns the next view of [Spliterator.source] that matches /// [Spliterator.delimiter], or `null` if there is no more data to be processed. @@ -59,81 +39,99 @@ pub fn Spliterator(comptime Element: type) type { pub fn next(self: *Self) ?[]const Element { if (!self.hasNext()) return null; + if (self.delimiter.len == 0) { + defer self.source = self.source[self.source.len .. self.source.len]; + + return self.source[0 .. self.source.len]; + } + if (findFirstOf(Element, self.source, self.delimiter, struct { fn testEquality(this: Element, that: Element) bool { return this == that; } }.testEquality)) |head| { - const tail = (head + self.delimiter.len); + defer self.source = self.source[(head + self.delimiter.len) .. self.source.len]; - defer self.source = self.source[tail .. self.source.len]; - - return self.source[0 .. (tail - 1)]; + return self.source[0 .. head]; } defer self.source = self.source[self.source.len .. self.source.len]; return self.source; } - - test "Iterate through data" { - // Single-character delimiter. - { - var spliterator = Spliterator(u8){ - .source = "single.character.separated.hello.world", - .delimiter = ".", - }; - - const components = [_][]const u8{"single", - "character", "separated", "hello", "world"}; - - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(equals(u8, split, components[index])); - } - } - - // Multi-character delimiter. 
- { - var spliterator = Spliterator(u8){ - .source = "finding a needle in a needle stack", - .delimiter = "needle", - }; - - const components = [_][]const u8{"finding a ", " in a ", " stack"}; - var index = @as(usize, 0); - - while (spliterator.next()) |split| : (index += 1) { - try testing.expect(equals(u8, split, components[index])); - } - } - } }; } +test "Spliterator of string literals" { + // Empty source. + { + var spliterator = Spliterator(u8){ + .source = "", + .delimiter = " ", + }; + + try testing.expect(!spliterator.hasNext()); + } + + // Empty delimiter. + { + var spliterator = Spliterator(u8){ + .source = "aaa", + .delimiter = "", + }; + + try testing.expect(spliterator.hasNext()); + try testing.expect(equals(u8, spliterator.next().?, "aaa")); + try testing.expect(!spliterator.hasNext()); + } + + // Single-character delimiter. + { + var spliterator = Spliterator(u8){ + .source = "single.character.separated.hello.world", + .delimiter = ".", + }; + + const components = [_][]const u8{"single", + "character", "separated", "hello", "world"}; + + var index = @as(usize, 0); + const components_tail = components.len - 1; + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(spliterator.hasNext() == (index < components_tail)); + try testing.expect(equals(u8, split, components[index])); + } + + try testing.expect(!spliterator.hasNext()); + } + + // Multi-character delimiter. + { + var spliterator = Spliterator(u8){ + .source = "finding a needle in a needle stack", + .delimiter = "needle", + }; + + const components = [_][]const u8{"finding a ", " in a ", " stack"}; + var index = @as(usize, 0); + const components_tail = components.len - 1; + + while (spliterator.next()) |split| : (index += 1) { + try testing.expect(spliterator.hasNext() == (index < components_tail)); + try testing.expect(equals(u8, split, components[index])); + } + + try testing.expect(!spliterator.hasNext()); + } +} + /// /// Closure that captures a reference to writable resources like block devices, memory buffers, /// network sockets, and more. /// pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); -/// -/// Returns `true` if `elements` starts with the characters in `with`, otherwise `false`. -/// -pub fn begins(comptime Element: type, elements: []const Element, with: []const Element) bool { - if (elements.len < with.len) return false; - - return equals(Element, elements[0 .. with.len], with); -} - -test "Check memory begins with" { - const bytes_sequence = &.{69, 42}; - - try testing.expect(begins(u8, &.{69, 42, 0, 89}, bytes_sequence)); - try testing.expect(!begins(u8, &.{69, 89, 42, 0}, bytes_sequence)); -} - /// /// Returns a sliced reference of the raw bytes in `pointer`. /// @@ -260,9 +258,8 @@ pub fn findFirstOf(comptime Element: type, haystack: []const Element, const tail = (haystack.len - needle.len); walk_haystack: while (head <= tail) : (head += 1) { - for (needle) |element, index| { + for (needle) |element, index| if (!testEquality(haystack[head + index], element)) continue: walk_haystack; - } return head; } @@ -271,7 +268,16 @@ pub fn findFirstOf(comptime Element: type, haystack: []const Element, } test "Find first of sequence" { + const haystack = &.{"foo", "bar", "baz"}; + const testEquality = struct { + fn testEquality(this: []const u8, that: []const u8) bool { + return equals(u8, this, that); + } + }.testEquality; + + try testing.expect(findFirstOf([]const u8, haystack, &.{"bar", "baz"}, testEquality).? 
== 1); + try testing.expect(findFirstOf([]const u8, haystack, &.{"baz", "bar"}, testEquality) == null); } /// -- 2.34.1 From d49608f7bb9a6660a24890f99958e578f71da379 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 2 Nov 2022 10:04:37 +0000 Subject: [PATCH 85/93] Initial work on allocator replacement --- src/core/io.zig | 61 ++++++++++++++++++++++++++++++++++++++++++++-- src/core/stack.zig | 2 +- src/core/table.zig | 31 ++++++++++++----------- src/ona/sys.zig | 13 ++++++++++ 4 files changed, 90 insertions(+), 17 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 5102493..5490c0f 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -4,9 +4,66 @@ const stack = @import("./stack.zig"); const testing = @import("./testing.zig"); /// +/// Dynamic memory allocation interface. /// -/// -pub const Allocator = @import("std").mem.Allocator; +pub const Allocator = union (enum) { + bound: struct { + alloc: fn (usize) ?[*]u8, + dealloc: fn ([*]u8) void, + }, + + arena: struct { + buffer: []u8, + cursor: usize = 0, + }, + + /// + /// [MakeError.OutOfMemory] is used to indicate there is not enough memory available for a given + /// operation. + /// + pub const MakeError = error { + OutOfMemory, + }; + + /// + /// Frees `allocation` using `allocator`. + /// + pub fn free(allocator: *Allocator, allocation: anytype) void { + switch (@typeInfo(@TypeOf(allocation))) { + .Pointer => |pointer| if (pointer.size == .Slice) + @compileError("`allocation` cannot be a slice"), + + else => @compileError("`allocation` must be a pointer"), + } + + if (@typeInfo(@TypeOf(allocation)) != .Pointer) + @compileError("`allocation` must be a pointer"); + + // TODO: Implement arena de-allocation. + switch (allocator.*) { + .bound => |bound| bound.dealloc(@ptrCast([*]u8, allocation)), + .arena => {}, + } + } + + /// + /// Attempts to allocate `size` number of `Element`s using `allocator`. + /// + /// Returns the allocation or a [MakeError] if it failed. + /// + pub fn make(allocator: *Allocator, comptime Element: type, size: usize) MakeError![*]Element { + switch (allocator.*) { + .bound => |bound| return @ptrCast([*]Element, @alignCast(@alignOf(Element), + bound.alloc(@sizeOf(Element) * size) orelse return error.OutOfMemory)), + + .arena => |*stack| { + defer stack.cursor += size; + + return @ptrCast([*]Element, @alignCast(@alignOf(Element), stack.buffer.ptr)); + }, + } + } +}; /// /// Closure that captures a reference to readable resources like block devices, memory buffers, diff --git a/src/core/stack.zig b/src/core/stack.zig index 6caa0fb..98c5c4b 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -113,7 +113,7 @@ test "Fixed stack of string literals" { /// /// Potential errors that may occur while trying to push one or more elements into a stack. /// -pub const PushError = io.Allocator.Error; +pub const PushError = io.Allocator.MakeError; /// /// Returns an [io.Writer] wrapping `fixed_stack`. 
diff --git a/src/core/table.zig b/src/core/table.zig index ddf60b7..c8c0755 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -1,4 +1,5 @@ const io = @import("./io.zig"); +const testing = @import("./testing.zig"); /// /// Returns a hash-backed table type of `Value`s indexed by `Key` and using `key_context` as the key @@ -10,7 +11,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, const Allocator = io.Allocator; return struct { - allocator: Allocator, + allocator: *Allocator, load_limit: f32, buckets: []Bucket, filled: usize, @@ -36,7 +37,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// Deinitializes `self`, preventing any further use. /// pub fn deinit(self: *Self) void { - self.allocator.free(self.buckets); + self.allocator.free(self.buckets.ptr); self.buckets = &.{}; } @@ -44,13 +45,13 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// /// Initializes a [Self] using `allocator` as the memory allocation strategy. /// - /// Returns a new [Self] value or an [io.Allocator.Error] if initializing failed. + /// Returns a new [Self] value or an [io.Allocator.MakeError] if initializing failed. /// - pub fn init(allocator: Allocator) Allocator.Error!Self { + pub fn init(allocator: *Allocator) Allocator.MakeError!Self { const initial_capacity = 4; return Self{ - .buckets = try allocator.alloc(Bucket, initial_capacity), + .buckets = (try allocator.make(Bucket, initial_capacity))[0 .. initial_capacity], .filled = 0, .allocator = allocator, .load_limit = 0.75, @@ -98,9 +99,11 @@ pub fn Hashed(comptime Key: type, comptime Value: type, if (self.loadFactor() >= self.load_limit) { const old_buckets = self.buckets; - defer self.allocator.free(old_buckets); + defer self.allocator.free(old_buckets.ptr); - self.buckets = try self.allocator.alloc(Bucket, old_buckets.len * 2); + const bucket_count = old_buckets.len * 2; + + self.buckets = (try self.allocator.make(Bucket, bucket_count))[0 .. bucket_count]; for (old_buckets) |bucket, index| self.buckets[index] = bucket; } @@ -160,7 +163,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key /// already present. /// -pub const InsertError = io.Allocator.Error || error { +pub const InsertError = io.Allocator.MakeError || error { KeyExists, }; @@ -193,16 +196,16 @@ pub const string_literal_context = KeyContext([]const u8){ }; test "Hash table manipulation with string literal context" { - const testing = @import("std").testing; - - var table = try Hashed([]const u8, u32, string_literal_context).init(testing.allocator); + var buffer = [_]u8{0} ** 1024; + var allocator = io.Allocator{.arena = .{.buffer = &buffer}}; + var table = try Hashed([]const u8, u32, string_literal_context).init(&allocator); defer table.deinit(); const foo = @as(u32, 69); - try testing.expectEqual(table.remove("foo"), null); + try testing.expect(table.remove("foo") == null); try table.insert("foo", foo); - try testing.expectEqual(table.remove("foo"), foo); - try testing.expectEqual(table.remove("foo"), null); + try testing.expect(table.remove("foo").? == foo); + try testing.expect(table.remove("foo") == null); } diff --git a/src/ona/sys.zig b/src/ona/sys.zig index 38422fe..d3f33a0 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -428,6 +428,19 @@ pub const RunError = error { InitFailure, }; +/// +/// Returns a [core.io.Allocator] bound to the underlying system allocator. 
+/// +pub fn allocator() core.io.Allocator { + // TODO: Add leak detection. + return .{ + .bound = .{ + .alloc = ext.SDL_alloc, + .dealloc = ext.SDL_free, + }, + }; +} + /// /// Runs a graphical application referenced by `run` with `error` as its error set. /// -- 2.34.1 From eb4a758251f15f0b165bd7c13d8b5271c59556f0 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 2 Nov 2022 11:27:10 +0000 Subject: [PATCH 86/93] Make Function closures only accept a pointer context --- src/core/io.zig | 10 ++++++---- src/core/meta.zig | 31 +++++++++++++++---------------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 5490c0f..be0c92b 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -69,7 +69,7 @@ pub const Allocator = union (enum) { /// Closure that captures a reference to readable resources like block devices, memory buffers, /// network sockets, and more. /// -pub const Reader = meta.Function(@sizeOf(usize), []u8, usize); +pub const Reader = meta.Function([]u8, usize); /// /// Returns a state machine for lazily computing all `Element` components of a given source input @@ -187,7 +187,7 @@ test "Spliterator of string literals" { /// Closure that captures a reference to writable resources like block devices, memory buffers, /// network sockets, and more. /// -pub const Writer = meta.Function(@sizeOf(usize), []const u8, usize); +pub const Writer = meta.Function([]const u8, usize); /// /// Returns a sliced reference of the raw bytes in `pointer`. @@ -383,8 +383,10 @@ test "Data swapping" { /// sent somewhere for whatever reason. /// pub fn nullWriter() Writer { - return Writer.capture(@as(usize, 0), struct { - fn write(_: usize, buffer: []const u8) usize { + var dummy: usize = 0; + + return Writer.capture(&dummy, struct { + fn write(_: *usize, buffer: []const u8) usize { return buffer.len; } }.write); diff --git a/src/core/meta.zig b/src/core/meta.zig index e7be22d..e176c1b 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -13,10 +13,10 @@ pub fn FnReturn(comptime Fn: type) type { /// Returns a single-input single-output closure type where `In` represents the input type, `Out` /// represents the output type, and `captures_size` represents the size of the closure context. /// -pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: type) type { +pub fn Function(comptime In: type, comptime Out: type) type { return struct { callErased: fn (*anyopaque, In) Out, - context: [captures_size]u8, + context: *anyopaque, /// /// Function type. @@ -27,37 +27,36 @@ pub fn Function(comptime captures_size: usize, comptime In: type, comptime Out: /// Invokes `self` with `input`, producing a result according to the current context data. /// pub fn call(self: *Self, input: In) Out { - return self.callErased(&self.context, input); + return self.callErased(self.context, input); } /// - /// Creates a new [Self] by capturing the `captures` value as the context and `invoke` as - /// the as the behavior executed when [call] or [callErased] is called. + /// Creates a new [Self] by capturing the `context` value as the capture context and + /// `invoke` as the behavior executed when [call] or [callErased] is called. /// /// The newly created [Self] is returned. 
/// - pub fn capture(captures: anytype, comptime invoke: fn (@TypeOf(captures), In) Out) Self { - const Captures = @TypeOf(captures); + pub fn capture(context: anytype, comptime invoke: fn (@TypeOf(context), In) Out) Self { + const Context = @TypeOf(context); - if (@sizeOf(Captures) > captures_size) - @compileError("`captures` exceeds the size limit of the capture context"); + switch (@typeInfo(Context)) { + .Pointer => |info| if (info.size == .Slice) + @compileError("`context` cannot be a slice"), - const captures_align = @alignOf(Captures); + else => @compileError("`context` must be a pointer"), + } var function = Self{ - .context = undefined, + .context = @ptrCast(*anyopaque, context), .callErased = struct { fn callErased(erased: *anyopaque, input: In) Out { - return invoke(if (Captures == void) {} else @ptrCast(*Captures, - @alignCast(@alignOf(Captures), erased)).*, input); + return invoke(@ptrCast(*Context, @alignCast( + @alignOf(Context), erased)).*, input); } }.callErased, }; - if (Captures != void) - @ptrCast(*Captures, @alignCast(captures_align, &function.context)).* = captures; - return function; } }; -- 2.34.1 From 2e544393a5df156ebc63b2cbde81fc1deafc8ce6 Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 2 Nov 2022 13:11:17 +0000 Subject: [PATCH 87/93] Simplify programming interface for creating closureless Functions --- src/core/io.zig | 18 +++++++----------- src/core/meta.zig | 33 ++++++++++++++++++++++++++++----- src/core/stack.zig | 2 +- 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index be0c92b..43a3698 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -377,23 +377,19 @@ test "Data swapping" { } /// -/// Returns a [Writer] that silently consumes all given data without failure and throws it away. +/// [Writer] that silently consumes all given data without failure and throws it away. /// /// This is commonly used for testing or redirected otherwise unwanted output data that has to be /// sent somewhere for whatever reason. /// -pub fn nullWriter() Writer { - var dummy: usize = 0; - - return Writer.capture(&dummy, struct { - fn write(_: *usize, buffer: []const u8) usize { - return buffer.len; - } - }.write); -} +pub const null_writer = Writer.from(struct { + fn write(buffer: []const u8) usize { + return buffer.len; + } +}.write); test "Null writing" { const sequence = "foo"; - try testing.expect(nullWriter().call(sequence) == sequence.len); + try testing.expect(null_writer.call(sequence) == sequence.len); } diff --git a/src/core/meta.zig b/src/core/meta.zig index e176c1b..cbbc534 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -18,6 +18,10 @@ pub fn Function(comptime In: type, comptime Out: type) type { callErased: fn (*anyopaque, In) Out, context: *anyopaque, + fn Invoker(comptime Context: type) type { + return if (Context == void) fn (In) Out else fn (Context, In) Out; + } + /// /// Function type. /// @@ -26,23 +30,42 @@ pub fn Function(comptime In: type, comptime Out: type) type { /// /// Invokes `self` with `input`, producing a result according to the current context data. /// - pub fn call(self: *Self, input: In) Out { + pub fn call(self: Self, input: In) Out { return self.callErased(self.context, input); } /// - /// Creates a new [Self] by capturing the `context` value as the capture context and + /// Creates and returns a [Self] using the `invoke` as the behavior executed when [call] or + /// [callErased] is called. + /// + /// For creating a closure-style function, see [fromClosure]. 
+ /// + pub fn from(comptime invoke: fn (In) Out) Self { + return .{ + .context = undefined, + + .callErased = struct { + fn callErased(_: *anyopaque, input: In) Out { + return invoke(input); + } + }.callErased, + }; + } + + /// + /// Creates and returns a [Self] by capturing the `context` value as the capture context and /// `invoke` as the behavior executed when [call] or [callErased] is called. /// /// The newly created [Self] is returned. /// - pub fn capture(context: anytype, comptime invoke: fn (@TypeOf(context), In) Out) Self { + pub fn fromClosure(context: anytype, comptime invoke: fn (@TypeOf(context), In) Out) Self { const Context = @TypeOf(context); switch (@typeInfo(Context)) { .Pointer => |info| if (info.size == .Slice) @compileError("`context` cannot be a slice"), + .Void => {}, else => @compileError("`context` must be a pointer"), } @@ -51,8 +74,8 @@ pub fn Function(comptime In: type, comptime Out: type) type { .callErased = struct { fn callErased(erased: *anyopaque, input: In) Out { - return invoke(@ptrCast(*Context, @alignCast( - @alignOf(Context), erased)).*, input); + return if (Context == void) invoke(input) else invoke(@ptrCast( + *Context, @alignCast(@alignOf(Context), erased)).*, input); } }.callErased, }; diff --git a/src/core/stack.zig b/src/core/stack.zig index 98c5c4b..841f76f 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -122,7 +122,7 @@ pub const PushError = io.Allocator.MakeError; /// referenced by `fixed_stack` until it is full. /// pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer { - return io.Writer.capture(fixed_stack, struct { + return io.Writer.fromClosure(fixed_stack, struct { fn write(stack: *Fixed(u8), buffer: []const u8) usize { stack.pushAll(buffer) catch |err| switch (err) { error.OutOfMemory => return 0, -- 2.34.1 From fcd4ecd85d815f651954861ea59421392f9ed6ca Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 2 Nov 2022 15:15:18 +0000 Subject: [PATCH 88/93] Refactor dynamic memory allocation interface --- src/core/io.zig | 106 ++++++++++++++++++++++++--------------------- src/core/stack.zig | 2 +- src/core/table.zig | 23 +++++----- 3 files changed, 71 insertions(+), 60 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 43a3698..ec2e660 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -3,66 +3,46 @@ const meta = @import("./meta.zig"); const stack = @import("./stack.zig"); const testing = @import("./testing.zig"); +/// +/// +/// +pub const Allocation = struct { + existing: ?[*]u8, + size: usize +}; + /// /// Dynamic memory allocation interface. /// -pub const Allocator = union (enum) { - bound: struct { - alloc: fn (usize) ?[*]u8, - dealloc: fn ([*]u8) void, - }, +pub const Allocator = meta.Function(Allocation, ?[*]u8); - arena: struct { - buffer: []u8, - cursor: usize = 0, - }, +/// +/// +/// +pub const ArenaAllocator = struct { + region: []u8, + cursor: usize = 0, /// - /// [MakeError.OutOfMemory] is used to indicate there is not enough memory available for a given - /// operation. /// - pub const MakeError = error { - OutOfMemory, - }; - /// - /// Frees `allocation` using `allocator`. 
- /// - pub fn free(allocator: *Allocator, allocation: anytype) void { - switch (@typeInfo(@TypeOf(allocation))) { - .Pointer => |pointer| if (pointer.size == .Slice) - @compileError("`allocation` cannot be a slice"), + pub fn allocator(arena_allocator: *ArenaAllocator) Allocator { + return Allocator.fromClosure(arena_allocator, struct { + fn call(context: *ArenaAllocator, allocation: Allocation) ?[*]u8 { + _ = allocation; + _ = context; - else => @compileError("`allocation` must be a pointer"), - } - - if (@typeInfo(@TypeOf(allocation)) != .Pointer) - @compileError("`allocation` must be a pointer"); - - // TODO: Implement arena de-allocation. - switch (allocator.*) { - .bound => |bound| bound.dealloc(@ptrCast([*]u8, allocation)), - .arena => {}, - } + return null; + } + }.call); } +}; - /// - /// Attempts to allocate `size` number of `Element`s using `allocator`. - /// - /// Returns the allocation or a [MakeError] if it failed. - /// - pub fn make(allocator: *Allocator, comptime Element: type, size: usize) MakeError![*]Element { - switch (allocator.*) { - .bound => |bound| return @ptrCast([*]Element, @alignCast(@alignOf(Element), - bound.alloc(@sizeOf(Element) * size) orelse return error.OutOfMemory)), - - .arena => |*stack| { - defer stack.cursor += size; - - return @ptrCast([*]Element, @alignCast(@alignOf(Element), stack.buffer.ptr)); - }, - } - } +/// +/// +/// +pub const MakeError = error { + OutOfMemory, }; /// @@ -337,6 +317,24 @@ test "Find first of sequence" { try testing.expect(findFirstOf([]const u8, haystack, &.{"baz", "bar"}, testEquality) == null); } +/// +/// +/// +pub fn free(allocator: Allocator, allocated_memory: anytype) void { + if (allocator.call(.{ + .existing = @ptrCast([*]u8, switch (@typeInfo(@TypeOf(allocated_memory))) { + .Pointer => |info| switch (info.size) { + .One, .Many, .C => allocated_memory, + .Slice => allocated_memory.ptr, + }, + + else => @compileError("`allocated_memory` must be a pointer"), + }), + + .size = 0, + }) != null) unreachable; +} + /// /// Returns a deterministic hash code compiled from each byte in `bytes`. /// @@ -357,6 +355,16 @@ test "Hashing bytes" { try testing.expect(hashBytes(bytes_sequence) != hashBytes(&.{69, 42})); } +/// +/// +/// +pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) MakeError![*]Element { + return @ptrCast([*]Element, @alignCast(@alignOf(Element), allocator.call(.{ + .existing = null, + .size = size, + }) orelse return error.OutOfMemory)); +} + /// /// Swaps the `Data` in `this` with `that`. /// diff --git a/src/core/stack.zig b/src/core/stack.zig index 841f76f..27653e5 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -113,7 +113,7 @@ test "Fixed stack of string literals" { /// /// Potential errors that may occur while trying to push one or more elements into a stack. /// -pub const PushError = io.Allocator.MakeError; +pub const PushError = io.MakeError; /// /// Returns an [io.Writer] wrapping `fixed_stack`. diff --git a/src/core/table.zig b/src/core/table.zig index c8c0755..f67213b 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -11,7 +11,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, const Allocator = io.Allocator; return struct { - allocator: *Allocator, + allocator: Allocator, load_limit: f32, buckets: []Bucket, filled: usize, @@ -37,7 +37,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// Deinitializes `self`, preventing any further use. 
/// pub fn deinit(self: *Self) void { - self.allocator.free(self.buckets.ptr); + io.free(self.allocator, self.buckets); self.buckets = &.{}; } @@ -45,13 +45,13 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// /// Initializes a [Self] using `allocator` as the memory allocation strategy. /// - /// Returns a new [Self] value or an [io.Allocator.MakeError] if initializing failed. + /// Returns a new [Self] value or an [io.MakeError] if initializing failed. /// - pub fn init(allocator: *Allocator) Allocator.MakeError!Self { + pub fn init(allocator: Allocator) io.MakeError!Self { const initial_capacity = 4; return Self{ - .buckets = (try allocator.make(Bucket, initial_capacity))[0 .. initial_capacity], + .buckets = (try io.makeMany(Bucket, allocator, initial_capacity))[0 .. initial_capacity], .filled = 0, .allocator = allocator, .load_limit = 0.75, @@ -99,11 +99,12 @@ pub fn Hashed(comptime Key: type, comptime Value: type, if (self.loadFactor() >= self.load_limit) { const old_buckets = self.buckets; - defer self.allocator.free(old_buckets.ptr); + defer io.free(self.allocator, old_buckets); const bucket_count = old_buckets.len * 2; - self.buckets = (try self.allocator.make(Bucket, bucket_count))[0 .. bucket_count]; + self.buckets = (try io.makeMany(Bucket, self.allocator, + bucket_count))[0 .. bucket_count]; for (old_buckets) |bucket, index| self.buckets[index] = bucket; } @@ -163,7 +164,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key /// already present. /// -pub const InsertError = io.Allocator.MakeError || error { +pub const InsertError = io.MakeError || error { KeyExists, }; @@ -197,8 +198,10 @@ pub const string_literal_context = KeyContext([]const u8){ test "Hash table manipulation with string literal context" { var buffer = [_]u8{0} ** 1024; - var allocator = io.Allocator{.arena = .{.buffer = &buffer}}; - var table = try Hashed([]const u8, u32, string_literal_context).init(&allocator); + var arena_allocator = io.ArenaAllocator{.region = &buffer}; + + var table = + try Hashed([]const u8, u32, string_literal_context).init(arena_allocator.allocator()); defer table.deinit(); -- 2.34.1 From 14b39210015012db0709e9e9fcfcf03b11affbfa Mon Sep 17 00:00:00 2001 From: kayomn Date: Wed, 2 Nov 2022 17:45:17 +0000 Subject: [PATCH 89/93] Add fixed stack-backed allocator implementation --- src/core/io.zig | 67 +++++++++++++++++++++++++--------------------- src/core/meta.zig | 6 ++--- src/core/stack.zig | 58 ++++++++++++++++++++++++++++++++++++--- src/core/table.zig | 15 ++++++----- 4 files changed, 102 insertions(+), 44 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index ec2e660..f3282ae 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -4,11 +4,12 @@ const stack = @import("./stack.zig"); const testing = @import("./testing.zig"); /// -/// +/// Allocation options for an [Allocator]. 
/// pub const Allocation = struct { existing: ?[*]u8, - size: usize + alignment: u29, + size: usize, }; /// @@ -17,29 +18,7 @@ pub const Allocation = struct { pub const Allocator = meta.Function(Allocation, ?[*]u8); /// -/// -/// -pub const ArenaAllocator = struct { - region: []u8, - cursor: usize = 0, - - /// - /// - /// - pub fn allocator(arena_allocator: *ArenaAllocator) Allocator { - return Allocator.fromClosure(arena_allocator, struct { - fn call(context: *ArenaAllocator, allocation: Allocation) ?[*]u8 { - _ = allocation; - _ = context; - - return null; - } - }.call); - } -}; - -/// -/// +/// [MakeError.OutOfMemory] if the requested amount of memory could not be allocated. /// pub const MakeError = error { OutOfMemory, @@ -259,6 +238,21 @@ test "Check memory is equal" { try testing.expect(!equals(u8, bytes_sequence, &.{69, 42})); } +/// +/// Fills the contents of `target` with `source`. +/// +pub fn fill(comptime Element: type, target: []Element, source: Element) void { + for (target) |_, index| target[index] = source; +} + +test "Fill data" { + var buffer = [_]u32{0} ** 8; + + fill(u32, &buffer, 1); + + for (buffer) |element| try testing.expect(element == 1); +} + /// /// Searches for the first instance of an `Element` equal to `needle` in `haystack`, returning its /// index or `null` if nothing was found. @@ -318,7 +312,10 @@ test "Find first of sequence" { } /// +/// Frees `allocated_memory` using `allocator`. /// +/// *Note* that only memory known to be freeable by `allocator` should be passed via +/// `allocated_memory`. Anything else will result is considered unreachable logic. /// pub fn free(allocator: Allocator, allocated_memory: anytype) void { if (allocator.call(.{ @@ -332,6 +329,7 @@ pub fn free(allocator: Allocator, allocated_memory: anytype) void { }), .size = 0, + .alignment = 0, }) != null) unreachable; } @@ -356,13 +354,21 @@ test "Hashing bytes" { } /// -/// +/// Attempts to allocate a buffer of `size` `Element`s using `allocator`, returning it or a +/// [MakeError] if it failed. /// pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) MakeError![*]Element { - return @ptrCast([*]Element, @alignCast(@alignOf(Element), allocator.call(.{ + const alignment = @alignOf(Element); + + if (allocator.call(.{ .existing = null, - .size = size, - }) orelse return error.OutOfMemory)); + .size = @sizeOf(Element) * size, + .alignment = alignment, + })) |buffer| { + return @ptrCast([*]Element, @alignCast(alignment, buffer)); + } + + return error.OutOfMemory; } /// @@ -385,7 +391,8 @@ test "Data swapping" { } /// -/// [Writer] that silently consumes all given data without failure and throws it away. +/// Thread-safe and lock-free [Writer] that silently consumes all given data without failure and +/// throws it away. /// /// This is commonly used for testing or redirected otherwise unwanted output data that has to be /// sent somewhere for whatever reason. 
diff --git a/src/core/meta.zig b/src/core/meta.zig index cbbc534..ce54238 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -69,18 +69,16 @@ pub fn Function(comptime In: type, comptime Out: type) type { else => @compileError("`context` must be a pointer"), } - var function = Self{ + return Self{ .context = @ptrCast(*anyopaque, context), .callErased = struct { fn callErased(erased: *anyopaque, input: In) Out { return if (Context == void) invoke(input) else invoke(@ptrCast( - *Context, @alignCast(@alignOf(Context), erased)).*, input); + Context, @alignCast(@alignOf(Context), erased)), input); } }.callErased, }; - - return function; } }; } diff --git a/src/core/stack.zig b/src/core/stack.zig index 27653e5..e79a4ef 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -39,7 +39,7 @@ pub fn Fixed(comptime Element: type) type { } /// - /// Attempts to push `element` into `self`, returning a [FixedPushError] if it failed. + /// Attempts to push `element` into `self`, returning a [PushError] if it failed. /// pub fn push(self: *Self, element: Element) PushError!void { if (self.filled == self.buffer.len) return error.OutOfMemory; @@ -49,8 +49,7 @@ pub fn Fixed(comptime Element: type) type { } /// - /// Attempts to push all of `elements` into `self`, returning a [FixedPushError] if it - /// failed. + /// Attempts to push all of `elements` into `self`, returning a [PushError] if it failed. /// pub fn pushAll(self: *Self, elements: []const Element) PushError!void { const filled = (self.filled + elements.len); @@ -61,6 +60,20 @@ pub fn Fixed(comptime Element: type) type { self.filled = filled; } + + /// + /// Attempts to push `count` instances of `element` into `self`, returning a [PushError] if + /// it failed. + /// + pub fn pushMany(self: *Self, element: Element, count: usize) PushError!void { + const filled = (self.filled + count); + + if (filled > self.buffer.len) return error.OutOfMemory; + + io.fill(Element, self.buffer[self.filled ..], element); + + self.filled = filled; + } }; } @@ -115,6 +128,45 @@ test "Fixed stack of string literals" { /// pub const PushError = io.MakeError; +/// +/// Creates and returns a [io.Allocator] value wrapping `fixed_stack`. +/// +/// The returned [io.Allocator] uses `fixed_stack` and its backing memory buffer as a fixed-length +/// memory pool to linearly allocate memory from. +/// +pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator { + return io.Allocator.fromClosure(fixed_stack, struct { + fn alloc(stack: *Fixed(u8), allocation: io.Allocation) ?[*]u8 { + if (allocation.existing) |buffer| if (allocation.size == 0) { + // Deallocate the memory. + const buffer_address = @ptrToInt(buffer); + const stack_buffer_address = @ptrToInt(stack.buffer.ptr); + + // Check the buffer is within the address space of the stack buffer. If not, it + // should just be returned to let the caller know it cannot be freed. + if ((buffer_address < stack_buffer_address) or + (buffer_address >= (stack_buffer_address + stack.filled))) return buffer; + + // TODO: Investigate ways of freeing if it is the last allocation. + return null; + }; + + // Reallocate / allocate the memory. + // TODO: Remove stdlib dependency. + const adjusted_offset = @import("std").mem.alignPointerOffset(stack.buffer.ptr + + stack.filled, allocation.alignment) orelse return null; + + const head = stack.filled + adjusted_offset; + const tail = head + allocation.size; + + stack.pushMany(0, tail) catch return null; + + return stack.buffer[head .. 
tail].ptr; + + } + }.alloc); +} + /// /// Returns an [io.Writer] wrapping `fixed_stack`. /// diff --git a/src/core/table.zig b/src/core/table.zig index f67213b..33a2e4c 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -1,4 +1,5 @@ const io = @import("./io.zig"); +const stack = @import("./stack.zig"); const testing = @import("./testing.zig"); /// @@ -48,10 +49,10 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// Returns a new [Self] value or an [io.MakeError] if initializing failed. /// pub fn init(allocator: Allocator) io.MakeError!Self { - const initial_capacity = 4; + const capacity = 4; return Self{ - .buckets = (try io.makeMany(Bucket, allocator, initial_capacity))[0 .. initial_capacity], + .buckets = (try io.makeMany(Bucket, allocator, capacity))[0 .. capacity], .filled = 0, .allocator = allocator, .load_limit = 0.75, @@ -197,15 +198,15 @@ pub const string_literal_context = KeyContext([]const u8){ }; test "Hash table manipulation with string literal context" { - var buffer = [_]u8{0} ** 1024; - var arena_allocator = io.ArenaAllocator{.region = &buffer}; + var buffer = [_]u8{0} ** 4096; + var fixed_stack = stack.Fixed(u8){.buffer = &buffer}; - var table = - try Hashed([]const u8, u32, string_literal_context).init(arena_allocator.allocator()); + var table = try Hashed([]const u8, u32, string_literal_context). + init(stack.fixedAllocator(&fixed_stack)); defer table.deinit(); - const foo = @as(u32, 69); + const foo = 69; try testing.expect(table.remove("foo") == null); try table.insert("foo", foo); -- 2.34.1 From 9251b11427b8b0786913003f204a14644717ae6d Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 3 Nov 2022 15:51:42 +0000 Subject: [PATCH 90/93] Auto-switch to debug console when debugging in VS Code --- .vscode/launch.json | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.vscode/launch.json b/.vscode/launch.json index 9782360..fec89bf 100755 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -9,6 +9,7 @@ "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", "preLaunchTask": "Build Debug", + "internalConsoleOptions": "openOnSessionStart", }, { @@ -20,6 +21,7 @@ "cwd": "${workspaceRoot}", "valuesFormatting": "parseText", "preLaunchTask": "Build Debug", + "internalConsoleOptions": "openOnSessionStart", }, ] } -- 2.34.1 From 4bb86c41bc5d0190b56807bc202f3300e4d0a300 Mon Sep 17 00:00:00 2001 From: kayomn Date: Thu, 3 Nov 2022 15:53:36 +0000 Subject: [PATCH 91/93] Add more test coverage and clean up code --- src/core/io.zig | 71 ++++++++++++++++++++++++++--------- src/core/math.zig | 40 ++++++++++++++++++-- src/core/stack.zig | 67 ++++++++++++++++++++++++++++++--- src/core/table.zig | 6 +-- src/core/testing.zig | 6 ++- src/core/unicode.zig | 89 ++++++++++++++++++++++++++++++-------------- src/ona/sys.zig | 7 +++- 7 files changed, 227 insertions(+), 59 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index f3282ae..11b7ed1 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -13,7 +13,7 @@ pub const Allocation = struct { }; /// -/// Dynamic memory allocation interface. +/// Closure for dynamic memory allocation through the referenced allocator state machine capture. /// pub const Allocator = meta.Function(Allocation, ?[*]u8); @@ -78,7 +78,7 @@ pub fn Spliterator(comptime Element: type) type { }; } -test "Spliterator of string literals" { +test "Spliterator(u8)" { // Empty source. 
{ var spliterator = Spliterator(u8){ @@ -172,7 +172,7 @@ pub fn bytesOf(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { } } -test "Bytes of types" { +test "bytesOf" { var foo: u32 = 10; try testing.expect(bytesOf(&foo)[0] == 0x0a); @@ -195,7 +195,7 @@ pub fn compareBytes(this: []const u8, that: []const u8) isize { return (@intCast(isize, this.len) - @intCast(isize, that.len)); } -test "Compare bytes" { +test "compareBytes" { try testing.expect(compareBytes(&.{69, 42, 0}, &.{69, 42, 0}) == 0); try testing.expect(compareBytes(&.{69, 42, 11}, &.{69, 42}) == 1); try testing.expect(compareBytes(&.{69, 42}, &.{69, 42, 11}) == -1); @@ -208,7 +208,7 @@ pub fn copy(comptime Element: type, target: []Element, source: []const Element) for (source) |element, index| target[index] = element; } -test "Copy data" { +test "copy" { var buffer = [_]u32{0} ** 20; const data = [_]u32{3, 20, 8000}; @@ -231,7 +231,7 @@ pub fn equals(comptime Element: type, this: []const Element, that: []const Eleme return true; } -test "Check memory is equal" { +test "equals" { const bytes_sequence = &.{69, 42, 0}; try testing.expect(equals(u8, bytes_sequence, bytes_sequence)); @@ -245,7 +245,7 @@ pub fn fill(comptime Element: type, target: []Element, source: Element) void { for (target) |_, index| target[index] = source; } -test "Fill data" { +test "fill" { var buffer = [_]u32{0} ** 8; fill(u32, &buffer, 1); @@ -265,7 +265,7 @@ pub fn findFirst(comptime Element: type, haystack: []const Element, return null; } -test "Find first of element" { +test "findFirst" { const haystack = &.{"", "", "foo"}; const testEquality = struct { @@ -298,7 +298,7 @@ pub fn findFirstOf(comptime Element: type, haystack: []const Element, return null; } -test "Find first of sequence" { +test "findFirstOf" { const haystack = &.{"foo", "bar", "baz"}; const testEquality = struct { @@ -333,6 +333,16 @@ pub fn free(allocator: Allocator, allocated_memory: anytype) void { }) != null) unreachable; } +test "free" { + var buffer = [_]u8{0} ** 4096; + var memory = stack.Fixed(u8){.buffer = &buffer}; + const fixed_allocator = stack.fixedAllocator(&memory); + const block_size = 8; + const allocated_block = (try makeMany(u8, fixed_allocator, block_size))[0 .. block_size]; + + defer free(fixed_allocator, allocated_block); +} + /// /// Returns a deterministic hash code compiled from each byte in `bytes`. /// @@ -346,7 +356,7 @@ pub fn hashBytes(bytes: []const u8) usize { return hash; } -test "Hashing bytes" { +test "hashBytes" { const bytes_sequence = &.{69, 42, 0}; try testing.expect(hashBytes(bytes_sequence) == hashBytes(bytes_sequence)); @@ -360,15 +370,42 @@ test "Hashing bytes" { pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) MakeError![*]Element { const alignment = @alignOf(Element); - if (allocator.call(.{ + return @ptrCast([*]Element, @alignCast(alignment, allocator.call(.{ .existing = null, .size = @sizeOf(Element) * size, .alignment = alignment, - })) |buffer| { - return @ptrCast([*]Element, @alignCast(alignment, buffer)); - } + }) orelse return error.OutOfMemory)); +} - return error.OutOfMemory; +test "makeMany" { + var buffer = [_]u8{0} ** 4096; + var memory = stack.Fixed(u8){.buffer = &buffer}; + const block_size = 8; + + // Don't care about the actual allocation - just assertions about it. + _ = (try makeMany(u8, stack.fixedAllocator(&memory), block_size))[0 .. block_size]; +} + +/// +/// Attempts to allocate a buffer of `1` `Element` using `allocator`, returning it or a [MakeError] +/// if it failed. 
+/// +pub fn makeOne(comptime Element: type, allocator: Allocator) MakeError!*Element { + const alignment = @alignOf(Element); + + return @ptrCast(*Element, @alignCast(alignment, allocator.call(.{ + .existing = null, + .size = @sizeOf(Element), + .alignment = alignment, + }) orelse return error.OutOfMemory)); +} + +test "makeOne" { + var buffer = [_]u8{0} ** 4096; + var memory = stack.Fixed(u8){.buffer = &buffer}; + + // Don't care about the actual allocation - just assertions about it. + _ = try makeOne(u8, stack.fixedAllocator(&memory)); } /// @@ -380,7 +417,7 @@ pub fn swap(comptime Data: type, this: *Data, that: *Data) void { that.* = temp; } -test "Data swapping" { +test "swap" { var a: u64 = 0; var b: u64 = 1; @@ -403,7 +440,7 @@ pub const null_writer = Writer.from(struct { } }.write); -test "Null writing" { +test "null_writer" { const sequence = "foo"; try testing.expect(null_writer.call(sequence) == sequence.len); diff --git a/src/core/math.zig b/src/core/math.zig index 6cb3c23..52c74a5 100644 --- a/src/core/math.zig +++ b/src/core/math.zig @@ -1,10 +1,12 @@ +const std = @import("std"); +const testing = @import("./testing.zig"); -pub const IntFittingRange = @import("std").math.IntFittingRange; +pub const IntFittingRange = std.math.IntFittingRange; /// -/// Returns the maximum value of `Integer`. +/// Returns the highest integer value representable by `Integer`. /// -pub fn maxInt(comptime Integer: type) comptime_int { +pub fn maxIntValue(comptime Integer: type) comptime_int { return switch (@typeInfo(Integer)) { .Int => |info| if (info.bits == 0) 0 else ((1 << (info.bits - @boolToInt(info.signedness == .signed))) - 1), @@ -13,6 +15,32 @@ pub fn maxInt(comptime Integer: type) comptime_int { }; } +test "maxIntValue" { + try testing.expect(maxIntValue(u8) == 255); + try testing.expect(maxIntValue(i8) == 127); + + try testing.expect(maxIntValue(u16) == 65535); + try testing.expect(maxIntValue(i16) == 32767); +} + +/// +/// Returns the highest `Number` value between `this` and `that`. +/// +pub fn max(comptime Number: type, this: Number, that: Number) Number { + return switch (@typeInfo(Number)) { + .Int, .Float, .ComptimeInt, .ComptimeFloat => if (this > that) this else that, + + else => @compileError("`" ++ @typeName(Number) ++ + "` must be an int, float, comptime_int, or comptime_float"), + }; +} + +test "max" { + try testing.expect(max(f32, 0.1, 1.0) == 1.0); + try testing.expect(max(f64, 1.0, 1.01) == 1.01); + try testing.expect(max(u32, 35615, 2873) == 35615); +} + /// /// Returns the lowest `Number` value between `this` and `that`. /// @@ -24,3 +52,9 @@ pub fn min(comptime Number: type, this: Number, that: Number) Number { "` must be an int, float, comptime_int, or comptime_float"), }; } + +test "min" { + try testing.expect(min(f32, 0.1, 1.0) == 0.1); + try testing.expect(min(f64, 1.0, 1.01) == 1.0); + try testing.expect(min(u32, 35615, 2873) == 2873); +} diff --git a/src/core/stack.zig b/src/core/stack.zig index e79a4ef..c83b35a 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -22,6 +22,13 @@ pub fn Fixed(comptime Element: type) type { self.filled = 0; } + /// + /// Returns `true` if `self` has filled its buffer to maximum capacity, otherwise `false`. 
+ /// + pub fn isFull(self: Self) bool { + return (self.filled == self.buffer.len); + } + /// /// If `self` is filled with at least `1` value, it is decremented by `1`, otherwise leaving /// the actual memory contents of the buffer untouched until it is later overwritten by @@ -42,7 +49,7 @@ pub fn Fixed(comptime Element: type) type { /// Attempts to push `element` into `self`, returning a [PushError] if it failed. /// pub fn push(self: *Self, element: Element) PushError!void { - if (self.filled == self.buffer.len) return error.OutOfMemory; + if (self.isFull()) return error.OutOfMemory; self.buffer[self.filled] = element; self.filled += 1; @@ -77,7 +84,7 @@ pub fn Fixed(comptime Element: type) type { }; } -test "Fixed stack of string literals" { +test "Fixed([]const u8)" { const default_value = ""; var buffer = [_][]const u8{default_value} ** 4; var shopping_list = Fixed([]const u8){.buffer = &buffer}; @@ -140,12 +147,12 @@ pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator { if (allocation.existing) |buffer| if (allocation.size == 0) { // Deallocate the memory. const buffer_address = @ptrToInt(buffer); - const stack_buffer_address = @ptrToInt(stack.buffer.ptr); + const stack_address = @ptrToInt(stack.buffer.ptr); // Check the buffer is within the address space of the stack buffer. If not, it // should just be returned to let the caller know it cannot be freed. - if ((buffer_address < stack_buffer_address) or - (buffer_address >= (stack_buffer_address + stack.filled))) return buffer; + if (buffer_address < stack_address or buffer_address >= + (stack_address + stack.filled)) return buffer; // TODO: Investigate ways of freeing if it is the last allocation. return null; @@ -167,6 +174,54 @@ pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator { }.alloc); } +test "fixedAllocator" { + var buffer = [_]u8{0} ** 32; + var stack = Fixed(u8){.buffer = &buffer}; + const allocator = fixedAllocator(&stack); + + // Allocation + var block_memory = allocator.call(.{ + .existing = null, + .alignment = @alignOf(u64), + .size = @sizeOf(u64), + }); + + try testing.expect(block_memory != null); + + const buffer_address_head = @ptrToInt(&buffer); + const buffer_address_tail = @ptrToInt(&buffer) + buffer.len; + + { + const block_memory_address = @ptrToInt(block_memory); + + try testing.expect(block_memory_address >= buffer_address_head and + block_memory_address < buffer_address_tail); + } + + // Reallocation. + block_memory = allocator.call(.{ + .existing = block_memory, + .alignment = @alignOf(u64), + .size = @sizeOf(u64), + }); + + try testing.expect(block_memory != null); + + { + const block_memory_address = @ptrToInt(block_memory); + + try testing.expect(block_memory_address >= buffer_address_head and + block_memory_address < buffer_address_tail); + } + + // Deallocation. + try testing.expect(allocator.call(.{ + .existing = block_memory, + .alignment = 0, + .size = 0, + }) == null); +} + /// /// Returns an [io.Writer] wrapping `fixed_stack`. 
/// @@ -185,7 +240,7 @@ pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer { }.write); } -test "Fixed writer" { +test "fixedWriter" { var buffer = [_]u8{0} ** 4; var sequence_stack = Fixed(u8){.buffer = &buffer}; const sequence_data = [_]u8{8, 16, 32, 64}; diff --git a/src/core/table.zig b/src/core/table.zig index 33a2e4c..1458060 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -197,12 +197,12 @@ pub const string_literal_context = KeyContext([]const u8){ }.stringsEqual, }; -test "Hash table manipulation with string literal context" { +test "Hashed([]const u8, u32, string_literal_context)" { var buffer = [_]u8{0} ** 4096; - var fixed_stack = stack.Fixed(u8){.buffer = &buffer}; + var memory = stack.Fixed(u8){.buffer = &buffer}; var table = try Hashed([]const u8, u32, string_literal_context). - init(stack.fixedAllocator(&fixed_stack)); + init(stack.fixedAllocator(&memory)); defer table.deinit(); diff --git a/src/core/testing.zig b/src/core/testing.zig index 1881378..c2b88e9 100644 --- a/src/core/testing.zig +++ b/src/core/testing.zig @@ -13,6 +13,10 @@ pub fn expect(ok: bool) TestError!void { if (!ok) return error.UnexpectedResult; } -// TODO: Implement tests. +test "expect" { + try expect(true); + + expect(false) catch {}; +} pub const expectError = @import("std").testing.expectError; diff --git a/src/core/unicode.zig b/src/core/unicode.zig index 6e89678..de6279f 100644 --- a/src/core/unicode.zig +++ b/src/core/unicode.zig @@ -1,5 +1,7 @@ const io = @import("./io.zig"); const math = @import("./math.zig"); +const stack = @import("./stack.zig"); +const testing = @import("./testing.zig"); /// /// [PrintError.WriteFailure] occurs when the underlying [io.Writer] implementation failed to write @@ -10,7 +12,7 @@ pub const PrintError = error { }; /// -/// Number formatting modes supported by [printInt]. +/// Named identifiers for number formats used in printing functions. /// pub const Radix = enum { binary, @@ -28,6 +30,29 @@ pub const Radix = enum { tetradecimal, pentadecimal, hexadecimal, + + /// + /// Returns the base number of `radix`. + /// + pub fn base(radix: Radix) u8 { + return switch (radix) { + .binary => 2, + .tinary => 3, + .quaternary => 4, + .quinary => 5, + .senary => 6, + .septenary => 7, + .octal => 8, + .nonary => 9, + .decimal => 10, + .undecimal => 11, + .duodecimal => 12, + .tridecimal => 13, + .tetradecimal => 14, + .pentadecimal => 15, + .hexadecimal => 16, + }; + } }; /// @@ -40,35 +65,39 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void const Int = @TypeOf(value); switch (@typeInfo(Int)) { - .Int => |int_info| { - if (value == 0) return writer.apply("0"); + .Int => |info| { + if (value == 0) { + const zero = "0"; - const base = @enumToInt(radix); - const is_signed = (int_info.signedness == .signed); + if (writer.call(zero) != zero.len) return error.WriteFailure; + } else { + // Big enough to hold the hexadecimal representation of the integer type, which is + // the largest number format accomodated for in [Radix]. + var buffer = [_]u8{0} ** (@sizeOf(Int) * (@bitSizeOf(u8) / 4)); + var buffer_count: usize = 0; + var n1 = value; - var buffer = [_]u8{0} ** (math.ceil(math.log(math. - maxInt(Int), base)) + @boolToInt(is_signed)); + if (info.signedness == .signed and value < 0) { + // Negative value. + n1 = -value; + buffer[0] = '-'; + buffer_count += 1; + } - var buffer_count: usize = 0; - var n1 = value; + while (n1 != 0) { + const base = radix.base(); - if (is_signed and (value < 0)) { - // Negative value. 
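// Editor's aside (not part of the patch): the digit loop below assembles the number with its
// least-significant digit first and then reverses the written span in place, e.g. printing 365
// in decimal emits '5', '6', '3' and the reverse pass yields "365". A minimal standalone form of
// the same idea, assuming a non-zero unsigned value and a base of at most 10, could read:
//
//     fn formatUnsigned(buffer: []u8, base: u8, value: u64) usize {
//         var remaining = value;
//         var count: usize = 0;
//
//         while (remaining != 0) : (count += 1) {
//             buffer[count] = @intCast(u8, (remaining % base) + '0');
//             remaining /= base;
//         }
//
//         var index: usize = 0;
//
//         while (index < (count / 2)) : (index += 1) {
//             const opposite = count - index - 1;
//             const swapped = buffer[index];
//
//             buffer[index] = buffer[opposite];
//             buffer[opposite] = swapped;
//         }
//
//         return count;
//     }
//
// Bases above 10 would additionally need a digit table, since `(remaining % base) + '0'` only
// maps 0 through 9 onto ASCII digits.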
- n1 = -value; - buffer[0] = '-'; - buffer_count += 1; + buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); + n1 = (n1 / base); + buffer_count += 1; + } + + for (buffer[0 .. (buffer_count / 2)]) |_, i| + io.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); + + if (writer.call(buffer[0 .. buffer_count]) != buffer_count) + return error.WriteFailure; } - - while (n1 != 0) { - buffer[buffer_count] = @intCast(u8, (n1 % base) + '0'); - n1 = (n1 / base); - buffer_count += 1; - } - - for (buffer[0 .. (buffer_count / 2)]) |_, i| - io.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); - - if (writer.call(buffer[0 .. buffer_count]) != buffer_count) return error.WriteFailure; }, // Cast comptime int into known-size integer and try again. @@ -79,6 +108,12 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void } } -test "Print 64-bit signed integer" { - // TODO: implement. +test "printInt" { + // Max digits to represent a decimal u8 is 3 (i.e. 127 / 255). + var decimal_buffer = [_]u8{0} ** 3; + var decimal_stack = stack.Fixed(u8){.buffer = &decimal_buffer}; + var decimal_writer = stack.fixedWriter(&decimal_stack); + + try printInt(decimal_writer, .decimal, 365); + try testing.expect(decimal_stack.isFull()); } diff --git a/src/ona/sys.zig b/src/ona/sys.zig index d3f33a0..4682760 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -114,7 +114,10 @@ pub const ReadableFile = opaque { { ext.SDL_ClearError(); - var sought = core.math.min(u64, offset, core.math.maxInt(i64)); + const math = core.math; + const min = math.min; + const maxIntValue = math.maxIntValue; + var sought = min(u64, offset, maxIntValue(i64)); if (ext.SDL_RWseek(rw_ops, @intCast(i64, sought), ext.RW_SEEK_SET) < 0) return error.FileInaccessible; @@ -122,7 +125,7 @@ pub const ReadableFile = opaque { var to_seek = offset - sought; while (to_seek != 0) { - sought = core.math.min(u64, to_seek, core.math.maxInt(i64)); + sought = min(u64, to_seek, maxIntValue(i64)); ext.SDL_ClearError(); -- 2.34.1 From 4f0224a029fe4a368c59ffb39baf9f767847087a Mon Sep 17 00:00:00 2001 From: kayomn Date: Fri, 4 Nov 2022 10:38:37 +0000 Subject: [PATCH 92/93] Fix documentation / tidy up code --- src/core/io.zig | 10 ++++++---- src/core/math.zig | 1 + src/core/stack.zig | 33 +++++++++++++++++++-------------- src/core/table.zig | 13 +++++++------ src/ona/main.zig | 9 ++++++--- src/ona/oar.zig | 25 ++++++++++++++++++------- 6 files changed, 57 insertions(+), 34 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 11b7ed1..79b4b73 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -151,8 +151,6 @@ pub const Writer = meta.Function([]const u8, usize); /// /// Returns a sliced reference of the raw bytes in `pointer`. /// -/// **Note** that passing a slice will convert it to a byte slice. -/// pub fn bytesOf(pointer: anytype) switch (@typeInfo(@TypeOf(pointer))) { .Pointer => |info| if (info.is_const) []const u8 else []u8, else => @compileError("`pointer` must be a pointer type"), @@ -254,8 +252,10 @@ test "fill" { } /// -/// Searches for the first instance of an `Element` equal to `needle` in `haystack`, returning its -/// index or `null` if nothing was found. +/// Linearly searches for the first instance of an `Element` equal to `needle` in `haystack`, +/// returning its index or `null` if nothing was found. +/// +/// **Note** that this operation has `O(n)` time complexity. 
/// pub fn findFirst(comptime Element: type, haystack: []const Element, needle: Element, comptime testEquality: fn (Element, Element) bool) ?usize { @@ -282,6 +282,8 @@ test "findFirst" { /// Searches for the first instance of an `Element` sequence equal to the contents of `needle` in /// `haystack`, returning the starting index or `null` if nothing was found. /// +/// **Note** that this operation has `O(nm)` time complexity. +/// pub fn findFirstOf(comptime Element: type, haystack: []const Element, needle: []const Element, comptime testEquality: fn (Element, Element) bool) ?usize { diff --git a/src/core/math.zig b/src/core/math.zig index 52c74a5..733a9dc 100644 --- a/src/core/math.zig +++ b/src/core/math.zig @@ -1,6 +1,7 @@ const std = @import("std"); const testing = @import("./testing.zig"); +// TODO: Remove stdlib dependency. pub const IntFittingRange = std.math.IntFittingRange; /// diff --git a/src/core/stack.zig b/src/core/stack.zig index c83b35a..609d66c 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -46,22 +46,23 @@ pub fn Fixed(comptime Element: type) type { } /// - /// Attempts to push `element` into `self`, returning a [PushError] if it failed. + /// Attempts to push `element` into `self`, returning a [FixedPushError] if it failed. /// - pub fn push(self: *Self, element: Element) PushError!void { - if (self.isFull()) return error.OutOfMemory; + pub fn push(self: *Self, element: Element) FixedPushError!void { + if (self.isFull()) return error.BufferOverflow; self.buffer[self.filled] = element; self.filled += 1; } /// - /// Attempts to push all of `elements` into `self`, returning a [PushError] if it failed. + /// Attempts to push all of `elements` into `self`, returning a [FixedPushError] if it + /// failed. /// - pub fn pushAll(self: *Self, elements: []const Element) PushError!void { + pub fn pushAll(self: *Self, elements: []const Element) FixedPushError!void { const filled = (self.filled + elements.len); - if (filled > self.buffer.len) return error.OutOfMemory; + if (filled > self.buffer.len) return error.BufferOverflow; io.copy(Element, self.buffer[self.filled ..], elements); @@ -69,13 +70,13 @@ pub fn Fixed(comptime Element: type) type { } /// - /// Attempts to push `count` instances of `element` into `self`, returning a [PushError] if - /// it failed. + /// Attempts to push `count` instances of `element` into `self`, returning a + /// [FixedPushError] if it failed. /// - pub fn pushMany(self: *Self, element: Element, count: usize) PushError!void { + pub fn pushMany(self: *Self, element: Element, count: usize) FixedPushError!void { const filled = (self.filled + count); - if (filled > self.buffer.len) return error.OutOfMemory; + if (filled > self.buffer.len) return error.BufferOverflow; io.fill(Element, self.buffer[self.filled ..], element); @@ -131,9 +132,11 @@ test "Fixed([]const u8)" { } /// -/// Potential errors that may occur while trying to push one or more elements into a stack. +/// Potential errors that may occur while trying to push one or more elements into a [Fixed] stack. /// -pub const PushError = io.MakeError; +pub const FixedPushError = error { + BufferOverflow, +}; /// /// Creates and returns a [io.Allocator] value wrapping `fixed_stack`. @@ -154,8 +157,10 @@ pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator { if (buffer_address < stack_address or buffer_address >= (stack_address + stack.filled)) return buffer; - // TODO: Investigate ways of freeing if it is the last allocation. 
+ // TODO: Investigate ways of actually freeing if it is the last allocation. return null; + } else { + // TODO: Investigate ways of in-place relocating if it is the last allocation. }; // Reallocate / allocate the memory. @@ -232,7 +237,7 @@ pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer { return io.Writer.fromClosure(fixed_stack, struct { fn write(stack: *Fixed(u8), buffer: []const u8) usize { stack.pushAll(buffer) catch |err| switch (err) { - error.OutOfMemory => return 0, + error.BufferOverflow => return 0, }; return buffer.len; diff --git a/src/core/table.zig b/src/core/table.zig index 1458060..8cfb978 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -97,7 +97,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// [InsertError] if it fails. /// pub fn insert(self: *Self, key: Key, value: Value) InsertError!void { - if (self.loadFactor() >= self.load_limit) { + if (self.isOverloaded()) { const old_buckets = self.buckets; defer io.free(self.allocator, old_buckets); @@ -131,17 +131,18 @@ pub fn Hashed(comptime Key: type, comptime Value: type, } /// - /// Returns the current load factor of `self`, which is derived from the number of capacity - /// that has been filled. + /// Returns `true` if the current load factor, derived from the number of elements filling + /// the bucket table, is greater than the current load limit. /// - pub fn loadFactor(self: Self) f32 { - return @intToFloat(f32, self.filled) / @intToFloat(f32, self.buckets.len); + pub fn isOverloaded(self: Self) bool { + return (@intToFloat(f32, self.filled) / + @intToFloat(f32, self.buckets.len)) >= self.load_limit; } /// /// Searches for a value indexed with `key` in `self`. /// - /// The found value is returned or `null` if an key matching `key` failed to be found. + /// The found value is returned or `null` if any key matching `key` failed to be found. /// pub fn lookup(self: Self, key: Key) ?Value { var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]); diff --git a/src/ona/main.zig b/src/ona/main.zig index e8e8718..0045c34 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -3,13 +3,16 @@ const std = @import("std"); const sys = @import("./sys.zig"); /// -/// Starts the the game engine. +/// Application entry-point. /// pub fn main() anyerror!void { - return nosuspend await async sys.display(anyerror, run); + return nosuspend await async sys.display(anyerror, runEngine); } -fn run(app: *sys.App, graphics: *sys.Graphics) anyerror!void { +/// +/// Runs the game engine. +/// +fn runEngine(app: *sys.App, graphics: *sys.Graphics) anyerror!void { var gpa = std.heap.GeneralPurposeAllocator(.{}){}; defer _ = gpa.deinit(); diff --git a/src/ona/oar.zig b/src/ona/oar.zig index 16feb84..05ee2df 100644 --- a/src/ona/oar.zig +++ b/src/ona/oar.zig @@ -6,7 +6,6 @@ const sys = @import("./sys.zig"); /// const Block = extern struct { signature: [signature_magic.len]u8 = signature_magic, - revision: u8 = 0, path: sys.Path = sys.Path.empty, data_size: u64 = 0, data_head: u64 = 0, @@ -15,19 +14,23 @@ const Block = extern struct { comptime { const entry_size = @sizeOf(@This()); - if (entry_size != 512) @compileError("EntryBlock is greater than 512 bytes"); + if (entry_size != 512) @compileError("EntryBlock is not 512 bytes"); } }; /// -/// +/// Reference to a file entry in an Oar archive, denoting the starting offset from the top of head +/// of the file and its size. 
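// Editor's sketch (not part of the patch): with the API documented here, a consumer that wants
// the bytes of one archived file would pair [Entry.find] with [Entry.read] roughly as below.
// The "scripts.oar" and "ona.lua" names are illustrative placeholders.
//
//     fn loadEntry(data: *const sys.FileSystem, buffer: []u8) !usize {
//         const archive_file = try data.openRead(try sys.Path.from(&.{"scripts.oar"}));
//
//         defer archive_file.close();
//
//         const entry = try Entry.find(archive_file, try sys.Path.from(&.{"ona.lua"}));
//
//         // Reads from the start of the entry, clamped to the smaller of the entry size and
//         // the destination buffer length.
//         return entry.read(archive_file, 0, buffer);
//     }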
/// pub const Entry = struct { head: u64, size: u64, /// + /// [FindError.EntryNotFound] occurs when no entry matched the parameters of the find operation. /// + /// [FindError.ArchiveUnsupported] occurs if the file provided to the find operation is not a + /// valid archive file. /// pub const FindError = error { EntryNotFound, @@ -35,7 +38,11 @@ pub const Entry = struct { }; /// + /// Attempts to perform a binary search on the entry blocks defined in `archive_file` for one + /// matching `entry_path`, returning an [Entry] referencing its data or a [FindError] if it + /// failed. /// + /// **Note** that this operation has `O(log n)` time complexity. /// pub fn find(archive_file: *sys.ReadableFile, entry_path: sys.Path) FindError!Entry { var header = Header{}; @@ -78,7 +85,11 @@ pub const Entry = struct { } /// + /// Reads the data from `entry` in `archive_file` from the byte at the entry-relative `offset` + /// into `buffer` until either the end of the entry data, end of archive file, or end of buffer + /// is reached. /// + /// The number of bytes read is returned or [sys.FileError] if it failed. /// pub fn read(entry: Entry, archive_file: *sys.ReadableFile, offset: u64, buffer: []u8) sys.FileError!usize { @@ -89,7 +100,7 @@ pub const Entry = struct { }; /// -/// +/// Header data that every Oar archive file starts with at byte offset `0`. /// const Header = extern struct { signature: [signature_magic.len]u8 = signature_magic, @@ -101,16 +112,16 @@ const Header = extern struct { comptime { const size = @sizeOf(@This()); - if (size != 512) @compileError("Header is greater than 512 bytes"); + if (size != 512) @compileError("Header is not 512 bytes"); } }; /// -/// +/// The magic revision number that this Oar software implementation understands. /// const revision_magic = 0; /// -/// Magic identifier used to validate [Entry] data. +/// Magic identifier used to validate [Header] and [Block] data. /// const signature_magic = [3]u8{'o', 'a', 'r'}; -- 2.34.1 From 47a997b0ec711c28cfcde66638f689941a7b1237 Mon Sep 17 00:00:00 2001 From: kayomn Date: Mon, 7 Nov 2022 23:51:45 +0000 Subject: [PATCH 93/93] Implement loading of Oar archive entry-backed files --- src/core/io.zig | 229 ++++++++++++++++++----- src/core/meta.zig | 74 -------- src/core/stack.zig | 115 +++++++----- src/core/table.zig | 11 +- src/core/unicode.zig | 6 +- src/ona/main.zig | 17 +- src/ona/oar.zig | 144 ++++++-------- src/ona/sys.zig | 435 ++++++++++++++++++++++++++++++------------- 8 files changed, 647 insertions(+), 384 deletions(-) diff --git a/src/core/io.zig b/src/core/io.zig index 79b4b73..bf794b8 100644 --- a/src/core/io.zig +++ b/src/core/io.zig @@ -4,31 +4,129 @@ const stack = @import("./stack.zig"); const testing = @import("./testing.zig"); /// -/// Allocation options for an [Allocator]. +/// [AccessError.Inacessible] is a generic catch-all for IO resources that are inaccessible for +/// implementation-specific reasons. /// -pub const Allocation = struct { - existing: ?[*]u8, - alignment: u29, - size: usize, +pub const AccessError = error { + Inaccessible, }; /// -/// Closure for dynamic memory allocation through the referenced allocator state machine capture. +/// [AllocationError.OutOfMemory] if the requested amount of memory could not be allocated. /// -pub const Allocator = meta.Function(Allocation, ?[*]u8); - -/// -/// [MakeError.OutOfMemory] if the requested amount of memory could not be allocated. 
-/// -pub const MakeError = error { +pub const AllocationError = error { OutOfMemory, }; /// -/// Closure that captures a reference to readable resources like block devices, memory buffers, -/// network sockets, and more. +/// Memory layout description for a memory allocation. /// -pub const Reader = meta.Function([]u8, usize); +pub const AllocationLayout = struct { + length: usize, + alignment: u29 = 8, +}; + +/// +/// Interface for dynamic memory allocation through the state machine of the wrapped allocator +/// implementation. +/// +pub const Allocator = struct { + context: *anyopaque, + + vtable: *const struct { + alloc: fn (*anyopaque, AllocationLayout) AllocationError![*]u8, + dealloc: fn (*anyopaque, [*]u8) void, + realloc: fn (*anyopaque, [*]u8, AllocationLayout) AllocationError![*]u8, + }, + + /// + /// Attempts to allocate a block of memory from `allocator` according to `layout`, returning it + /// or [AllocationError] if it failed. + /// + pub fn alloc(allocator: Allocator, layout: AllocationLayout) AllocationError![*]u8 { + return allocator.vtable.alloc(allocator.context, layout); + } + + /// + /// Deallocates the block of memory from `allocator` referenced by `allocation`. + /// + pub fn dealloc(allocator: Allocator, allocation: [*]u8) void { + allocator.vtable.dealloc(allocator.context, allocation); + } + + /// + /// Attempts to reallocate the existing block of memory from `allocator` referenced by + /// `allocation` according to `layout`, returning it or [AllocationError] if it failed. + /// + pub fn realloc(allocator: Allocator, allocation: [*]u8, + layout: AllocationLayout) AllocationError![*]u8 { + + return allocator.vtable.realloc(allocator.context, allocation, layout); + } + + /// + /// Wraps `implementation`, returning the [Allocator] value. 
+ /// + pub fn wrap(implementation: anytype) Allocator { + const Implementation = @TypeOf(implementation.*); + + return .{ + .context = @ptrCast(*anyopaque, implementation), + + .vtable = switch (@typeInfo(Implementation)) { + .Struct => &.{ + .alloc = struct { + fn call(context: *anyopaque, layout: AllocationLayout) AllocationError![*]u8 { + return @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).alloc(layout); + } + }.call, + + .dealloc = struct { + fn call(context: *anyopaque, allocation: [*]u8) void { + return @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).dealloc(allocation); + } + }.call, + + .realloc = struct { + fn call(context: *anyopaque, allocation: [*]u8, + layout: AllocationLayout) AllocationError![*]u8 { + + return @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).realloc(allocation, layout); + } + }.call, + }, + + .Opaque => &.{ + .alloc = struct { + fn call(context: *anyopaque, layout: AllocationLayout) AllocationError![*]u8 { + return @ptrCast(*Implementation, context).alloc(layout); + } + }.call, + + .dealloc = struct { + fn call(context: *anyopaque, allocation: [*]u8) void { + return @ptrCast(*Implementation, context).dealloc(allocation); + } + }.call, + + .realloc = struct { + fn call(context: *anyopaque, allocation: [*]u8, + layout: AllocationLayout) AllocationError![*]u8 { + + return @ptrCast(*Implementation, context).realloc(allocation, layout); + } + }.call, + }, + + else => @compileError( + "`context` must a single-element pointer referencing a struct or opaque type"), + }, + }; + } +}; /// /// Returns a state machine for lazily computing all `Element` components of a given source input @@ -143,10 +241,57 @@ test "Spliterator(u8)" { } /// -/// Closure that captures a reference to writable resources like block devices, memory buffers, +/// Interface for capturing a reference to a writable resource like block devices, memory buffers, /// network sockets, and more. /// -pub const Writer = meta.Function([]const u8, usize); +pub const Writer = struct { + context: *anyopaque, + + vtable: *const struct { + write: fn (*anyopaque, []const u8) AccessError!usize, + }, + + /// + /// Wraps `implementation`, returning the [Writer] value. + /// + pub fn wrap(implementation: anytype) Writer { + const Implementation = @TypeOf(implementation.*); + + return .{ + .context = @ptrCast(*anyopaque, implementation), + + .vtable = switch (@typeInfo(Implementation)) { + .Struct => &.{ + .write = struct { + fn call(context: *anyopaque, buffer: []const u8) AccessError!usize { + return @ptrCast(*Implementation, + @alignCast(@alignOf(Implementation), context)).write(buffer); + } + }.call, + }, + + .Opaque => &.{ + .write = struct { + fn call(context: *anyopaque, buffer: []const u8) AccessError!usize { + return @ptrCast(*Implementation, context).write(buffer); + } + }.call, + }, + + else => @compileError( + "`context` must a single-element pointer referencing a struct or opaque type"), + }, + }; + } + + /// + /// Attempts to write to `buffer` to `writer`, returning the number of successfully written or + /// [AccessError] if it failed. + /// + pub fn write(writer: Writer, buffer: []const u8) AccessError!usize { + return writer.vtable.write(writer.context, buffer); + } +}; /// /// Returns a sliced reference of the raw bytes in `pointer`. @@ -320,19 +465,14 @@ test "findFirstOf" { /// `allocated_memory`. Anything else will result is considered unreachable logic. 
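// Editor's illustration (not part of the patch): with the closure-style [meta.Function] gone,
// any struct or opaque type exposing a matching `write` method can back a [Writer] by handing a
// pointer to it to [Writer.wrap]. The `CountingWriter` type and the test below are assumed
// names used purely for the sake of the example.
const CountingWriter = struct {
    written: usize = 0,

    pub fn write(self: *CountingWriter, buffer: []const u8) AccessError!usize {
        self.written += buffer.len;

        return buffer.len;
    }
};

test "Writer.wrap" {
    var counting_writer = CountingWriter{};
    const writer = Writer.wrap(&counting_writer);

    try testing.expect((try writer.write("ona")) == 3);
    try testing.expect(counting_writer.written == 3);
}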
/// pub fn free(allocator: Allocator, allocated_memory: anytype) void { - if (allocator.call(.{ - .existing = @ptrCast([*]u8, switch (@typeInfo(@TypeOf(allocated_memory))) { - .Pointer => |info| switch (info.size) { - .One, .Many, .C => allocated_memory, - .Slice => allocated_memory.ptr, - }, + allocator.dealloc(@ptrCast([*]u8, switch (@typeInfo(@TypeOf(allocated_memory))) { + .Pointer => |info| switch (info.size) { + .One, .Many, .C => allocated_memory, + .Slice => allocated_memory.ptr, + }, - else => @compileError("`allocated_memory` must be a pointer"), - }), - - .size = 0, - .alignment = 0, - }) != null) unreachable; + else => @compileError("`allocated_memory` must be a pointer"), + })); } test "free" { @@ -369,14 +509,13 @@ test "hashBytes" { /// Attempts to allocate a buffer of `size` `Element`s using `allocator`, returning it or a /// [MakeError] if it failed. /// -pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) MakeError![*]Element { +pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) AllocationError![*]Element { const alignment = @alignOf(Element); - return @ptrCast([*]Element, @alignCast(alignment, allocator.call(.{ - .existing = null, - .size = @sizeOf(Element) * size, + return @ptrCast([*]Element, @alignCast(alignment, try allocator.alloc(.{ + .length = @sizeOf(Element) * size, .alignment = alignment, - }) orelse return error.OutOfMemory)); + }))); } test "makeMany" { @@ -392,14 +531,13 @@ test "makeMany" { /// Attempts to allocate a buffer of `1` `Element` using `allocator`, returning it or a [MakeError] /// if it failed. /// -pub fn makeOne(comptime Element: type, allocator: Allocator) MakeError!*Element { +pub fn makeOne(comptime Element: type, allocator: Allocator) AllocationError!*Element { const alignment = @alignOf(Element); - return @ptrCast(*Element, @alignCast(alignment, allocator.call(.{ - .existing = null, - .size = @sizeOf(Element), + return @ptrCast(*Element, @alignCast(alignment, try allocator.alloc(.{ + .length = @sizeOf(Element), .alignment = alignment, - }) orelse return error.OutOfMemory)); + }))); } test "makeOne" { @@ -429,6 +567,11 @@ test "swap" { try testing.expect(b == 0); } +/// +/// Mandatory context variable used by [null_writer]. +/// +const null_context: u64 = 0; + /// /// Thread-safe and lock-free [Writer] that silently consumes all given data without failure and /// throws it away. @@ -436,11 +579,13 @@ test "swap" { /// This is commonly used for testing or redirected otherwise unwanted output data that has to be /// sent somewhere for whatever reason. /// -pub const null_writer = Writer.from(struct { - fn write(buffer: []const u8) usize { +pub const null_writer = Writer.wrap(@ptrCast(*const opaque { + const Self = @This(); + + fn write(_: Self, buffer: []const u8) usize { return buffer.len; } -}.write); +}, &null_context)); test "null_writer" { const sequence = "foo"; diff --git a/src/core/meta.zig b/src/core/meta.zig index ce54238..9fa2c48 100644 --- a/src/core/meta.zig +++ b/src/core/meta.zig @@ -8,77 +8,3 @@ pub fn FnReturn(comptime Fn: type) type { return type_info.Fn.return_type orelse void; } - -/// -/// Returns a single-input single-output closure type where `In` represents the input type, `Out` -/// represents the output type, and `captures_size` represents the size of the closure context. 
-/// -pub fn Function(comptime In: type, comptime Out: type) type { - return struct { - callErased: fn (*anyopaque, In) Out, - context: *anyopaque, - - fn Invoker(comptime Context: type) type { - return if (Context == void) fn (In) Out else fn (Context, In) Out; - } - - /// - /// Function type. - /// - const Self = @This(); - - /// - /// Invokes `self` with `input`, producing a result according to the current context data. - /// - pub fn call(self: Self, input: In) Out { - return self.callErased(self.context, input); - } - - /// - /// Creates and returns a [Self] using the `invoke` as the behavior executed when [call] or - /// [callErased] is called. - /// - /// For creating a closure-style function, see [fromClosure]. - /// - pub fn from(comptime invoke: fn (In) Out) Self { - return .{ - .context = undefined, - - .callErased = struct { - fn callErased(_: *anyopaque, input: In) Out { - return invoke(input); - } - }.callErased, - }; - } - - /// - /// Creates and returns a [Self] by capturing the `context` value as the capture context and - /// `invoke` as the behavior executed when [call] or [callErased] is called. - /// - /// The newly created [Self] is returned. - /// - pub fn fromClosure(context: anytype, comptime invoke: fn (@TypeOf(context), In) Out) Self { - const Context = @TypeOf(context); - - switch (@typeInfo(Context)) { - .Pointer => |info| if (info.size == .Slice) - @compileError("`context` cannot be a slice"), - - .Void => {}, - else => @compileError("`context` must be a pointer"), - } - - return Self{ - .context = @ptrCast(*anyopaque, context), - - .callErased = struct { - fn callErased(erased: *anyopaque, input: In) Out { - return if (Context == void) invoke(input) else invoke(@ptrCast( - Context, @alignCast(@alignOf(Context), erased)), input); - } - }.callErased, - }; - } - }; -} diff --git a/src/core/stack.zig b/src/core/stack.zig index 609d66c..7cec654 100755 --- a/src/core/stack.zig +++ b/src/core/stack.zig @@ -145,38 +145,69 @@ pub const FixedPushError = error { /// memory pool to linearly allocate memory from. /// pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator { - return io.Allocator.fromClosure(fixed_stack, struct { - fn alloc(stack: *Fixed(u8), allocation: io.Allocation) ?[*]u8 { - if (allocation.existing) |buffer| if (allocation.size == 0) { - // Deallocate the memory. - const buffer_address = @ptrToInt(buffer); - const stack_address = @ptrToInt(stack.buffer.ptr); + const FixedStack = @TypeOf(fixed_stack.*); - // Check the buffer is within the address space of the stack buffer. If not, it - // should just be returned to let the caller know it cannot be freed. - if (buffer_address < stack_address or buffer_address >= - (stack_address + stack.filled)) return buffer; + return io.Allocator.wrap(@ptrCast(*opaque { + const Self = @This(); - // TODO: Investigate ways of actually freeing if it is the last allocation. - return null; - } else { - // TODO: Investigate ways of in-place relocating if it is the last allocation. - }; - - // Reallocate / allocate the memory. + pub fn alloc(self: *Self, layout: io.AllocationLayout) io.AllocationError![*]u8 { // TODO: Remove stdlib dependency. 
+ const stack = self.stackCast(); + const adjusted_offset = @import("std").mem.alignPointerOffset(stack.buffer.ptr + - stack.filled, allocation.alignment) orelse return null; + stack.filled, layout.alignment) orelse return error.OutOfMemory; const head = stack.filled + adjusted_offset; - const tail = head + allocation.size; + const tail = head + layout.length; - stack.pushMany(0, tail) catch return null; + stack.pushMany(0, tail) catch return error.OutOfMemory; return stack.buffer[head .. tail].ptr; - } - }.alloc); + + pub fn dealloc(self: *Self, allocation: [*]u8) void { + // Deallocate the memory. + const stack = self.stackCast(); + const allocation_address = @ptrToInt(allocation); + const stack_address = @ptrToInt(stack.buffer.ptr); + + // Check the buffer is within the address space of the stack buffer. If not, it cannot + // be freed. + if (allocation_address < stack_address or allocation_address >= + (stack_address + stack.filled)) unreachable; + + // TODO: Investigate ways of actually freeing if it is the last allocation. + } + + pub fn realloc(self: *Self, allocation: [*]u8, + layout: io.AllocationLayout) io.AllocationError![*]u8 { + + // TODO: Investigate ways of in-place relocating if it is the last allocation. + // TODO: Remove stdlib dependency. + const stack = self.stackCast(); + const allocation_address = @ptrToInt(allocation); + const stack_address = @ptrToInt(stack.buffer.ptr); + + // Check the buffer is within the address space of the stack buffer. If not, it cannot + // be reallocated. + if (allocation_address < stack_address or allocation_address >= + (stack_address + stack.filled)) unreachable; + + const adjusted_offset = @import("std").mem.alignPointerOffset(stack.buffer.ptr + + stack.filled, layout.alignment) orelse return error.OutOfMemory; + + const head = stack.filled + adjusted_offset; + const tail = head + layout.length; + + stack.pushMany(0, tail) catch return error.OutOfMemory; + + return stack.buffer[head .. tail].ptr; + } + + fn stackCast(self: *Self) *Fixed(u8) { + return @ptrCast(*FixedStack, @alignCast(@alignOf(FixedStack), self)); + } + }, fixed_stack)); } test "fixedAllocator" { @@ -185,14 +216,11 @@ test "fixedAllocator" { const allocator = fixedAllocator(&stack); // Allocation - var block_memory = allocator.call(.{ - .existing = null, + var block_memory = try allocator.alloc(.{ .alignment = @alignOf(u64), - .size = @sizeOf(u64), + .length = @sizeOf(u64), }); - try testing.expect(block_memory != null); - const buffer_address_head = @ptrToInt(&buffer); const buffer_address_tail = @ptrToInt(&buffer) + buffer.len; @@ -204,14 +232,11 @@ test "fixedAllocator" { } // Reallocation. - block_memory = allocator.call(.{ - .existing = block_memory, + block_memory = try allocator.realloc(block_memory, .{ .alignment = @alignOf(u64), - .size = @sizeOf(u64), + .length = @sizeOf(u64), }); - try testing.expect(block_memory != null); - { const block_memory_address = @ptrToInt(block_memory); @@ -220,11 +245,7 @@ test "fixedAllocator" { } // Deallocation. - try testing.expect(allocator.call(.{ - .existing = block_memory, - .alignment = 0, - .size = 0, - }) == null); + allocator.dealloc(block_memory); } /// @@ -234,15 +255,23 @@ test "fixedAllocator" { /// referenced by `fixed_stack` until it is full. 
/// pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer { - return io.Writer.fromClosure(fixed_stack, struct { - fn write(stack: *Fixed(u8), buffer: []const u8) usize { - stack.pushAll(buffer) catch |err| switch (err) { + const FixedStack = @TypeOf(fixed_stack.*); + + return io.Writer.wrap(@ptrCast(*opaque { + const Self = @This(); + + fn stackCast(self: *Self) *Fixed(u8) { + return @ptrCast(*FixedStack, @alignCast(@alignOf(FixedStack), self)); + } + + pub fn write(self: *Self, buffer: []const u8) io.AccessError!usize { + self.stackCast().pushAll(buffer) catch |err| switch (err) { error.BufferOverflow => return 0, }; return buffer.len; } - }.write); + }, fixed_stack)); } test "fixedWriter" { @@ -250,6 +279,8 @@ test "fixedWriter" { var sequence_stack = Fixed(u8){.buffer = &buffer}; const sequence_data = [_]u8{8, 16, 32, 64}; - try testing.expect(fixedWriter(&sequence_stack).call(&sequence_data) == sequence_data.len); + try testing.expect((try fixedWriter(&sequence_stack). + write(&sequence_data)) == sequence_data.len); + try testing.expect(io.equals(u8, sequence_stack.buffer, &sequence_data)); } diff --git a/src/core/table.zig b/src/core/table.zig index 8cfb978..174449a 100644 --- a/src/core/table.zig +++ b/src/core/table.zig @@ -29,6 +29,11 @@ pub fn Hashed(comptime Key: type, comptime Value: type, maybe_next_index: ?usize = null, }; + /// + /// Errors that may occur during initialization of a hash table. + /// + pub const InitError = io.AllocationError; + /// /// Hash table type. /// @@ -46,9 +51,9 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// /// Initializes a [Self] using `allocator` as the memory allocation strategy. /// - /// Returns a new [Self] value or an [io.MakeError] if initializing failed. + /// Returns a new [Self] value or an [InitError] if initializing failed. /// - pub fn init(allocator: Allocator) io.MakeError!Self { + pub fn init(allocator: Allocator) InitError!Self { const capacity = 4; return Self{ @@ -166,7 +171,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type, /// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key /// already present. /// -pub const InsertError = io.MakeError || error { +pub const InsertError = io.AllocationError || error { KeyExists, }; diff --git a/src/core/unicode.zig b/src/core/unicode.zig index de6279f..13e9a21 100644 --- a/src/core/unicode.zig +++ b/src/core/unicode.zig @@ -7,7 +7,7 @@ const testing = @import("./testing.zig"); /// [PrintError.WriteFailure] occurs when the underlying [io.Writer] implementation failed to write /// the entirety of a the requested print operation. /// -pub const PrintError = error { +pub const PrintError = io.AccessError || error { WriteFailure, }; @@ -69,7 +69,7 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void if (value == 0) { const zero = "0"; - if (writer.call(zero) != zero.len) return error.WriteFailure; + if ((try writer.write(zero)) != zero.len) return error.WriteFailure; } else { // Big enough to hold the hexadecimal representation of the integer type, which is // the largest number format accomodated for in [Radix]. @@ -95,7 +95,7 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void for (buffer[0 .. (buffer_count / 2)]) |_, i| io.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]); - if (writer.call(buffer[0 .. buffer_count]) != buffer_count) + if ((try writer.write(buffer[0 .. 
buffer_count])) != buffer_count) return error.WriteFailure; } }, diff --git a/src/ona/main.zig b/src/ona/main.zig index 0045c34..6bf9f79 100644 --- a/src/ona/main.zig +++ b/src/ona/main.zig @@ -13,22 +13,19 @@ pub fn main() anyerror!void { /// Runs the game engine. /// fn runEngine(app: *sys.App, graphics: *sys.Graphics) anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - defer _ = gpa.deinit(); - { - var file_reader = try app.data.openRead(try sys.Path.from(&.{"ona.lua"})); + const path = try sys.Path.from(&.{"ona.lua"}); + var file_reader = try app.data.openFileReader(path); defer file_reader.close(); - const file_size = try file_reader.size(); - const allocator = gpa.allocator(); - const buffer = try allocator.alloc(u8, file_size); + const file_size = (try app.data.query(path)).length; + const allocator = sys.threadSafeAllocator(); + const buffer = (try core.io.makeMany(u8, allocator, file_size))[0 .. file_size]; - defer allocator.free(buffer); + defer core.io.free(allocator, buffer); - if ((try file_reader.read(0, buffer)) != file_size) return error.ScriptLoadFailure; + if ((try file_reader.read(buffer)) != file_size) return error.ScriptLoadFailure; app.log(.debug, buffer); } diff --git a/src/ona/oar.zig b/src/ona/oar.zig index 05ee2df..d22b7ae 100644 --- a/src/ona/oar.zig +++ b/src/ona/oar.zig @@ -2,13 +2,13 @@ const core = @import("core"); const sys = @import("./sys.zig"); /// -/// Metadata of an Oar archive entry. +/// Metadata of an Oar archive file entry. /// -const Block = extern struct { +const Entry = extern struct { signature: [signature_magic.len]u8 = signature_magic, path: sys.Path = sys.Path.empty, - data_size: u64 = 0, - data_head: u64 = 0, + data_offset: u64 = 0, + data_length: u64 = 0, padding: [232]u8 = [_]u8{0} ** 232, comptime { @@ -19,84 +19,14 @@ const Block = extern struct { }; /// -/// Reference to a file entry in an Oar archive, denoting the starting offset from the top of head -/// of the file and its size. +/// [FindError.ArchiveUnsupported] occurs when trying to read a file that does not follow an Oar +/// archive format considered valid by this implemenatation. /// -pub const Entry = struct { - head: u64, - size: u64, - - /// - /// [FindError.EntryNotFound] occurs when no entry matched the parameters of the find operation. - /// - /// [FindError.ArchiveUnsupported] occurs if the file provided to the find operation is not a - /// valid archive file. - /// - pub const FindError = error { - EntryNotFound, - ArchiveUnsupported, - }; - - /// - /// Attempts to perform a binary search on the entry blocks defined in `archive_file` for one - /// matching `entry_path`, returning an [Entry] referencing its data or a [FindError] if it - /// failed. - /// - /// **Note** that this operation has `O(log n)` time complexity. - /// - pub fn find(archive_file: *sys.ReadableFile, entry_path: sys.Path) FindError!Entry { - var header = Header{}; - const header_size = @sizeOf(Header); - const io = core.io; - - if (((archive_file.read(0, io.bytesOf(&header)) catch - return error.ArchiveUnsupported) != header_size) or - (!io.equals(u8, &header.signature, &signature_magic)) or - (header.revision != revision_magic) or - (header.entry_head <= header_size)) return error.ArchiveUnsupported; - - // Read file table. 
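// Editor's note on the search below: every [Entry] record is a fixed 512 bytes, so the record at
// index `midpoint` always begins at byte `header_size + (entry_size * midpoint)` of the archive.
// Provided the archive writer emits its entry table sorted by path (which the binary search
// presumes), each comparison halves the candidate range, so even a table of 1024 entries is
// resolved in roughly ten reads.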
- var head: usize = 0; - var tail: usize = (header.entry_count - 1); - const block_size = @sizeOf(Block); - - while (head <= tail) { - var block = Block{}; - const midpoint = (head + (tail - head) / 2); - - if ((archive_file.read(header.entry_head + (block_size * midpoint), io.bytesOf(&block)) - catch return error.ArchiveUnsupported) != block_size) return error.EntryNotFound; - - const comparison = entry_path.compare(block.path); - - if (comparison == 0) return Entry{ - .head = block.data_head, - .size = block.data_size, - }; - - if (comparison > 0) { - head = (midpoint + 1); - } else { - tail = (midpoint - 1); - } - } - - return error.EntryNotFound; - } - - /// - /// Reads the data from `entry` in `archive_file` from the byte at the entry-relative `offset` - /// into `buffer` until either the end of the entry data, end of archive file, or end of buffer - /// is reached. - /// - /// The number of bytes read is returned or [sys.FileError] if it failed. - /// - pub fn read(entry: Entry, archive_file: *sys.ReadableFile, - offset: u64, buffer: []u8) sys.FileError!usize { - - return archive_file.read(entry.head + offset, - buffer[0 .. core.math.min(usize, buffer.len, entry.size)]); - } +/// [FindError.EntryNotFound] occurs when the queried entry was not found in the archive file. +/// +pub const FindError = core.io.AccessError || error { + ArchiveUnsupported, + EntryNotFound, }; /// @@ -106,8 +36,7 @@ const Header = extern struct { signature: [signature_magic.len]u8 = signature_magic, revision: u8 = revision_magic, entry_count: u32 = 0, - entry_head: u64 = 0, - padding: [496]u8 = [_]u8{0} ** 496, + padding: [502]u8 = [_]u8{0} ** 502, comptime { const size = @sizeOf(@This()); @@ -117,7 +46,54 @@ const Header = extern struct { }; /// -/// The magic revision number that this Oar software implementation understands. +/// Attempts to find an [Entry] with a path name matching `path` in `archive_reader`. +/// +/// An [Entry] value is returned if a match was found, otherwise [FindError] if it failed. +/// +pub fn findEntry(archive_reader: sys.FileReader, path: sys.Path) FindError!Entry { + var header = Header{}; + const header_size = @sizeOf(Header); + const io = core.io; + + if ((try archive_reader.read(io.bytesOf(&header))) != header_size) + return error.ArchiveUnsupported; + + if (!io.equals(u8, &header.signature, &signature_magic)) + return error.ArchiveUnsupported; + + if (header.revision != revision_magic) return error.ArchiveUnsupported; + + // Read file table. + var head: u64 = 0; + var tail: u64 = (header.entry_count - 1); + const entry_size = @sizeOf(Entry); + + while (head <= tail) { + var entry = Entry{}; + const midpoint = head + ((tail - head) / 2); + const offset = header_size + (entry_size * midpoint); + + try archive_reader.seek(offset); + + if ((try archive_reader.read(io.bytesOf(&entry))) != entry_size) + return error.ArchiveUnsupported; + + const comparison = path.compare(entry.path); + + if (comparison == 0) return entry; + + if (comparison > 0) { + head = (midpoint + 1); + } else { + tail = (midpoint - 1); + } + } + + return error.EntryNotFound; +} + +/// +/// Magic revision number that this Oar software implementation understands. /// const revision_magic = 0; diff --git a/src/ona/sys.zig b/src/ona/sys.zig index 4682760..c5b66a0 100644 --- a/src/ona/sys.zig +++ b/src/ona/sys.zig @@ -93,86 +93,106 @@ pub const App = struct { }; /// +/// Snapshotted information about the status of a file. 
/// -/// -pub const ReadableFile = opaque { - /// - /// - /// - pub fn close(readable_file: *ReadableFile) void { - if (ext.SDL_RWclose(readable_file.rwOpsCast()) != 0) - return ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, - "Attempt to close an invalid file reference"); - } - - /// - /// - /// - pub fn read(readable_file: *ReadableFile, offset: u64, buffer: []u8) FileError!u64 { - const rw_ops = readable_file.rwOpsCast(); - - { - ext.SDL_ClearError(); - - const math = core.math; - const min = math.min; - const maxIntValue = math.maxIntValue; - var sought = min(u64, offset, maxIntValue(i64)); - - if (ext.SDL_RWseek(rw_ops, @intCast(i64, sought), ext.RW_SEEK_SET) < 0) - return error.FileInaccessible; - - var to_seek = offset - sought; - - while (to_seek != 0) { - sought = min(u64, to_seek, maxIntValue(i64)); - - ext.SDL_ClearError(); - - if (ext.SDL_RWseek(rw_ops, @intCast(i64, sought), ext.RW_SEEK_CUR) < 0) - return error.FileInaccessible; - - to_seek -= sought; - } - } - - ext.SDL_ClearError(); - - const buffer_read = ext.SDL_RWread(rw_ops, buffer.ptr, @sizeOf(u8), buffer.len); - - if ((buffer_read == 0) and (ext.SDL_GetError() != null)) - return error.FileInaccessible; - - return buffer_read; - } - - /// - /// - /// - pub fn rwOpsCast(readable_file: *ReadableFile) *ext.SDL_RWops { - return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), readable_file)); - } - - /// - /// - /// - pub fn size(readable_file: *ReadableFile) FileError!u64 { - ext.SDL_ClearError(); - - const byte_size = ext.SDL_RWsize(readable_file.rwOpsCast()); - - if (byte_size < 0) return error.FileInaccessible; - - return @intCast(u64, byte_size); - } +pub const FileStatus = struct { + length: u64, }; /// -/// [Error.FileInaccessible] is a generic catch-all for a [FileAccess] reference no longer pointing -/// to a file or the file becomming invalid for whatever reason. +/// Interface for working with bi-directional, streamable resources accessed through a file-system. /// -pub const FileError = error { - FileInaccessible, +pub const FileReader = struct { + context: *anyopaque, + + vtable: *const struct { + close: fn (*anyopaque) void, + read: fn (*anyopaque, []u8) core.io.AccessError!u64, + seek: fn (*anyopaque, u64) core.io.AccessError!void, + }, + + /// + /// Closes the `file_reader`, logging a wraning if the `file_reader` is already considered + /// closed. + /// + pub fn close(file_reader: FileReader) void { + file_reader.vtable.close(file_reader.context); + } + + /// + /// Attempts to read from `file_reader` into `buffer`, returning the number of bytes + /// successfully read or [core.io.AccessError] if it failed. + /// + pub fn read(file_reader: FileReader, buffer: []u8) core.io.AccessError!u64 { + return file_reader.vtable.read(file_reader.context, buffer); + } + + /// + /// Attempts to seek from the beginning of `file_reader` to `cursor` bytes in, returning + /// [core.io.AccessError] if it failed. + /// + pub fn seek(file_reader: FileReader, cursor: u64) core.io.AccessError!void { + return file_reader.vtable.seek(file_reader.context, cursor); + } + + /// + /// Wraps `implementation`, returning a [FileReader] value. 
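// Editor's sketch (not part of the patch): any struct exposing matching `close`, `read`, and
// `seek` methods can back a [FileReader] through [FileReader.wrap]. The memory-backed
// `BufferReader` below is an assumed example type, handy for exercising reader consumers in
// tests without touching a real file.
//
//     const BufferReader = struct {
//         bytes: []const u8,
//         cursor: usize = 0,
//
//         pub fn close(_: *BufferReader) void {}
//
//         pub fn read(self: *BufferReader, buffer: []u8) core.io.AccessError!u64 {
//             if (self.cursor >= self.bytes.len) return 0;
//
//             const remaining = self.bytes[self.cursor ..];
//             const read_count = core.math.min(usize, remaining.len, buffer.len);
//
//             core.io.copy(u8, buffer[0 .. read_count], remaining[0 .. read_count]);
//
//             self.cursor += read_count;
//
//             return read_count;
//         }
//
//         pub fn seek(self: *BufferReader, cursor: u64) core.io.AccessError!void {
//             self.cursor = @intCast(usize, cursor);
//         }
//     };
//
//     var buffer_reader = BufferReader{.bytes = "oar archive bytes"};
//     const file_reader = FileReader.wrap(&buffer_reader);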
+ /// + pub fn wrap(implementation: anytype) FileReader { + const Implementation = @TypeOf(implementation.*); + + return .{ + .context = @ptrCast(*anyopaque, implementation), + + .vtable = switch (@typeInfo(Implementation)) { + .Struct => &.{ + .close = struct { + fn call(context: *anyopaque) void { + @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).close(); + } + }.call, + + .read = struct { + fn call(context: *anyopaque, buffer: []u8) core.io.AccessError!u64 { + return @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).read(buffer); + } + }.call, + + .seek = struct { + fn call(context: *anyopaque, cursor: u64) core.io.AccessError!void { + return @ptrCast(*Implementation, @alignCast( + @alignOf(Implementation), context)).seek(cursor); + } + }.call, + }, + + .Opaque => &.{ + .close = struct { + fn call(context: *anyopaque) void { + @ptrCast(*Implementation, context).close(); + } + }.call, + + .read = struct { + fn call(context: *anyopaque, buffer: []u8) core.io.AccessError!u64 { + return @ptrCast(*Implementation, context).read(buffer); + } + }.call, + + .seek = struct { + fn call(context: *anyopaque, cursor: u64) core.io.AccessError!void { + return @ptrCast(*Implementation, context).seek(cursor); + } + }.call, + }, + + else => @compileError( + "`context` must a single-element pointer referencing a struct or opaque type"), + }, + }; + } }; /// @@ -181,42 +201,84 @@ pub const FileError = error { /// pub const FileSystem = union(enum) { native: []const u8, - archive_file: *ReadableFile, + + archive: struct { + file_system: *const FileSystem, + path: Path, + }, /// - /// With files typically being backed by a block device, they can produce a variety of errors - - /// from physical to virtual errors - these are all encapsulated by the API as general - /// [OpenError.FileNotFound] errors. + /// [AccessError.FileNotFound] occurs when a queried file could not be found on the file-system + /// by the process. This may mean the file does not exist, however it may also mean that the + /// process does not have sufficient rights to read it. /// - /// When a given [FileSystem] does not support a specified [OpenMode], - /// [OpenError.ModeUnsupported] is used to inform the consuming code that another [OpenMode] - /// should be tried or, if no mode other is suitable, that the resource is effectively - /// unavailable. + /// [AccessError.FileSystemfailure] denotes a file-system implementation-specific failure to + /// access resources has occured and therefore cannot proceed to access the file. /// - /// If the number of known [FileAccess] handles has been exhausted, [OpenError.OutOfFiles] is - /// used to communicate this. - /// - pub const OpenError = error { + pub const AccessError = error { FileNotFound, - ModeUnsupported, - OutOfFiles, + FileSystemFailure, }; /// - /// Attempts to open the file identified by `path` with `mode` as the mode for opening the file. + /// Attempts to open the file identified by `path` on `file_system` for reading, returning a + /// [FileReader] value that provides access to the opened file or [AccessError] if it failed. /// - /// Returns a [ReadableFile] reference that provides access to the file referenced by `path`or a - /// [OpenError] if it failed. 
- /// - pub fn openRead(file_system: *const FileSystem, path: Path) OpenError!*ReadableFile { - switch (file_system.*) { - .archive_file => |archive_file| { - const entry = oar.Entry.find(archive_file, path) catch return error.FileNotFound; + pub fn openFileReader(file_system: FileSystem, path: Path) AccessError!FileReader { + switch (file_system) { + .archive => |archive| { + const archive_reader = try archive.file_system.openFileReader(archive.path); - _ = entry; - // TODO: Alloc file context. + errdefer archive_reader.close(); - return error.FileNotFound; + const entry = oar.findEntry(archive_reader, path) catch |err| return switch (err) { + error.ArchiveUnsupported, error.Inaccessible => error.FileSystemFailure, + error.EntryNotFound => error.FileNotFound, + }; + + archive_reader.seek(entry.data_offset) catch return error.FileSystemFailure; + + const io = core.io; + + const allocator = threadSafeAllocator(); + + const entry_reader = io.makeOne(struct { + allocator: io.Allocator, + base_reader: FileReader, + cursor: u64, + offset: u64, + length: u64, + + const Self = @This(); + + pub fn close(self: *Self) void { + self.base_reader.close(); + io.free(self.allocator, self); + } + + pub fn read(self: *Self, buffer: []u8) io.AccessError!u64 { + try self.base_reader.seek(self.offset + self.cursor); + + return self.base_reader.read(buffer[0 .. + core.math.min(usize, buffer.len, self.length)]); + } + + pub fn seek(self: *Self, cursor: u64) io.AccessError!void { + self.cursor = cursor; + } + }, allocator) catch return error.FileSystemFailure; + + errdefer io.free(allocator, entry_reader); + + entry_reader.* = .{ + .allocator = allocator, + .base_reader = archive_reader, + .cursor = 0, + .offset = entry.data_offset, + .length = entry.data_length, + }; + + return FileReader.wrap(entry_reader); }, .native => |native| { @@ -239,8 +301,124 @@ pub const FileSystem = union(enum) { ext.SDL_ClearError(); - return @ptrCast(*ReadableFile, ext.SDL_RWFromFile(&path_buffer, "rb") - orelse return error.FileNotFound); + const rw_ops = + ext.SDL_RWFromFile(&path_buffer, "rb") orelse return error.FileNotFound; + + errdefer _ = ext.SDL_RWclose(rw_ops); + + return FileReader.wrap(@ptrCast(*opaque { + const Self = @This(); + + fn rwOpsCast(self: *Self) *ext.SDL_RWops { + return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), self)); + } + + pub fn read(self: *Self, buffer: []u8) core.io.AccessError!u64 { + ext.SDL_ClearError(); + + const bytes_read = + ext.SDL_RWread(self.rwOpsCast(), buffer.ptr, @sizeOf(u8), buffer.len); + + if ((bytes_read == 0) and (ext.SDL_GetError() != null)) + return error.Inaccessible; + + return bytes_read; + } + + pub fn seek(self: *Self, cursor: u64) core.io.AccessError!void { + ext.SDL_ClearError(); + + const math = core.math; + const min = math.min; + const maxIntValue = math.maxIntValue; + var sought = min(u64, cursor, maxIntValue(i64)); + const ops = self.rwOpsCast(); + + if (ext.SDL_RWseek(ops, @intCast(i64, sought), ext.RW_SEEK_SET) < 0) + return error.Inaccessible; + + var to_seek = cursor - sought; + + while (to_seek != 0) { + sought = min(u64, to_seek, maxIntValue(i64)); + + ext.SDL_ClearError(); + + if (ext.SDL_RWseek(ops, @intCast(i64, sought), ext.RW_SEEK_CUR) < 0) + return error.Inaccessible; + + to_seek -= sought; + } + } + + pub fn close(self: *Self) void { + ext.SDL_ClearError(); + + if (ext.SDL_RWclose(self.rwOpsCast()) != 0) + return ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, + "Attempt to close an invalid file reference"); + } + }, rw_ops)); + }, + } 
+    }
+
+    ///
+    /// Attempts to query the status of the file identified by `path` on `file_system` for reading,
+    /// returning a [FileStatus] value containing the state of the file at that moment or
+    /// [AccessError] if it failed.
+    ///
+    pub fn query(file_system: FileSystem, path: Path) AccessError!FileStatus {
+        switch (file_system) {
+            .archive => |archive| {
+                const archive_reader = try archive.file_system.openFileReader(archive.path);
+
+                defer archive_reader.close();
+
+                const entry = oar.findEntry(archive_reader, path) catch |err| return switch (err) {
+                    error.ArchiveUnsupported, error.Inaccessible => error.FileSystemFailure,
+                    error.EntryNotFound => error.FileNotFound,
+                };
+
+                return FileStatus{
+                    .length = entry.data_length,
+                };
+            },
+
+            .native => |native| {
+                if (native.len == 0) return error.FileNotFound;
+
+                var path_buffer = [_]u8{0} ** 4096;
+                const seperator_length = @boolToInt(native[native.len - 1] != Path.seperator);
+
+                if ((native.len + seperator_length + path.length) >= path_buffer.len)
+                    return error.FileNotFound;
+
+                const io = core.io;
+
+                io.copy(u8, &path_buffer, native);
+
+                if (seperator_length != 0) path_buffer[native.len] = Path.seperator;
+
+                io.copy(u8, path_buffer[native.len .. path_buffer.len],
+                    path.buffer[0 .. path.length]);
+
+                ext.SDL_ClearError();
+
+                const rw_ops =
+                    ext.SDL_RWFromFile(&path_buffer, "rb") orelse return error.FileSystemFailure;
+
+                defer if (ext.SDL_RWclose(rw_ops) != 0) unreachable;
+
+                ext.SDL_ClearError();
+
+                const size = ext.SDL_RWsize(rw_ops);
+
+                if (size < 0) return error.FileSystemFailure;
+
+                return FileStatus{
+                    .length = @intCast(u64, size),
+                };
             },
         }
     }
@@ -346,7 +524,8 @@ pub const Path = extern struct {
 };
 
 ///
-///
+/// Returns a value above `0` if the path of `this` is greater than `that`, below `0` if it is
+/// less, or `0` if they are identical.
 ///
 pub fn compare(this: Path, that: Path) isize {
     return core.io.compareBytes(this.buffer[0 ..this.length], that.buffer[0 .. that.length]);
@@ -432,16 +611,29 @@ pub const RunError = error {
 };
 
 ///
-/// Returns a [core.io.Allocator] bound to the underlying system allocator.
+/// Returns a thread-safe [core.io.Allocator] value based on the default system allocation strategy.
 ///
-pub fn allocator() core.io.Allocator {
-    // TODO: Add leak detection.
- return .{ - .bound = .{ - .alloc = ext.SDL_alloc, - .dealloc = ext.SDL_free, - }, - }; +pub fn threadSafeAllocator() core.io.Allocator { + const io = core.io; + + return io.Allocator.wrap(@as(*opaque { + const Self = @This(); + + pub fn alloc(_: *Self, layout: io.AllocationLayout) io.AllocationError![*]u8 { + return @ptrCast([*]u8, ext.SDL_malloc(layout.length) orelse return error.OutOfMemory); + } + + pub fn realloc(_: *Self, allocation: [*]u8, + layout: io.AllocationLayout) io.AllocationError![*]u8 { + + return @ptrCast([*]u8, ext.SDL_realloc(allocation, layout.length) + orelse return error.OutOfMemory); + } + + pub fn dealloc(_: *Self, allocation: [*]u8) void { + ext.SDL_free(allocation); + } + }, undefined)); } /// @@ -453,16 +645,11 @@ pub fn allocator() core.io.Allocator { pub fn display(comptime Error: anytype, comptime run: fn (*App, *Graphics) callconv(.Async) Error!void) (RunError || Error)!void { - var cwd = FileSystem{.native = "./"}; + const cwd = FileSystem{.native = "./"}; const user_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse return error.InitFailure; defer ext.SDL_free(user_prefix); - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - - defer if (gpa.deinit()) - ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION, "Runtime allocator leaked memory"); - var app = App{ .user = .{.native = std.mem.sliceTo(user_prefix, 0)}, @@ -480,17 +667,13 @@ pub fn display(comptime Error: anytype, return error.InitFailure; }, - .data = .{ - .archive_file = cwd.openRead(try Path.from(&.{"./data.oar"})) catch { - ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load ./data.oar"); - - return error.InitFailure; - }, - }, + .data = .{.archive = .{ + .file_system = &cwd, + .path = try Path.from(&.{"./data.oar"}), + }}, }; defer { - app.data.archive_file.close(); ext.SDL_DestroySemaphore(app.message_semaphore); ext.SDL_DestroyMutex(app.message_mutex); } -- 2.34.1
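The FileReader introduced by this patch adapts any struct or opaque type exposing close/read/seek
into a uniform vtable through FileReader.wrap. Below is a minimal, illustrative sketch of that
wrapping pattern using a hypothetical in-memory reader; it is not part of the patch series, and the
import paths, the test harness, and the FixedReader type are assumptions for illustration only. The
only member of core.io.AccessError it relies on is error.Inaccessible, as used elsewhere in this
patch.

const std = @import("std");

// Assumed module layout; adjust to however `core` and `sys` are actually exposed by build.zig.
const core = @import("core");
const sys = @import("sys.zig");

/// Hypothetical struct-backed implementation used only to demonstrate FileReader.wrap.
const FixedReader = struct {
    bytes: []const u8,
    cursor: u64 = 0,

    pub fn close(_: *FixedReader) void {
        // Nothing to release for an in-memory buffer.
    }

    pub fn read(self: *FixedReader, buffer: []u8) core.io.AccessError!u64 {
        const offset = @intCast(usize, self.cursor);

        if (offset >= self.bytes.len) return 0;

        const remaining = self.bytes[offset ..];
        const copied = std.math.min(buffer.len, remaining.len);

        std.mem.copy(u8, buffer[0 .. copied], remaining[0 .. copied]);

        self.cursor += copied;

        return copied;
    }

    pub fn seek(self: *FixedReader, cursor: u64) core.io.AccessError!void {
        if (cursor > self.bytes.len) return error.Inaccessible;

        self.cursor = cursor;
    }
};

test "wrapping a struct implementation as a FileReader" {
    var fixed = FixedReader{.bytes = "hello"};
    const reader = sys.FileReader.wrap(&fixed);

    defer reader.close();

    var buffer = [_]u8{0} ** 8;

    // Seeking past the first byte and then reading should yield the remaining four bytes.
    try reader.seek(1);
    try std.testing.expectEqual(@as(u64, 4), try reader.read(buffer[0 ..]));
}

The opaque branch of wrap() follows the same shape; the SDL_RWops-backed native reader in this
patch relies on it so the rw_ops pointer itself serves as the context, avoiding a separate
allocation.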