Implement loading of Oar archive entry-backed files
continuous-integration/drone/pr: Build is failing
continuous-integration/drone/push: Build is failing

kayomn 2022-11-07 23:51:45 +00:00
parent 4f0224a029
commit 47a997b0ec
8 changed files with 647 additions and 384 deletions


@@ -4,31 +4,129 @@ const stack = @import("./stack.zig");
const testing = @import("./testing.zig");

///
/// [AccessError.Inaccessible] is a generic catch-all for IO resources that are inaccessible for
/// implementation-specific reasons.
///
pub const AccessError = error {
Inaccessible,
};

///
/// [AllocationError.OutOfMemory] occurs if the requested amount of memory could not be allocated.
///
pub const AllocationError = error {
OutOfMemory,
};

///
/// Memory layout description for a memory allocation.
///
pub const AllocationLayout = struct {
length: usize,
alignment: u29 = 8,
};
///
/// Interface for dynamic memory allocation through the state machine of the wrapped allocator
/// implementation.
///
pub const Allocator = struct {
context: *anyopaque,
vtable: *const struct {
alloc: fn (*anyopaque, AllocationLayout) AllocationError![*]u8,
dealloc: fn (*anyopaque, [*]u8) void,
realloc: fn (*anyopaque, [*]u8, AllocationLayout) AllocationError![*]u8,
},
///
/// Attempts to allocate a block of memory from `allocator` according to `layout`, returning it
/// or [AllocationError] if it failed.
///
pub fn alloc(allocator: Allocator, layout: AllocationLayout) AllocationError![*]u8 {
return allocator.vtable.alloc(allocator.context, layout);
}
///
/// Deallocates the block of memory from `allocator` referenced by `allocation`.
///
pub fn dealloc(allocator: Allocator, allocation: [*]u8) void {
allocator.vtable.dealloc(allocator.context, allocation);
}
///
/// Attempts to reallocate the existing block of memory from `allocator` referenced by
/// `allocation` according to `layout`, returning it or [AllocationError] if it failed.
///
pub fn realloc(allocator: Allocator, allocation: [*]u8,
layout: AllocationLayout) AllocationError![*]u8 {
return allocator.vtable.realloc(allocator.context, allocation, layout);
}
///
/// Wraps `implementation`, returning the [Allocator] value.
///
pub fn wrap(implementation: anytype) Allocator {
const Implementation = @TypeOf(implementation.*);
return .{
.context = @ptrCast(*anyopaque, implementation),
.vtable = switch (@typeInfo(Implementation)) {
.Struct => &.{
.alloc = struct {
fn call(context: *anyopaque, layout: AllocationLayout) AllocationError![*]u8 {
return @ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).alloc(layout);
}
}.call,
.dealloc = struct {
fn call(context: *anyopaque, allocation: [*]u8) void {
return @ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).dealloc(allocation);
}
}.call,
.realloc = struct {
fn call(context: *anyopaque, allocation: [*]u8,
layout: AllocationLayout) AllocationError![*]u8 {
return @ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).realloc(allocation, layout);
}
}.call,
},
.Opaque => &.{
.alloc = struct {
fn call(context: *anyopaque, layout: AllocationLayout) AllocationError![*]u8 {
return @ptrCast(*Implementation, context).alloc(layout);
}
}.call,
.dealloc = struct {
fn call(context: *anyopaque, allocation: [*]u8) void {
return @ptrCast(*Implementation, context).dealloc(allocation);
}
}.call,
.realloc = struct {
fn call(context: *anyopaque, allocation: [*]u8,
layout: AllocationLayout) AllocationError![*]u8 {
return @ptrCast(*Implementation, context).realloc(allocation, layout);
}
}.call,
},
else => @compileError(
"`context` must be a single-element pointer referencing a struct or opaque type"),
},
};
}
};
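A brief usage sketch, not part of this commit, showing how calling code drives the vtable-based interface above; it assumes the `fixedAllocator` from stack.zig, which this file already imports.

test "Allocator interface usage sketch" {
    var buffer = [_]u8{0} ** 128;
    var fixed = stack.Fixed(u8){.buffer = &buffer};
    const allocator = stack.fixedAllocator(&fixed);

    // Request a 32-byte block aligned to 8 bytes from the wrapped implementation.
    const block = try allocator.alloc(.{.length = 32, .alignment = 8});

    // Hand the block back through the same interface.
    allocator.dealloc(block);
}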
///
/// Returns a state machine for lazily computing all `Element` components of a given source input
@@ -143,10 +241,57 @@ test "Spliterator(u8)" {
}
///
/// Interface for capturing a reference to a writable resource like block devices, memory buffers,
/// network sockets, and more.
///
pub const Writer = struct {
context: *anyopaque,
vtable: *const struct {
write: fn (*anyopaque, []const u8) AccessError!usize,
},
///
/// Wraps `implementation`, returning the [Writer] value.
///
pub fn wrap(implementation: anytype) Writer {
const Implementation = @TypeOf(implementation.*);
return .{
.context = @ptrCast(*anyopaque, implementation),
.vtable = switch (@typeInfo(Implementation)) {
.Struct => &.{
.write = struct {
fn call(context: *anyopaque, buffer: []const u8) AccessError!usize {
return @ptrCast(*Implementation,
@alignCast(@alignOf(Implementation), context)).write(buffer);
}
}.call,
},
.Opaque => &.{
.write = struct {
fn call(context: *anyopaque, buffer: []const u8) AccessError!usize {
return @ptrCast(*Implementation, context).write(buffer);
}
}.call,
},
else => @compileError(
"`context` must be a single-element pointer referencing a struct or opaque type"),
},
};
}
///
/// Attempts to write `buffer` to `writer`, returning the number of bytes successfully written or
/// [AccessError] if it failed.
///
pub fn write(writer: Writer, buffer: []const u8) AccessError!usize {
return writer.vtable.write(writer.context, buffer);
}
};
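As a hedged illustration that is not part of this commit, a struct-backed implementation can be adapted through `wrap`; `CountingWriter` below is an invented name for the example.

const CountingWriter = struct {
    written: usize = 0,

    pub fn write(self: *CountingWriter, buffer: []const u8) AccessError!usize {
        // Pretend every byte was accepted and keep a running total.
        self.written += buffer.len;

        return buffer.len;
    }
};

test "Writer.wrap usage sketch" {
    var counting = CountingWriter{};
    const writer = Writer.wrap(&counting);

    try testing.expect((try writer.write("abc")) == 3);
    try testing.expect(counting.written == 3);
}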
///
/// Returns a sliced reference of the raw bytes in `pointer`.
@@ -320,19 +465,14 @@ test "findFirstOf" {
/// `allocated_memory`. Anything else is considered unreachable logic.
///
pub fn free(allocator: Allocator, allocated_memory: anytype) void {
allocator.dealloc(@ptrCast([*]u8, switch (@typeInfo(@TypeOf(allocated_memory))) {
.Pointer => |info| switch (info.size) {
.One, .Many, .C => allocated_memory,
.Slice => allocated_memory.ptr,
},

else => @compileError("`allocated_memory` must be a pointer"),
}));
}

test "free" {
@@ -369,14 +509,13 @@ test "hashBytes" {
/// Attempts to allocate a buffer of `size` `Element`s using `allocator`, returning it or an
/// [AllocationError] if it failed.
///
pub fn makeMany(comptime Element: type, allocator: Allocator, size: usize) AllocationError![*]Element {
const alignment = @alignOf(Element);

return @ptrCast([*]Element, @alignCast(alignment, try allocator.alloc(.{
.length = @sizeOf(Element) * size,
.alignment = alignment,
})));
}

test "makeMany" {
@@ -392,14 +531,13 @@ test "makeMany" {
/// Attempts to allocate a buffer of `1` `Element` using `allocator`, returning it or an
/// [AllocationError] if it failed.
///
pub fn makeOne(comptime Element: type, allocator: Allocator) AllocationError!*Element {
const alignment = @alignOf(Element);

return @ptrCast(*Element, @alignCast(alignment, try allocator.alloc(.{
.length = @sizeOf(Element),
.alignment = alignment,
})));
}

test "makeOne" {
@@ -429,6 +567,11 @@ test "swap" {
try testing.expect(b == 0);
}

///
/// Mandatory context variable used by [null_writer].
///
const null_context: u64 = 0;

///
/// Thread-safe and lock-free [Writer] that silently consumes all given data without failure and
/// throws it away.
@@ -436,11 +579,13 @@ test "swap" {
///
/// This is commonly used for testing or redirecting otherwise unwanted output data that has to be
/// sent somewhere for whatever reason.
///
pub const null_writer = Writer.wrap(@ptrCast(*const opaque {
const Self = @This();

fn write(_: Self, buffer: []const u8) usize {
return buffer.len;
}
}, &null_context));

test "null_writer" {
const sequence = "foo";


@@ -8,77 +8,3 @@ pub fn FnReturn(comptime Fn: type) type {
return type_info.Fn.return_type orelse void;
}
///
/// Returns a single-input single-output closure type where `In` represents the input type, `Out`
/// represents the output type, and `captures_size` represents the size of the closure context.
///
pub fn Function(comptime In: type, comptime Out: type) type {
return struct {
callErased: fn (*anyopaque, In) Out,
context: *anyopaque,
fn Invoker(comptime Context: type) type {
return if (Context == void) fn (In) Out else fn (Context, In) Out;
}
///
/// Function type.
///
const Self = @This();
///
/// Invokes `self` with `input`, producing a result according to the current context data.
///
pub fn call(self: Self, input: In) Out {
return self.callErased(self.context, input);
}
///
/// Creates and returns a [Self] using the `invoke` as the behavior executed when [call] or
/// [callErased] is called.
///
/// For creating a closure-style function, see [fromClosure].
///
pub fn from(comptime invoke: fn (In) Out) Self {
return .{
.context = undefined,
.callErased = struct {
fn callErased(_: *anyopaque, input: In) Out {
return invoke(input);
}
}.callErased,
};
}
///
/// Creates and returns a [Self] by capturing the `context` value as the capture context and
/// `invoke` as the behavior executed when [call] or [callErased] is called.
///
/// The newly created [Self] is returned.
///
pub fn fromClosure(context: anytype, comptime invoke: fn (@TypeOf(context), In) Out) Self {
const Context = @TypeOf(context);
switch (@typeInfo(Context)) {
.Pointer => |info| if (info.size == .Slice)
@compileError("`context` cannot be a slice"),
.Void => {},
else => @compileError("`context` must be a pointer"),
}
return Self{
.context = @ptrCast(*anyopaque, context),
.callErased = struct {
fn callErased(erased: *anyopaque, input: In) Out {
return if (Context == void) invoke(input) else invoke(@ptrCast(
Context, @alignCast(@alignOf(Context), erased)), input);
}
}.callErased,
};
}
};
}


@@ -145,38 +145,69 @@ pub const FixedPushError = error {
/// memory pool to linearly allocate memory from.
///
pub fn fixedAllocator(fixed_stack: *Fixed(u8)) io.Allocator {
const FixedStack = @TypeOf(fixed_stack.*);

return io.Allocator.wrap(@ptrCast(*opaque {
const Self = @This();

pub fn alloc(self: *Self, layout: io.AllocationLayout) io.AllocationError![*]u8 {
// TODO: Remove stdlib dependency.
const stack = self.stackCast();

const adjusted_offset = @import("std").mem.alignPointerOffset(stack.buffer.ptr +
stack.filled, layout.alignment) orelse return error.OutOfMemory;

const head = stack.filled + adjusted_offset;
const tail = head + layout.length;

stack.pushMany(0, tail) catch return error.OutOfMemory;

return stack.buffer[head .. tail].ptr;
}
pub fn dealloc(self: *Self, allocation: [*]u8) void {
// Deallocate the memory.
const stack = self.stackCast();
const allocation_address = @ptrToInt(allocation);
const stack_address = @ptrToInt(stack.buffer.ptr);
// Check the buffer is within the address space of the stack buffer. If not, it cannot
// be freed.
if (allocation_address < stack_address or allocation_address >=
(stack_address + stack.filled)) unreachable;
// TODO: Investigate ways of actually freeing if it is the last allocation.
}
pub fn realloc(self: *Self, allocation: [*]u8,
layout: io.AllocationLayout) io.AllocationError![*]u8 {
// TODO: Investigate ways of in-place relocating if it is the last allocation.
// TODO: Remove stdlib dependency.
const stack = self.stackCast();
const allocation_address = @ptrToInt(allocation);
const stack_address = @ptrToInt(stack.buffer.ptr);
// Check the buffer is within the address space of the stack buffer. If not, it cannot
// be reallocated.
if (allocation_address < stack_address or allocation_address >=
(stack_address + stack.filled)) unreachable;
const adjusted_offset = @import("std").mem.alignPointerOffset(stack.buffer.ptr +
stack.filled, layout.alignment) orelse return error.OutOfMemory;
const head = stack.filled + adjusted_offset;
const tail = head + layout.length;
stack.pushMany(0, tail) catch return error.OutOfMemory;
return stack.buffer[head .. tail].ptr;
}
fn stackCast(self: *Self) *Fixed(u8) {
return @ptrCast(*FixedStack, @alignCast(@alignOf(FixedStack), self));
}
}, fixed_stack));
} }
test "fixedAllocator" {
@@ -185,14 +216,11 @@ test "fixedAllocator" {
const allocator = fixedAllocator(&stack);

// Allocation
var block_memory = try allocator.alloc(.{
.alignment = @alignOf(u64),
.length = @sizeOf(u64),
});

const buffer_address_head = @ptrToInt(&buffer);
const buffer_address_tail = @ptrToInt(&buffer) + buffer.len;
@@ -204,14 +232,11 @@ test "fixedAllocator" {
}

// Reallocation.
block_memory = try allocator.realloc(block_memory, .{
.alignment = @alignOf(u64),
.length = @sizeOf(u64),
});

{
const block_memory_address = @ptrToInt(block_memory);
@@ -220,11 +245,7 @@ test "fixedAllocator" {
}

// Deallocation.
allocator.dealloc(block_memory);
}
///
@@ -234,15 +255,23 @@ test "fixedAllocator" {
/// referenced by `fixed_stack` until it is full.
///
pub fn fixedWriter(fixed_stack: *Fixed(u8)) io.Writer {
const FixedStack = @TypeOf(fixed_stack.*);

return io.Writer.wrap(@ptrCast(*opaque {
const Self = @This();

fn stackCast(self: *Self) *Fixed(u8) {
return @ptrCast(*FixedStack, @alignCast(@alignOf(FixedStack), self));
}

pub fn write(self: *Self, buffer: []const u8) io.AccessError!usize {
self.stackCast().pushAll(buffer) catch |err| switch (err) {
error.BufferOverflow => return 0,
};

return buffer.len;
}
}, fixed_stack));
}
test "fixedWriter" {
@@ -250,6 +279,8 @@ test "fixedWriter" {
var sequence_stack = Fixed(u8){.buffer = &buffer};
const sequence_data = [_]u8{8, 16, 32, 64};

try testing.expect((try fixedWriter(&sequence_stack).
write(&sequence_data)) == sequence_data.len);

try testing.expect(io.equals(u8, sequence_stack.buffer, &sequence_data));
}


@@ -29,6 +29,11 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
maybe_next_index: ?usize = null,
};

///
/// Errors that may occur during initialization of a hash table.
///
pub const InitError = io.AllocationError;

///
/// Hash table type.
///
@@ -46,9 +51,9 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
///
/// Initializes a [Self] using `allocator` as the memory allocation strategy.
///
/// Returns a new [Self] value or an [InitError] if initializing failed.
///
pub fn init(allocator: Allocator) InitError!Self {
const capacity = 4;

return Self{
@@ -166,7 +171,7 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
/// [InsertError.KeyExists] occurs when an insertion was attempted on a table with a matching key
/// already present.
///
pub const InsertError = io.AllocationError || error {
KeyExists,
};


@@ -7,7 +7,7 @@ const testing = @import("./testing.zig");
/// [PrintError.WriteFailure] occurs when the underlying [io.Writer] implementation failed to write
/// the entirety of the requested print operation.
///
pub const PrintError = io.AccessError || error {
WriteFailure,
};

@@ -69,7 +69,7 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void
if (value == 0) {
const zero = "0";

if ((try writer.write(zero)) != zero.len) return error.WriteFailure;
} else {
// Big enough to hold the hexadecimal representation of the integer type, which is
// the largest number format accommodated for in [Radix].
@@ -95,7 +95,7 @@ pub fn printInt(writer: io.Writer, radix: Radix, value: anytype) PrintError!void
for (buffer[0 .. (buffer_count / 2)]) |_, i|
io.swap(u8, &buffer[i], &buffer[buffer_count - i - 1]);

if ((try writer.write(buffer[0 .. buffer_count])) != buffer_count)
return error.WriteFailure;
}
},


@@ -13,22 +13,19 @@ pub fn main() anyerror!void {
/// Runs the game engine.
///
fn runEngine(app: *sys.App, graphics: *sys.Graphics) anyerror!void {
{
const path = try sys.Path.from(&.{"ona.lua"});
var file_reader = try app.data.openFileReader(path);

defer file_reader.close();

const file_size = (try app.data.query(path)).length;
const allocator = sys.threadSafeAllocator();
const buffer = (try core.io.makeMany(u8, allocator, file_size))[0 .. file_size];

defer core.io.free(allocator, buffer);

if ((try file_reader.read(buffer)) != file_size) return error.ScriptLoadFailure;

app.log(.debug, buffer);
}


@@ -2,13 +2,13 @@ const core = @import("core");
const sys = @import("./sys.zig");

///
/// Metadata of an Oar archive file entry.
///
const Entry = extern struct {
signature: [signature_magic.len]u8 = signature_magic,
path: sys.Path = sys.Path.empty,
data_offset: u64 = 0,
data_length: u64 = 0,
padding: [232]u8 = [_]u8{0} ** 232,

comptime {
@@ -19,60 +19,68 @@ const Block = extern struct {
};
///
/// [FindError.ArchiveUnsupported] occurs when trying to read a file that does not follow an Oar
/// archive format considered valid by this implementation.
///
/// [FindError.EntryNotFound] occurs when the queried entry was not found in the archive file.
///
pub const FindError = core.io.AccessError || error {
ArchiveUnsupported,
EntryNotFound,
};

///
/// Header data that every Oar archive file starts with at byte offset `0`.
///
const Header = extern struct {
signature: [signature_magic.len]u8 = signature_magic,
revision: u8 = revision_magic,
entry_count: u32 = 0,
padding: [502]u8 = [_]u8{0} ** 502,

comptime {
const size = @sizeOf(@This());

if (size != 512) @compileError("Header is not 512 bytes");
}
};

///
/// Attempts to find an [Entry] with a path name matching `path` in `archive_reader`.
///
/// An [Entry] value is returned if a match was found, otherwise [FindError] if it failed.
///
pub fn findEntry(archive_reader: sys.FileReader, path: sys.Path) FindError!Entry {
var header = Header{};
const header_size = @sizeOf(Header);
const io = core.io;

if ((try archive_reader.read(io.bytesOf(&header))) != header_size)
return error.ArchiveUnsupported;

if (!io.equals(u8, &header.signature, &signature_magic))
return error.ArchiveUnsupported;

if (header.revision != revision_magic) return error.ArchiveUnsupported;

// Read file table.
var head: u64 = 0;
var tail: u64 = (header.entry_count - 1);
const entry_size = @sizeOf(Entry);

while (head <= tail) {
var entry = Entry{};
const midpoint = head + ((tail - head) / 2);
const offset = header_size + (entry_size * midpoint);

try archive_reader.seek(offset);

if ((try archive_reader.read(io.bytesOf(&entry))) != entry_size)
return error.ArchiveUnsupported;

const comparison = path.compare(entry.path);

if (comparison == 0) return entry;

if (comparison > 0) {
head = (midpoint + 1);
@@ -85,39 +93,7 @@ pub const Entry = struct {
}

///
/// Reads the data from `entry` in `archive_file` from the byte at the entry-relative `offset`
/// into `buffer` until either the end of the entry data, end of archive file, or end of buffer
/// is reached.
///
/// The number of bytes read is returned or [sys.FileError] if it failed.
///
pub fn read(entry: Entry, archive_file: *sys.ReadableFile,
offset: u64, buffer: []u8) sys.FileError!usize {
return archive_file.read(entry.head + offset,
buffer[0 .. core.math.min(usize, buffer.len, entry.size)]);
}
};
///
/// Header data that every Oar archive file starts with at byte offset `0`.
///
const Header = extern struct {
signature: [signature_magic.len]u8 = signature_magic,
revision: u8 = revision_magic,
entry_count: u32 = 0,
entry_head: u64 = 0,
padding: [496]u8 = [_]u8{0} ** 496,
comptime {
const size = @sizeOf(@This());
if (size != 512) @compileError("Header is not 512 bytes");
}
};
///
/// Magic revision number that this Oar software implementation understands.
///
const revision_magic = 0;
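Since the header and entries are fixed-size extern structs, the binary search above addresses the entry table arithmetically; a hedged sketch of that offset math follows, where `entryOffset` is an illustrative helper that is not part of this commit.

/// Byte offset of the entry at `index` within an archive, following the layout above.
fn entryOffset(index: u64) u64 {
    // The header occupies the first @sizeOf(Header) bytes; entries are laid out back to back
    // immediately after it, each @sizeOf(Entry) bytes long.
    return @sizeOf(Header) + (@sizeOf(Entry) * index);
}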


@@ -93,86 +93,106 @@ pub const App = struct {
};

///
/// Snapshotted information about the status of a file.
///
pub const FileStatus = struct {
length: u64,
};

///
/// Interface for working with bi-directional, streamable resources accessed through a file-system.
///
pub const FileReader = struct {
context: *anyopaque,
vtable: *const struct {
close: fn (*anyopaque) void,
read: fn (*anyopaque, []u8) core.io.AccessError!u64,
seek: fn (*anyopaque, u64) core.io.AccessError!void,
},
///
/// Closes the `file_reader`, logging a warning if the `file_reader` is already considered
/// closed.
///
pub fn close(file_reader: FileReader) void {
file_reader.vtable.close(file_reader.context);
}
///
/// Attempts to read from `file_reader` into `buffer`, returning the number of bytes
/// successfully read or [core.io.AccessError] if it failed.
///
pub fn read(file_reader: FileReader, buffer: []u8) core.io.AccessError!u64 {
return file_reader.vtable.read(file_reader.context, buffer);
}
///
/// Attempts to seek from the beginning of `file_reader` to `cursor` bytes in, returning
/// [core.io.AccessError] if it failed.
///
pub fn seek(file_reader: FileReader, cursor: u64) core.io.AccessError!void {
return file_reader.vtable.seek(file_reader.context, cursor);
}
///
/// Wraps `implementation`, returning a [FileReader] value.
///
pub fn wrap(implementation: anytype) FileReader {
const Implementation = @TypeOf(implementation.*);
return .{
.context = @ptrCast(*anyopaque, implementation),
.vtable = switch (@typeInfo(Implementation)) {
.Struct => &.{
.close = struct {
fn call(context: *anyopaque) void {
@ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).close();
}
}.call,
.read = struct {
fn call(context: *anyopaque, buffer: []u8) core.io.AccessError!u64 {
return @ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).read(buffer);
}
}.call,
.seek = struct {
fn call(context: *anyopaque, cursor: u64) core.io.AccessError!void {
return @ptrCast(*Implementation, @alignCast(
@alignOf(Implementation), context)).seek(cursor);
}
}.call,
},
.Opaque => &.{
.close = struct {
fn call(context: *anyopaque) void {
@ptrCast(*Implementation, context).close();
}
}.call,
.read = struct {
fn call(context: *anyopaque, buffer: []u8) core.io.AccessError!u64 {
return @ptrCast(*Implementation, context).read(buffer);
}
}.call,
.seek = struct {
fn call(context: *anyopaque, cursor: u64) core.io.AccessError!void {
return @ptrCast(*Implementation, context).seek(cursor);
}
}.call,
},
else => @compileError(
"`context` must be a single-element pointer referencing a struct or opaque type"),
},
};
}
};
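A hedged sketch, not part of this commit, of a [FileReader] implementation over an in-memory buffer, using only `core.io` and `core.math` helpers that appear elsewhere in this changeset; `BufferReader` is an illustrative name.

const BufferReader = struct {
    bytes: []const u8,
    cursor: u64 = 0,

    pub fn close(_: *BufferReader) void {}

    pub fn read(self: *BufferReader, buffer: []u8) core.io.AccessError!u64 {
        // Clamp the cursor to the buffer so a seek past the end reads zero bytes.
        const start = core.math.min(usize, @intCast(usize, self.cursor), self.bytes.len);
        const remaining = self.bytes[start ..];
        const count = core.math.min(usize, buffer.len, remaining.len);

        // Copy as many bytes as fit and advance the cursor past them.
        core.io.copy(u8, buffer[0 .. count], remaining[0 .. count]);
        self.cursor += count;

        return count;
    }

    pub fn seek(self: *BufferReader, cursor: u64) core.io.AccessError!void {
        self.cursor = cursor;
    }
};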
///
@@ -181,42 +201,84 @@ pub const FileError = error {
///
pub const FileSystem = union(enum) {
native: []const u8,

archive: struct {
file_system: *const FileSystem,
path: Path,
},

///
/// [AccessError.FileNotFound] occurs when a queried file could not be found on the file-system
/// by the process. This may mean the file does not exist, however it may also mean that the
/// process does not have sufficient rights to read it.
///
/// [AccessError.FileSystemFailure] denotes that a file-system implementation-specific failure to
/// access resources has occurred and the file therefore cannot be accessed.
///
pub const AccessError = error {
FileNotFound,
FileSystemFailure,
};

///
/// Attempts to open the file identified by `path` on `file_system` for reading, returning a
/// [FileReader] value that provides access to the opened file or [AccessError] if it failed.
///
pub fn openFileReader(file_system: FileSystem, path: Path) AccessError!FileReader {
switch (file_system) {
.archive => |archive| {
const archive_reader = try archive.file_system.openFileReader(archive.path);

errdefer archive_reader.close();

const entry = oar.findEntry(archive_reader, path) catch |err| return switch (err) {
error.ArchiveUnsupported, error.Inaccessible => error.FileSystemFailure,
error.EntryNotFound => error.FileNotFound,
};

archive_reader.seek(entry.data_offset) catch return error.FileSystemFailure;
const io = core.io;
const allocator = threadSafeAllocator();
const entry_reader = io.makeOne(struct {
allocator: io.Allocator,
base_reader: FileReader,
cursor: u64,
offset: u64,
length: u64,
const Self = @This();
pub fn close(self: *Self) void {
self.base_reader.close();
io.free(self.allocator, self);
}
pub fn read(self: *Self, buffer: []u8) io.AccessError!u64 {
try self.base_reader.seek(self.offset + self.cursor);
return self.base_reader.read(buffer[0 ..
core.math.min(usize, buffer.len, self.length)]);
}
pub fn seek(self: *Self, cursor: u64) io.AccessError!void {
self.cursor = cursor;
}
}, allocator) catch return error.FileSystemFailure;
errdefer io.free(allocator, entry_reader);
entry_reader.* = .{
.allocator = allocator,
.base_reader = archive_reader,
.cursor = 0,
.offset = entry.data_offset,
.length = entry.data_length,
};
return FileReader.wrap(entry_reader);
},

.native => |native| {
@@ -239,8 +301,124 @@ pub const FileSystem = union(enum) {
ext.SDL_ClearError();

const rw_ops =
ext.SDL_RWFromFile(&path_buffer, "rb") orelse return error.FileNotFound;
errdefer _ = ext.SDL_RWclose(rw_ops);
return FileReader.wrap(@ptrCast(*opaque {
const Self = @This();
fn rwOpsCast(self: *Self) *ext.SDL_RWops {
return @ptrCast(*ext.SDL_RWops, @alignCast(@alignOf(ext.SDL_RWops), self));
}
pub fn read(self: *Self, buffer: []u8) core.io.AccessError!u64 {
ext.SDL_ClearError();
const bytes_read =
ext.SDL_RWread(self.rwOpsCast(), buffer.ptr, @sizeOf(u8), buffer.len);
if ((bytes_read == 0) and (ext.SDL_GetError() != null))
return error.Inaccessible;
return bytes_read;
}
pub fn seek(self: *Self, cursor: u64) core.io.AccessError!void {
ext.SDL_ClearError();
const math = core.math;
const min = math.min;
const maxIntValue = math.maxIntValue;
var sought = min(u64, cursor, maxIntValue(i64));
const ops = self.rwOpsCast();
if (ext.SDL_RWseek(ops, @intCast(i64, sought), ext.RW_SEEK_SET) < 0)
return error.Inaccessible;
var to_seek = cursor - sought;
while (to_seek != 0) {
sought = min(u64, to_seek, maxIntValue(i64));
ext.SDL_ClearError();
if (ext.SDL_RWseek(ops, @intCast(i64, sought), ext.RW_SEEK_CUR) < 0)
return error.Inaccessible;
to_seek -= sought;
}
}
pub fn close(self: *Self) void {
ext.SDL_ClearError();
if (ext.SDL_RWclose(self.rwOpsCast()) != 0)
return ext.SDL_LogWarn(ext.SDL_LOG_CATEGORY_APPLICATION,
"Attempt to close an invalid file reference");
}
}, rw_ops));
},
}
}
///
/// Attempts to query the status of the file identified by `path` on `file_system` for reading,
/// returning a [FileStatus] value containing the state of the file at the moment or
/// [AccessError] if it failed.
///
pub fn query(file_system: FileSystem, path: Path) AccessError!FileStatus {
switch (file_system) {
.archive => |archive| {
const archive_reader = try archive.file_system.openFileReader(archive.path);
defer archive_reader.close();
const entry = oar.findEntry(archive_reader, path) catch |err| return switch (err) {
error.ArchiveUnsupported, error.Inaccessible => error.FileSystemFailure,
error.EntryNotFound => error.FileNotFound,
};
return FileStatus{
.length = entry.data_length,
};
},
.native => |native| {
if (native.len == 0) return error.FileNotFound;
var path_buffer = [_]u8{0} ** 4096;
const seperator_length = @boolToInt(native[native.len - 1] != Path.seperator);
if ((native.len + seperator_length + path.length) >= path_buffer.len)
return error.FileNotFound;
const io = core.io;
io.copy(u8, &path_buffer, native);
if (seperator_length != 0) path_buffer[native.len] = Path.seperator;
io.copy(u8, path_buffer[native.len .. path_buffer.len],
path.buffer[0 .. path.length]);
ext.SDL_ClearError();
const rw_ops =
ext.SDL_RWFromFile(&path_buffer, "rb") orelse return error.FileSystemFailure;
defer if (ext.SDL_RWclose(rw_ops) != 0) unreachable;
ext.SDL_ClearError();
const size = ext.SDL_RWsize(rw_ops);
if (size < 0) return error.FileSystemFailure;
return FileStatus{
.length = @intCast(u64, size),
};
},
}
}
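A hedged usage sketch, not in this commit, tying `query` and `openFileReader` together; `readWholeFile` is an illustrative name and error handling is kept minimal.

fn readWholeFile(file_system: FileSystem, path: Path, buffer: []u8) anyerror!u64 {
    const status = try file_system.query(path);
    const file_reader = try file_system.openFileReader(path);

    defer file_reader.close();

    // Clamp the read to whichever is smaller: the file length or the destination buffer.
    const readable = core.math.min(u64, status.length, buffer.len);

    return file_reader.read(buffer[0 .. @intCast(usize, readable)]);
}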
@@ -346,7 +524,8 @@ pub const Path = extern struct {
};

///
/// Returns a value above `0` if the path of `this` is greater than `that`, below `0` if it is
/// less, or `0` if they are identical.
///
pub fn compare(this: Path, that: Path) isize {
return core.io.compareBytes(this.buffer[0 .. this.length], that.buffer[0 .. that.length]);
@@ -432,16 +611,29 @@ pub const RunError = error {
};

///
/// Returns a thread-safe [core.io.Allocator] value based on the default system allocation strategy.
///
pub fn threadSafeAllocator() core.io.Allocator {
const io = core.io;

return io.Allocator.wrap(@as(*opaque {
const Self = @This();

pub fn alloc(_: *Self, layout: io.AllocationLayout) io.AllocationError![*]u8 {
return @ptrCast([*]u8, ext.SDL_malloc(layout.length) orelse return error.OutOfMemory);
}
pub fn realloc(_: *Self, allocation: [*]u8,
layout: io.AllocationLayout) io.AllocationError![*]u8 {
return @ptrCast([*]u8, ext.SDL_realloc(allocation, layout.length)
orelse return error.OutOfMemory);
}
pub fn dealloc(_: *Self, allocation: [*]u8) void {
ext.SDL_free(allocation);
}
}, undefined));
}
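A hedged sketch pairing the allocator above with `core.io.makeMany` and `core.io.free`, mirroring how `runEngine` loads the script buffer; `duplicateBytes` is an illustrative name that does not exist in this commit.

fn duplicateBytes(bytes: []const u8) core.io.AllocationError![]u8 {
    const allocator = threadSafeAllocator();
    const duplicate = (try core.io.makeMany(u8, allocator, bytes.len))[0 .. bytes.len];

    // The caller owns the result and releases it with core.io.free(allocator, duplicate).
    core.io.copy(u8, duplicate, bytes);

    return duplicate;
}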
///
@@ -453,16 +645,11 @@ pub fn allocator() core.io.Allocator {
pub fn display(comptime Error: anytype,
comptime run: fn (*App, *Graphics) callconv(.Async) Error!void) (RunError || Error)!void {
const cwd = FileSystem{.native = "./"};
const user_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse return error.InitFailure;

defer ext.SDL_free(user_prefix);

var app = App{
.user = .{.native = std.mem.sliceTo(user_prefix, 0)},
@@ -480,17 +667,13 @@ pub fn display(comptime Error: anytype,
return error.InitFailure;
},

.data = .{.archive = .{
.file_system = &cwd,
.path = try Path.from(&.{"./data.oar"}),
}},
};

defer {
ext.SDL_DestroySemaphore(app.message_semaphore);
ext.SDL_DestroyMutex(app.message_mutex);
}