Compare commits

...

2 Commits

Author SHA1 Message Date
kayomn 26f342e518 Add entry caching to archive file systems 2022-10-15 01:25:21 +01:00
continuous-integration/drone/push: Build is failing
continuous-integration/drone/pr: Build is failing
kayomn b8517d3b22 Tidy up naming conventions in stack module 2022-10-15 01:08:03 +01:00
4 changed files with 183 additions and 119 deletions

View File

@@ -11,7 +11,9 @@ const sys = @import("./sys.zig");
 /// Entry point.
 ///
 pub fn main() anyerror!void {
-    return nosuspend await async sys.runGraphics(anyerror, run);
+    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
+
+    return nosuspend await async sys.runGraphics(gpa.allocator(), anyerror, run);
 }

 test {
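
The hunk above threads an explicit allocator from the entry point into sys.runGraphics. As a hedged aside, a typical std.heap.GeneralPurposeAllocator setup also checks for leaks on exit; a minimal standalone sketch (illustrative only, not code from this diff) looks like:

const std = @import("std");

pub fn main() !void {
    // Sketch: create the general-purpose allocator and report leaks on exit.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};

    defer _ = gpa.deinit();

    const allocator = gpa.allocator();

    // Anything needing heap memory now receives `allocator` explicitly,
    // the same way runGraphics does in this change.
    const message = try allocator.dupe(u8, "hello");

    defer allocator.free(message);

    std.debug.print("{s}\n", .{message});
}

Passing gpa.allocator() down keeps the allocation policy at the entry point, so a different std.mem.Allocator can be swapped in without touching sys.runGraphics.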

View File

@@ -1,11 +1,17 @@
 const io = @import("./io.zig");
 const std = @import("std");

+///
+/// Returns a fixed-size stack type of `Element`s.
+///
 pub fn Fixed(comptime Element: type) type {
     return struct {
         filled: usize = 0,
         buffer: []Element,

+        ///
+        /// Stack type.
+        ///
         const Self = @This();

         ///
@@ -57,8 +63,8 @@ pub fn Fixed(comptime Element: type) type {
         ///
         /// Attempts to push `element` into `self`, returning a [FixedPushError] if it failed.
         ///
-        pub fn push(self: *Self, element: Element) FixedPushError!void {
-            if (self.filled == self.buffer.len) return error.Overflow;
+        pub fn push(self: *Self, element: Element) PushError!void {
+            if (self.filled == self.buffer.len) return error.OutOfMemory;

             self.buffer[self.filled] = element;
             self.filled += 1;
@@ -68,10 +74,10 @@ pub fn Fixed(comptime Element: type) type {
         /// Attempts to push all of `elements` into `self`, returning a [FixedPushError] if it
         /// failed.
        ///
-        pub fn pushAll(self: *Self, elements: []const u8) FixedPushError!void {
+        pub fn pushAll(self: *Self, elements: []const u8) PushError!void {
             const filled = (self.filled + elements.len);

-            if (filled > self.buffer.len) return error.Overflow;
+            if (filled > self.buffer.len) return error.OutOfMemory;

             std.mem.copy(u8, self.buffer[self.filled ..], elements);
@@ -81,15 +87,9 @@ pub fn Fixed(comptime Element: type) type {
 }

 ///
-/// Potential errors that may occur while trying to push one or more elements into a stack of a
-/// known maximum size.
+/// Potential errors that may occur while trying to push one or more elements into a stack.
 ///
-/// [FinitePushError.Overflow] is returned if the stack does not have sufficient capacity to hold a
-/// given set of elements.
-///
-pub const FixedPushError = error {
-    Overflow,
-};
+pub const PushError = std.mem.Allocator.Error;

 test {
     const testing = std.testing;
@@ -103,8 +103,8 @@ test {
     try testing.expectEqual(stack.pop(), 69);

     try stack.pushAll(&.{42, 10, 95, 0});
     try testing.expectEqual(stack.count(), 4);
-    try testing.expectError(FixedPushError.Overflow, stack.push(1));
-    try testing.expectError(FixedPushError.Overflow, stack.pushAll(&.{1, 11, 11}));
+    try testing.expectError(PushError.OutOfMemory, stack.push(1));
+    try testing.expectError(PushError.OutOfMemory, stack.pushAll(&.{1, 11, 11}));

     stack.clear();
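
With PushError now aliased to std.mem.Allocator.Error, a full stack reports error.OutOfMemory exactly like an allocator-backed container would. A minimal usage sketch follows; the ./stack.zig import path and the two-byte buffer are assumptions for illustration, since the compare view does not show file paths:

const stack = @import("./stack.zig");
const std = @import("std");

test "push until full" {
    var buffer: [2]u8 = undefined;

    // A Fixed stack borrows its storage; nothing is heap-allocated here.
    var bytes = stack.Fixed(u8){.buffer = &buffer};

    try bytes.push(1);
    try bytes.push(2);

    // The stack is full, so the next push fails with the same error value an
    // allocator-backed container would report.
    try std.testing.expectError(stack.PushError.OutOfMemory, bytes.push(3));
}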

View File

@@ -35,6 +35,7 @@ pub const AppContext = opaque {
    /// Internal state of the event loop hidden from the API consumer.
    ///
    const Implementation = struct {
+        user_path_prefix: [*]u8,
        data_file_system: FileSystem,
        user_file_system: FileSystem,
        message_semaphore: *ext.SDL_sem,
@@ -104,6 +105,7 @@ pub const AppContext = opaque {
            ext.SDL_DestroyMutex(implementation.message_mutex);
            ext.SDL_DestroySemaphore(implementation.message_semaphore);
+            ext.SDL_free(implementation.user_path_prefix);
        }

        ///
@@ -134,14 +136,25 @@ pub const AppContext = opaque {
        ///
        /// Returns the created [Implementation] value on success or [InitError] on failure.
        ///
-        fn init(data_archive_file_access: FileAccess,
-            user_path_prefix: []const u8) InitError!Implementation {
+        fn init(allocator: std.mem.Allocator,
+            data_archive_file_access: FileAccess) InitError!Implementation {
+
+            const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse
+                return error.OutOfMemory;

            return Implementation{
-                .user_file_system = .{.native = .{.path_prefix =
-                    user_path_prefix[0 .. std.mem.len(user_path_prefix)]}},
                .message_semaphore = ext.SDL_CreateSemaphore(0) orelse return error.OutOfSemaphores,
                .message_mutex = ext.SDL_CreateMutex() orelse return error.OutOfMutexes,
-                .data_file_system = .{.archive = .{.file_access = data_archive_file_access}},
+                .user_path_prefix = user_path_prefix,
+                .user_file_system = .{.native = .{.path_prefix = user_path_prefix}},
+                .data_file_system = .{.archive = .{
+                    .file_access = data_archive_file_access,
+                    .index_cache = try FileSystem.ArchiveIndexCache.init(allocator),
+                }},
                .message_thread = null,
            };
        }
@@ -351,7 +364,7 @@ pub const FileSystem = union(enum) {
    archive: struct {
        file_access: FileAccess,

-        index_cache: *table.Dynamic([]const u8, ArchiveEntry, table.string_context),
+        index_cache: ArchiveIndexCache,

        entry_table: [max_open_entries]ArchiveEntry =
            std.mem.zeroes([max_open_entries]ArchiveEntry),
@@ -368,6 +381,8 @@ pub const FileSystem = union(enum) {
        cursor: u64,
    };

+    const ArchiveIndexCache = table.Hashed([]const u8, oar.Entry, table.string_context);
+
    ///
    /// Platform-agnostic mechanism for referencing files and directories on a [FileSystem].
    ///
@@ -447,99 +462,98 @@ pub const FileSystem = union(enum) {
            .archive => |*archive| {
                if (mode != .readonly) return error.ModeUnsupported;

-                for (archive.entry_table) |_, index| {
-                    if (archive.entry_table[index].using == null) {
-                        const archive_path = path.buffer[0 .. path.length];
-
-                        const entry_header = archive.index_cache.lookup(archive_path) orelse {
-                            const header = oar.Entry.find(archive.file_access,
-                                archive_path) catch return error.FileNotFound;
-
-                            archive.index_cache.insert(archive_path, header) catch {
-                                // If caching fails... oh well...
-                            };
-
-                            break header;
-                        };
-
-                        archive.entry_table[index] = .{
-                            .header = entry_header,
-                            .using = &archive.file_access,
-                            .cursor = 0,
-                        };
+                for (archive.entry_table) |*entry| if (entry.using == null) {
+                    const entry_path = path.buffer[0 .. path.length];
+
+                    entry.* = .{
+                        .header = find_header: {
+                            if (archive.index_cache.lookup(entry_path)) |header|
+                                break: find_header header.*;
+
+                            const header = oar.Entry.find(archive.file_access,
+                                entry_path) catch return error.FileNotFound;
+
+                            // If caching fails... oh well...
+                            archive.index_cache.insert(entry_path, header) catch {};
+
+                            break: find_header header;
+                        },
+
+                        .using = &archive.file_access,
+                        .cursor = 0,
+                    };

                    const Implementation = struct {
                        fn archiveEntryCast(context: *anyopaque) *ArchiveEntry {
                            return @ptrCast(*ArchiveEntry, @alignCast(
                                @alignOf(ArchiveEntry), context));
                        }

                        fn close(context: *anyopaque) void {
                            archiveEntryCast(context).using = null;
                        }

                        fn queryCursor(context: *anyopaque) FileAccess.Error!u64 {
                            const archive_entry = archiveEntryCast(context);

                            if (archive_entry.using == null) return error.FileInaccessible;

                            return archive_entry.cursor;
                        }

                        fn queryLength(context: *anyopaque) FileAccess.Error!u64 {
                            const archive_entry = archiveEntryCast(context);

                            if (archive_entry.using == null) return error.FileInaccessible;

                            return archive_entry.header.file_size;
                        }

                        fn read(context: *anyopaque, buffer: []u8) FileAccess.Error!usize {
                            const archive_entry = archiveEntryCast(context);

                            const file_access = archive_entry.using orelse
                                return error.FileInaccessible;

                            if (archive_entry.cursor >= archive_entry.header.file_size)
                                return error.FileInaccessible;

                            try file_access.seek(archive_entry.header.file_offset);

                            return file_access.read(buffer[0 .. std.math.min(
                                buffer.len, archive_entry.header.file_size)]);
                        }

                        fn seek(context: *anyopaque, cursor: usize) FileAccess.Error!void {
                            const archive_entry = archiveEntryCast(context);

                            if (archive_entry.using == null) return error.FileInaccessible;

                            archive_entry.cursor = cursor;
                        }

                        fn seekToEnd(context: *anyopaque) FileAccess.Error!void {
                            const archive_entry = archiveEntryCast(context);

                            if (archive_entry.using == null) return error.FileInaccessible;

                            archive_entry.cursor = archive_entry.header.file_size;
                        }
                    };

                    return FileAccess{
-                        .context = &archive.entry_table[index],
+                        .context = entry,
                        .implementation = &.{
                            .close = Implementation.close,
                            .queryCursor = Implementation.queryCursor,
                            .queryLength = Implementation.queryLength,
                            .read = Implementation.read,
                            .seek = Implementation.seek,
                            .seekToEnd = Implementation.seekToEnd,
                        },
                    };
-                    }
-                }
+                };

                return error.OutOfFiles;
            },
@@ -805,7 +819,7 @@ pub const RunError = error {
 /// Should an error from `run` occur, an `Error` is returned, otherwise a [RunError] is returned if
 /// the underlying runtime fails and is logged.
 ///
-pub fn runGraphics(comptime Error: anytype,
+pub fn runGraphics(allocator: std.mem.Allocator, comptime Error: anytype,
    comptime run: GraphicsRunner(Error)) (RunError || Error)!void {

    if (ext.SDL_Init(ext.SDL_INIT_EVERYTHING) != 0) {
@@ -841,14 +855,6 @@ pub fn runGraphics(comptime Error: anytype,
    defer ext.SDL_DestroyRenderer(renderer);

-    const user_path_prefix = ext.SDL_GetPrefPath("ona", "ona") orelse {
-        ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, "Failed to load user path");
-
-        return error.InitFailure;
-    };
-
-    defer ext.SDL_free(user_path_prefix);
-
    var cwd_file_system = FileSystem{.native =.{.path_prefix = "./"}};

    var data_archive_file_access = try (try cwd_file_system.
@@ -856,9 +862,7 @@ pub fn runGraphics(comptime Error: anytype,
    defer data_archive_file_access.close();

-    var app_context = AppContext.Implementation.init(data_archive_file_access,
-        user_path_prefix[0 .. std.mem.len(user_path_prefix)]) catch |err| {
+    var app_context = AppContext.Implementation.init(allocator, data_archive_file_access) catch |err| {
        ext.SDL_LogCritical(ext.SDL_LOG_CATEGORY_APPLICATION, switch (err) {
            error.OutOfMemory => "Failed to allocate necessary memory",
            error.OutOfMutexes => "Failed to create file-system work lock",
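
The new open path builds each entry header with a labeled block (find_header) that either breaks out with the cached value or falls back to scanning the archive via oar.Entry.find. A small self-contained sketch of that Zig idiom, illustrative only and not code from this diff:

const std = @import("std");

test "labeled block picks a cached value or falls back" {
    const maybe_cached: ?u32 = null;

    // `break :label value` leaves the labeled block with a result, which is how
    // the archive code above returns either a cached header or a freshly found one.
    const header = find_header: {
        if (maybe_cached) |cached| break :find_header cached;

        // Cache miss: fall back to a computed value.
        break :find_header 42;
    };

    try std.testing.expectEqual(@as(u32, 42), header);
}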

View File

@@ -1,61 +1,119 @@
 const std = @import("std");

-pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type {
+///
+/// Returns a hash-backed table type of `Value`s indexed by `Key` and using `key_context` as the key
+/// context.
+///
+pub fn Hashed(comptime Key: type, comptime Value: type,
+    comptime key_context: KeyContext(Key)) type {
+
+    const Allocator = std.mem.Allocator;
+
    return struct {
-        load_maximum: f32,
-        buckets_used: usize,
+        allocator: Allocator,
+        load_limit: f32,
        buckets: []Bucket,
+        filled: usize,

        ///
-        ///
+        /// A slot in the hash table.
        ///
        const Bucket = struct {
            maybe_entry: ?struct {
                key: Key,
                value: Value,
-            },
+            } = null,

-            maybe_next_index: ?usize,
+            maybe_next_index: ?usize = null,
        };

        ///
-        ///
+        /// Hash table type.
        ///
        const Self = @This();

        ///
-        ///
+        /// Searches for `key` to delete it, returning the deleted value or `null` if no matching
+        /// key was found.
        ///
-        pub fn delete(self: Self, key: Key) bool {
-            _ = key;
-            _ = self;
+        pub fn remove(self: Self, key: Key) ?Value {
+            var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]);
+
+            if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) {
+                defer entry.value = null;
+
+                self.filled -= 1;
+
+                return entry.value;
+            };
+
+            while (bucket.maybe_next_index) |index| {
+                bucket = &(self.buckets[index]);
+
+                if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) {
+                    defer entry.value = null;
+
+                    self.filled -= 1;
+
+                    return entry.value;
+                };
+            }
+
+            return null;
+        }
+
+        pub fn init(allocator: Allocator) Allocator.Error!Self {
+            return Self{
+                .buckets = try allocator.alloc(Bucket, 4),
+                .filled = 0,
+                .allocator = allocator,
+                .load_limit = 0.75,
+            };
        }

        ///
-        ///
+        /// Attempts to insert the value at `key` to be `value` in `self`, returning an
+        /// [InsertError] if it fails.
        ///
-        pub fn insert(self: Self, key: Key, value: Value) InsertError!void {
-            if ((@intToFloat(f32, self.buckets_used) / @intToFloat(
-                f32, self.buckets.len)) >= self.load_maximum) try self.rehash();
+        pub fn insert(self: *Self, key: Key, value: Value) InsertError!void {
+            if (self.loadFactor() >= self.load_limit) {
+                const old_buckets = self.buckets;
+
+                defer self.allocator.free(old_buckets);
+
+                self.buckets = try self.allocator.alloc(Bucket, old_buckets.len * 2);
+
+                for (old_buckets) |bucket, index| self.buckets[index] = bucket;
+            }

            var hash = @mod(key_context.hash(key), self.buckets.len);

            while (true) {
                const bucket = &(self.buckets[hash]);

-                const entry = &(bucket.maybe_entry orelse {
+                if (key_context.equals((bucket.maybe_entry orelse {
                    bucket.maybe_entry = .{
                        .key = key,
                        .value = value
                    };
-                });

-                if (key_context.equals(entry.key, key)) return error.KeyExists;
-                hash = @mod(hashHash(hash), self.buckets.len);
+                    self.filled += 1;
+
+                    break;
+                }).key, key)) return error.KeyExists;
+
+                hash = @mod(hash + 1, self.buckets.len);
            }
        }

+        ///
+        /// Returns the current load factor of `self`, which is derived from the number of capacity
+        /// that has been filled.
+        ///
+        pub fn loadFactor(self: Self) f32 {
+            return @intToFloat(f32, self.filled) / @intToFloat(f32, self.buckets.len);
+        }
+
        ///
        /// Searches for a value indexed with `key` in `self`, returning it or `null` if no matching
        /// entry was found.
@@ -81,7 +139,7 @@ pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: K
 ///
 ///
 ///
-pub const InsertError = std.mem.Allocator || error {
+pub const InsertError = std.mem.Allocator.Error || error {
    KeyExists,
 };
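
Renamed from Dynamic to Hashed, the table now owns its allocator and doubles its bucket array once loadFactor reaches load_limit. A minimal usage sketch follows; the ./table.zig import path is assumed (the compare view does not show file paths), table.string_context is the key context sys.zig uses for []const u8 keys, and lookup is assumed to return an optional pointer, matching how the archive index cache dereferences its result above:

const std = @import("std");
const table = @import("./table.zig");

test "insert then look up a key" {
    // A fixed buffer allocator keeps the sketch self-contained with no frees to manage.
    var buffer: [4096]u8 = undefined;
    var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(&buffer);

    // init allocates the initial four buckets from the given allocator.
    var ages = try table.Hashed([]const u8, u32, table.string_context)
        .init(fixed_buffer_allocator.allocator());

    try ages.insert("ona", 1);

    // lookup yields null for a missing key; a hit is dereferenced like the cache hit above.
    if (ages.lookup("ona")) |entry| try std.testing.expectEqual(@as(u32, 1), entry.*);
}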