diff --git a/src/oar.zig b/src/oar.zig
index 6a72116..f1e3b0f 100644
--- a/src/oar.zig
+++ b/src/oar.zig
@@ -23,7 +23,7 @@ pub const Entry = extern struct {
     }
 
     ///
-    ///
+    /// [FindError.EntryNotFound] occurs when a matching entry could not be found in the archive.
     ///
     pub const FindError = sys.FileAccess.Error || error {
         EntryNotFound,
diff --git a/src/sys.zig b/src/sys.zig
index 4bb595e..f427eeb 100644
--- a/src/sys.zig
+++ b/src/sys.zig
@@ -8,6 +8,7 @@ const meta = @import("./meta.zig");
 const oar = @import("./oar.zig");
 const stack = @import("./stack.zig");
 const std = @import("std");
+const table = @import("./table.zig");
 
 ///
 /// A thread-safe platform abstraction over multiplexing system I/O processing and event handling.
@@ -211,7 +212,9 @@ pub const AppContext = opaque {
     ///
     ///
     ///
-    pub fn schedule(app_context: *AppContext, procedure: anytype, arguments: anytype) meta.FnReturn(@TypeOf(procedure)) {
+    pub fn schedule(app_context: *AppContext, procedure: anytype,
+        arguments: anytype) meta.FnReturn(@TypeOf(procedure)) {
+
         const Task = struct {
             procedure: @TypeOf(procedure),
             arguments: *@TypeOf(arguments),
@@ -348,6 +351,7 @@ pub const FileSystem = union(enum) {
 
     archive: struct {
         file_access: FileAccess,
+        index_cache: *table.Dynamic([]const u8, oar.Entry, table.string_context),
 
         entry_table: [max_open_entries]ArchiveEntry =
             std.mem.zeroes([max_open_entries]ArchiveEntry),
@@ -445,10 +449,21 @@
 
                 for (archive.entry_table) |_, index| {
                     if (archive.entry_table[index].using == null) {
-                        archive.entry_table[index] = .{
-                            .header = oar.Entry.find(archive.file_access, path.
-                                buffer[0 .. path.length]) catch return error.FileNotFound,
+                        const archive_path = path.buffer[0 .. path.length];
+                        const entry_header = if (archive.index_cache.lookup(archive_path)) |cached| cached.* else cache: {
+                            const header = oar.Entry.find(archive.file_access,
+                                archive_path) catch return error.FileNotFound;
+
+                            archive.index_cache.insert(archive_path, header) catch {
+                                // Caching is best-effort; a failed insert only means the entry is re-read next time.
+                            };
+
+                            break :cache header;
+                        };
+
+                        archive.entry_table[index] = .{
+                            .header = entry_header,
                             .using = &archive.file_access,
                             .cursor = 0,
                         };
 
@@ -533,7 +548,6 @@
             if (native.path_prefix.len == 0) return error.FileNotFound;
 
             var path_buffer = std.mem.zeroes([4096]u8);
-            const seperator = '/';
 
             const seperator_length = @boolToInt(native.path_prefix[
                 native.path_prefix.len - 1] != seperator);
@@ -640,6 +654,8 @@
             },
         }
     }
+
+    pub const seperator = '/';
 };
 
 ///
diff --git a/src/table.zig b/src/table.zig
new file mode 100644
index 0000000..cec8dba
--- /dev/null
+++ b/src/table.zig
@@ -0,0 +1,78 @@
+const std = @import("std");
+
+pub fn Dynamic(comptime Key: type, comptime Value: type, comptime key_context: KeyContext(Key)) type {
+    return struct {
+        buckets_used: usize,
+        bucket_map: []?Bucket,
+
+        ///
+        /// A single key-value slot; `next` optionally holds the index of the next bucket in the same chain.
+        ///
+        const Bucket = struct {
+            key: Key,
+            value: Value,
+            next: ?usize,
+        };
+
+        ///
+        /// Errors that may occur while inserting a value into the table.
+        ///
+        pub const InsertError = error {
+
+        };
+
+        ///
+        /// Alias for the table type being generated.
+        ///
+        const Self = @This();
+
+        ///
+        /// Attempts to associate `value` with `key` in the table.
+        ///
+        pub fn insert(self: Self, key: Key, value: Value) InsertError!void {
+            // TODO: insertion is not implemented yet.
+            _ = value;
+            _ = key;
+            _ = self;
+        }
+
+        ///
+        /// Returns a pointer to the value stored under `key`, or `null` if no matching entry exists.
+        ///
+        pub fn lookup(self: Self, key: Key) ?*Value {
+            var index = @mod(key_context.hash(key), self.bucket_map.len);
+
+            while (self.bucket_map[index]) |*bucket| {
+                if (key_context.equals(bucket.key, key)) return &bucket.value;
+
+                index = bucket.next orelse return null;
+            }
+
+            return null;
+        }
+    };
+}
+
+pub fn KeyContext(comptime Key: type) type {
+    return struct {
+        hash: fn (Key) usize,
+        equals: fn (Key, Key) bool,
+    };
+}
+
+fn equalsString(this_string: []const u8, that_string: []const u8) bool {
+    return std.mem.eql(u8, this_string, that_string);
+}
+
+fn hashString(string: []const u8) usize {
+    var hash = @as(usize, 5381);
+
+    for (string) |byte| hash = ((hash << 5) + hash) + byte;
+
+    return hash;
+}
+
+pub const string_context = KeyContext([]const u8){
+    .hash = hashString,
+    .equals = equalsString,
+};
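
A minimal sketch of how the new table module's pieces fit together, assuming it lives as a test inside src/table.zig (Bucket is not public, and std, Dynamic, and string_context are already in scope there); the bucket count, keys, and values are made up for illustration, and because insert() is still a stub the example fills a bucket by hand before exercising lookup().

test "lookup finds a stored entry and misses an absent one" {
    const Table = Dynamic([]const u8, u32, string_context);

    // A tiny fixed bucket array stands in for whatever storage insert() will eventually manage.
    var buckets = [_]?Table.Bucket{null} ** 4;

    buckets[string_context.hash("foo.oar") % buckets.len] = .{
        .key = "foo.oar",
        .value = 42,
        .next = null,
    };

    const cache = Table{
        .buckets_used = 1,
        .bucket_map = &buckets,
    };

    try std.testing.expect(cache.lookup("foo.oar").?.* == 42);
    try std.testing.expect(cache.lookup("bar.oar") == null);
}

Because next stores an index into bucket_map rather than a pointer, the bucket array can be reallocated without invalidating the chain links.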