Compare commits

...

5 Commits

Author SHA1 Message Date
kayomn 1cc19d41da Replace byte slices with Path structure in Oar module
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2022-10-17 15:49:35 +01:00
kayomn 033227b243 Replace UTF-8 compares with standard memory compares 2022-10-17 15:48:56 +01:00
kayomn 9f411025a7 Make all build configurations show formatted errors in VS Code 2022-10-17 15:36:46 +01:00
kayomn 2a44f5bf11 Abort if debug errors occur in project settings 2022-10-17 15:33:49 +01:00
kayomn 1d2356e942 Add library support for UTF-8 to Ona module 2022-10-17 14:42:41 +01:00
6 changed files with 111 additions and 72 deletions

View File

@ -14,4 +14,5 @@
"git.detectSubmodulesLimit": 0,
"git.ignoreSubmodules": true,
"debug.onTaskErrors": "showErrors",
}

38
.vscode/tasks.json vendored
View File

@ -1,6 +1,25 @@
{
"version": "2.0.0",
"problemMatcher": {
"source": "zig",
"owner": "cpptools",
"fileLocation": [
"autoDetect",
"${cwd}",
],
"pattern": {
"regexp": "^(.*?):(\\d+):(\\d*):?\\s+(?:fatal\\s+)?(warning|error):\\s+(.*)$",
"file": 1,
"line": 2,
"column": 3,
"severity": 4,
"message": 5,
}
},
"tasks": [
{
"label": "Build Debug",
@ -21,25 +40,6 @@
"clear": true,
"revealProblems": "onProblem",
},
"problemMatcher": {
"source": "zig",
"owner": "cpptools",
"fileLocation": [
"autoDetect",
"${cwd}",
],
"pattern": {
"regexp": "^(.*?):(\\d+):(\\d*):?\\s+(?:fatal\\s+)?(warning|error):\\s+(.*)$",
"file": 1,
"line": 2,
"column": 3,
"severity": 4,
"message": 5,
}
}
},
{
"label": "Build Test",

View File

@ -281,8 +281,7 @@ pub const FileSystem = union(enum) {
///
pub const Path = struct {
file_system: *FileSystem,
length: u8,
buffer: [max]u8,
path: oar.Path,
///
/// With files typically being backed by a block device, they can produce a variety of
@ -430,8 +429,7 @@ pub const FileSystem = union(enum) {
}
};
entry.* = archive.instance.find(path.buffer[0 .. path.length]) catch |err|
return switch (err) {
entry.* = archive.instance.find(path.path) catch |err| return switch (err) {
error.FileInaccessible => error.FileNotFound,
error.EntryNotFound => error.FileNotFound,
};
@ -460,15 +458,15 @@ pub const FileSystem = union(enum) {
var path_buffer = std.mem.zeroes([4096]u8);
const seperator_length = @boolToInt(native[native.len - 1] != seperator);
if ((native.len + seperator_length + path.length) >=
if ((native.len + seperator_length + path.path.length) >=
path_buffer.len) return error.FileNotFound;
std.mem.copy(u8, path_buffer[0 ..], native);
if (seperator_length != 0) path_buffer[native.len] = seperator;
std.mem.copy(u8, path_buffer[native.len ..
path_buffer.len], path.buffer[0 .. path.length]);
std.mem.copy(u8, path_buffer[native.len .. path_buffer.
len], path.path.buffer[0 .. path.path.length]);
ext.SDL_ClearError();
@ -592,8 +590,7 @@ pub const FileSystem = union(enum) {
pub fn joinedPath(file_system: *FileSystem, sequences: []const []const u8) PathError!Path {
var path = Path{
.file_system = file_system,
.buffer = std.mem.zeroes([Path.max]u8),
.length = 0,
.path = oar.Path.empty,
};
if (sequences.len != 0) {
@ -607,25 +604,25 @@ pub const FileSystem = union(enum) {
while (components.next()) |component| if (component.len != 0) {
for (component) |byte| {
if (path.length == Path.max) return error.TooLong;
if (path.path.length == Path.max) return error.TooLong;
path.buffer[path.length] = byte;
path.length += 1;
path.path.buffer[path.path.length] = byte;
path.path.length += 1;
}
if (components.hasNext()) {
if (path.length == Path.max) return error.TooLong;
if (path.path.length == Path.max) return error.TooLong;
path.buffer[path.length] = '/';
path.length += 1;
path.path.buffer[path.path.length] = '/';
path.path.length += 1;
}
};
if (index < last_sequence_index) {
if (path.length == Path.max) return error.TooLong;
if (path.path.length == Path.max) return error.TooLong;
path.buffer[path.length] = '/';
path.length += 1;
path.path.buffer[path.path.length] = '/';
path.path.length += 1;
}
};
}

View File

@ -26,7 +26,10 @@ pub const Archive = struct {
/// As the archive is queried via [find], the cache is lazily assembled with the absolute
/// offsets of each queried file.
///
const IndexCache = ona.table.Hashed([]const u8, u64, ona.table.string_context);
const IndexCache = ona.table.Hashed(Path, u64, .{
.equals = Path.equals,
.hash = Path.hash,
});
///
/// Deinitializes the index cache of `archive`, freeing all associated memory.
@ -43,12 +46,12 @@ pub const Archive = struct {
///
/// The found [Entry] value is returned or a [FindError] if it failed to be found.
///
pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry {
pub fn find(archive: *Archive, entry_path: Path) FindError!Entry {
return Entry{
.header = find_header: {
var header = Entry.Header{
.revision = 0,
.path = std.mem.zeroes(Path),
.path = Path.empty,
.file_size = 0,
.absolute_offset = 0
};
@ -73,7 +76,7 @@ pub const Archive = struct {
// Read first entry.
while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) {
if (mem.eql(u8, entry_path, header.path.buffer[0 .. header.path.length])) {
if (entry_path.equals(header.path)) {
// If caching fails... oh well...
archive.index_cache.insert(entry_path, header.absolute_offset) catch {};
@ -156,11 +159,33 @@ pub const Entry = struct {
};
///
/// Unique identifier pointing to an entry within an archive.
///
/// A path does not do any verification that the given entry pointed to actually exists.
///
pub const Path = extern struct {
    // extern layout: Entry.Header (which embeds a Path) is read straight from the
    // archive file via mem.asBytes in Archive.find, so field order and packing must
    // match the on-disk representation.
    buffer: [255]u8,
    length: u8,

    ///
    /// An empty [Path] with a length of `0`.
    ///
    /// NOTE(review): zero-initialized, so unused bytes past `length` are always `0` —
    /// `equals` and `hash` only consider `buffer[0 .. length]` regardless.
    ///
    pub const empty = std.mem.zeroes(Path);

    ///
    /// Returns `true` if `this_path` is equal to `that_path`, otherwise `false`.
    ///
    /// Only the first `length` bytes of each path participate in the comparison.
    ///
    pub fn equals(this_path: Path, that_path: Path) bool {
        return ona.io.equalsBytes(this_path.buffer[0 .. this_path.length], that_path.buffer[0 .. that_path.length]);
    }

    ///
    /// Returns the hash of the text in `path`.
    ///
    /// Only the first `length` bytes of the buffer are hashed, so two paths with
    /// identical text but different trailing garbage hash identically.
    ///
    pub fn hash(path: Path) usize {
        return ona.io.hashBytes(path.buffer[0 .. path.length]);
    }
};
///

View File

@ -286,6 +286,43 @@ pub const Writer = struct {
}
};
///
/// Returns `true` if `this_bytes` is the same length and contains the same data as `that_bytes`,
/// otherwise `false`.
///
pub fn equalsBytes(this_bytes: []const u8, that_bytes: []const u8) bool {
    // Slices of differing length can never be equal; std.mem.eql performs this
    // check too, but stating it up front makes the contract explicit.
    if (this_bytes.len != that_bytes.len) return false;

    return std.mem.eql(u8, this_bytes, that_bytes);
}
test "Equivalence of bytes" {
    const testing = std.testing;
    const sample = &.{69, 42, 0};

    // Identical data compares equal; a shorter prefix of the same data does not.
    try testing.expect(equalsBytes(sample, sample));
    try testing.expect(!equalsBytes(sample, &.{69, 42}));
}
///
/// Returns a deterministic hash code compiled from each byte in `bytes`.
///
/// Implements the djb2 algorithm (`hash * 33 + byte`, seeded with `5381`) using
/// explicitly wrapping arithmetic, so inputs of any length hash without tripping
/// Zig's integer overflow safety checks.
///
/// **Note** that this operation has `O(n)` time complexity.
///
pub fn hashBytes(bytes: []const u8) usize {
    var hash = @as(usize, 5381);

    // Wrapping operators (*%, +%) are required here: the previous
    // `((hash << 5) + hash) + byte` form used checked addition, which panics in
    // Debug/ReleaseSafe (and is UB in ReleaseFast) once the accumulator exceeds
    // the usize range — after roughly ten input bytes on 64-bit targets.
    // (hash *% 33) is arithmetically identical to (hash << 5) +% hash mod 2^N.
    for (bytes) |byte| hash = (hash *% 33) +% byte;

    return hash;
}
test "Hashing bytes" {
    const testing = std.testing;
    const sample = &.{69, 42, 0};

    // Hashing is deterministic, and truncating the input changes the hash.
    try testing.expect(hashBytes(sample) == hashBytes(sample));
    try testing.expect(hashBytes(sample) != hashBytes(&.{69, 42}));
}
///
/// Writer that silently throws consumed data away and never fails.
///

View File

@ -173,35 +173,14 @@ pub fn KeyContext(comptime Key: type) type {
};
}
///
/// Tests if the contents of `this_string` lexically equals the contents of `that_string`.
///
fn equalsString(this_string: []const u8, that_string: []const u8) bool {
return std.mem.eql(u8, this_string, that_string);
}
///
/// Hashes `string` into a hash value of `usize`.
///
fn hashString(string: []const u8) usize {
var hash = @as(usize, 5381);
for (string) |byte| hash = ((hash << 5) + hash) + byte;
return hash;
}
///
/// A [KeyContext] for handling `[]const u8` types.
///
pub const string_context = KeyContext([]const u8){
.hash = hashString,
.equals = equalsString,
};
test "Hashed table manipulation with string context" {
test "Hashed table manipulation with bytes context" {
const testing = std.testing;
var table = try Hashed([]const u8, u32, string_context).init(testing.allocator);
const io = @import("./io.zig");
var table = try Hashed([]const u8, u32, .{
.equals = io.equalsBytes,
.hash = io.hashBytes,
}).init(testing.allocator);
defer table.deinit();