Compare commits
2 Commits
2792f27473...449b56947e

Author | SHA1 | Date
---|---|---
kayomn | 449b56947e |
kayomn | 98372cc85f |
58 src/oar.zig
@@ -24,7 +24,7 @@ pub const Archive = struct {
     ///
     ///
     ///
-    const IndexCache = table.Hashed([]const u8, Entry.Header, table.string_context);
+    const IndexCache = table.Hashed([]const u8, u64, table.string_context);

     ///
     /// Finds an entry matching `entry_path` in `archive`.
@@ -34,40 +34,50 @@ pub const Archive = struct {
     pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry {
         return Entry{
             .header = find_header: {
-                if (archive.index_cache.lookup(entry_path)) |entry_header|
-                    break: find_header entry_header.*;
-
-                // Start from beginning of archive.
-                try archive.file_access.seek(0);
-
-                var entry_header = Entry.Header{
+                var header = Entry.Header{
                     .revision = 0,
                     .file_size = 0,
-                    .file_offset = 0
+                    .absolute_offset = 0
                 };

-                const read_buffer = std.mem.asBytes(&entry_header);
+                const header_size = @sizeOf(Entry.Header);

-                // Read first entry.
-                while ((try archive.file_access.read(read_buffer)) == @sizeOf(Entry.Header)) {
-                    if (std.mem.eql(u8, entry_path, entry_header.
-                        name_buffer[0 .. entry_header.name_length])) {
+                if (archive.index_cache.lookup(entry_path)) |cursor| {
+                    try archive.file_access.seek(cursor);

-                        // If caching fails... oh well...
-                        archive.index_cache.insert(entry_path, entry_header) catch {};
+                    if ((try archive.file_access.read(std.mem.asBytes(&header))) != header_size) {
+                        std.debug.assert(archive.index_cache.remove(entry_path) != null);

-                        break: find_header entry_header;
+                        return error.EntryNotFound;
                     }

-                    // Move over file data following the entry.
-                    var to_skip = entry_header.file_size;
+                    break: find_header header;
+                } else {
+                    const mem = std.mem;

-                    while (to_skip != 0) {
-                        const skipped = std.math.min(to_skip, std.math.maxInt(i64));
+                    // Start from beginning of archive.
+                    try archive.file_access.seek(0);

-                        try archive.file_access.skip(@intCast(i64, skipped));
+                    // Read first entry.
+                    while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) {
+                        if (mem.eql(u8, entry_path, header.name_buffer[0 .. header.name_length])) {
+                            // If caching fails... oh well...
+                            archive.index_cache.insert(entry_path, header.absolute_offset) catch {};

-                        to_skip -= skipped;
+                            break: find_header header;
+                        }
+
+                        // Move over file data following the entry.
+                        var to_skip = header.file_size;
+
+                        while (to_skip != 0) {
+                            const math = std.math;
+                            const skipped = math.min(to_skip, math.maxInt(i64));
+
+                            try archive.file_access.skip(@intCast(i64, skipped));
+
+                            to_skip -= skipped;
+                        }
                     }
                 }
             }
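The net effect of this hunk: the index cache no longer stores whole `Entry.Header` values but the header's absolute byte offset (`u64`). On a cache hit, `find()` seeks to the cached offset and re-reads the header from the archive; a short read means the offset has gone stale, so the cache entry is removed and `error.EntryNotFound` is returned. On a miss, the archive is still scanned from the start, and the matching header's `absolute_offset` is cached for next time. The sketch below restates that flow in a self-contained form; `findCached`, its `Header` struct, `std.StringHashMap`, and `std.fs.File` are stand-ins assumed for illustration, not the repository's `table.Hashed`, `FileAccess`, or `Entry.Header`.

```zig
const std = @import("std");

// Hypothetical stand-in for Entry.Header; field names mirror the diff, but the
// real struct also carries a revision field and trailing padding.
const Header = extern struct {
    name_buffer: [255]u8,
    name_length: u8,
    file_size: u64,
    absolute_offset: u64,
};

// Sketch of the new lookup flow: a path-to-offset cache that is re-validated
// on every hit and evicted when the cached offset no longer yields a header.
fn findCached(
    cache: *std.StringHashMap(u64),
    file: std.fs.File,
    entry_path: []const u8,
) !Header {
    var header: Header = undefined;

    if (cache.get(entry_path)) |offset| {
        try file.seekTo(offset);

        // A short read means the cached offset has gone stale.
        if ((try file.read(std.mem.asBytes(&header))) != @sizeOf(Header)) {
            _ = cache.remove(entry_path);
            return error.EntryNotFound;
        }

        return header;
    }

    // Cache miss: scan headers from the start of the archive and remember the
    // absolute offset of the one that matches.
    try file.seekTo(0);

    while ((try file.read(std.mem.asBytes(&header))) == @sizeOf(Header)) {
        if (std.mem.eql(u8, entry_path, header.name_buffer[0..header.name_length])) {
            cache.put(entry_path, header.absolute_offset) catch {};
            return header;
        }

        // The real change skips file data in maxInt(i64)-sized chunks; this
        // sketch assumes file_size fits in an i64.
        try file.seekBy(@intCast(i64, header.file_size));
    }

    return error.EntryNotFound;
}
```

Caching only the offset keeps cache entries small, and re-reading the header on every hit lets a stale offset be detected rather than trusted.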
@@ -109,7 +119,7 @@ pub const Entry = struct {
         name_buffer: [255]u8 = std.mem.zeroes([255]u8),
         name_length: u8 = 0,
         file_size: u64,
-        file_offset: u64,
+        absolute_offset: u64,
         padding: [232]u8 = std.mem.zeroes([232]u8),

         comptime {
@@ -487,7 +487,7 @@ pub const FileSystem = union(enum) {
             if (archive_entry.cursor >= archive_entry.header.file_size)
                 return error.FileInaccessible;

-            try file_access.seek(archive_entry.header.file_offset);
+            try file_access.seek(archive_entry.header.absolute_offset);

             return file_access.read(buffer[0 .. std.math.min(
                 buffer.len, archive_entry.header.file_size)]);
@@ -56,16 +56,18 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
         }

         ///
-        /// Searches for `key` to delete it, returning the deleted value or `null` if no matching
-        /// key was found.
+        /// Searches for `key` and deletes it from `self`.
         ///
-        pub fn remove(self: Self, key: Key) ?Value {
+        /// The removed value is returned or `null` if no key matching `key` was found.
+        ///
+        pub fn remove(self: *Self, key: Key) ?Value {
             var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]);

             if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) {
-                defer entry.value = null;
-
-                self.filled -= 1;
+                defer {
+                    bucket.maybe_entry = null;
+                    self.filled -= 1;
+                }

                 return entry.value;
             };
@@ -74,9 +76,10 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
                 bucket = &(self.buckets[index]);

                 if (bucket.maybe_entry) |*entry| if (key_context.equals(entry.key, key)) {
-                    defer entry.value = null;
-
-                    self.filled -= 1;
+                    defer {
+                        bucket.maybe_entry = null;
+                        self.filled -= 1;
+                    }

                     return entry.value;
                 };
@@ -129,20 +132,21 @@ pub fn Hashed(comptime Key: type, comptime Value: type,
         }

         ///
-        /// Searches for a value indexed with `key` in `self`, returning it or `null` if no matching
-        /// entry was found.
+        /// Searches for a value indexed with `key` in `self`.
         ///
-        pub fn lookup(self: Self, key: Key) ?*Value {
+        /// The found value is returned or `null` if an key matching `key` failed to be found.
+        ///
+        pub fn lookup(self: Self, key: Key) ?Value {
             var bucket = &(self.buckets[@mod(key_context.hash(key), self.buckets.len)]);

-            if (bucket.maybe_entry) |*entry|
-                if (key_context.equals(entry.key, key)) return &entry.value;
+            if (bucket.maybe_entry) |entry|
+                if (key_context.equals(entry.key, key)) return entry.value;

             while (bucket.maybe_next_index) |index| {
                 bucket = &(self.buckets[index]);

-                if (bucket.maybe_entry) |*entry|
-                    if (key_context.equals(entry.key, key)) return &entry.value;
+                if (bucket.maybe_entry) |entry|
+                    if (key_context.equals(entry.key, key)) return entry.value;
             }

             return null;
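Beyond the doc-comment rewording, two behavioural changes land in `Hashed` here: `remove` now takes `*Self` and clears the whole bucket slot (`bucket.maybe_entry = null`) inside a `defer` block rather than only nulling the value, and `lookup` now returns a copy of the value (`?Value`) instead of a pointer into the bucket (`?*Value`), which is what lets the archive code above cache plain `u64` offsets by value. Below is a minimal sketch of the `defer`-based removal pattern, using a hypothetical `Bucket`/`Entry` pair rather than the repository's own types.

```zig
const std = @import("std");

// Hypothetical one-slot bucket, standing in for the table's real bucket type.
const Entry = struct {
    key: u32,
    value: u64,
};

const Bucket = struct {
    maybe_entry: ?Entry = null,
};

fn takeValue(bucket: *Bucket) ?u64 {
    if (bucket.maybe_entry) |entry| {
        // Clearing maybe_entry (instead of only the value, as before) leaves
        // the slot genuinely empty for later insertions.
        defer bucket.maybe_entry = null;

        return entry.value;
    }

    return null;
}

test "removing clears the slot and still returns the old value" {
    var bucket = Bucket{ .maybe_entry = Entry{ .key = 1, .value = 100 } };

    try std.testing.expectEqual(@as(?u64, 100), takeValue(&bucket));
    try std.testing.expect(bucket.maybe_entry == null);
}
```

Because `defer` runs after the return value has been evaluated, the function can hand back `entry.value` and still leave the bucket empty, which is the same trick the reworked `remove` relies on.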