Improve memory safety of Oar archive lookups
continuous-integration/drone/pr Build is passing Details
continuous-integration/drone/push Build is passing Details

This commit is contained in:
kayomn 2022-10-15 21:11:15 +01:00
parent 98372cc85f
commit 449b56947e
2 changed files with 35 additions and 25 deletions

View File

@@ -24,7 +24,7 @@ pub const Archive = struct {
     ///
     ///
     ///
-    const IndexCache = table.Hashed([]const u8, Entry.Header, table.string_context);
+    const IndexCache = table.Hashed([]const u8, u64, table.string_context);

     ///
     /// Finds an entry matching `entry_path` in `archive`.
@@ -34,40 +34,50 @@ pub const Archive = struct {
     pub fn find(archive: *Archive, entry_path: []const u8) FindError!Entry {
         return Entry{
             .header = find_header: {
-                if (archive.index_cache.lookup(entry_path)) |entry_header|
-                    break: find_header entry_header.*;
-
-                // Start from beginning of archive.
-                try archive.file_access.seek(0);
-
-                var entry_header = Entry.Header{
+                var header = Entry.Header{
                     .revision = 0,
                     .file_size = 0,
-                    .file_offset = 0
+                    .absolute_offset = 0
                 };

-                const read_buffer = std.mem.asBytes(&entry_header);
+                const header_size = @sizeOf(Entry.Header);

-                // Read first entry.
-                while ((try archive.file_access.read(read_buffer)) == @sizeOf(Entry.Header)) {
-                    if (std.mem.eql(u8, entry_path, entry_header.
-                        name_buffer[0 .. entry_header.name_length])) {
+                if (archive.index_cache.lookup(entry_path)) |cursor| {
+                    try archive.file_access.seek(cursor);

-                        // If caching fails... oh well...
-                        archive.index_cache.insert(entry_path, entry_header) catch {};
+                    if ((try archive.file_access.read(std.mem.asBytes(&header))) != header_size) {
+                        std.debug.assert(archive.index_cache.remove(entry_path) != null);

-                        break: find_header entry_header;
+                        return error.EntryNotFound;
                     }

-                    // Move over file data following the entry.
-                    var to_skip = entry_header.file_size;
+                    break: find_header header;
+                } else {
+                    const mem = std.mem;

-                    while (to_skip != 0) {
-                        const skipped = std.math.min(to_skip, std.math.maxInt(i64));
+                    // Start from beginning of archive.
+                    try archive.file_access.seek(0);

-                        try archive.file_access.skip(@intCast(i64, skipped));
+                    // Read first entry.
+                    while ((try archive.file_access.read(mem.asBytes(&header))) == header_size) {
+                        if (mem.eql(u8, entry_path, header.name_buffer[0 .. header.name_length])) {
+                            // If caching fails... oh well...
+                            archive.index_cache.insert(entry_path, header.absolute_offset) catch {};

-                        to_skip -= skipped;
+                            break: find_header header;
+                        }
+
+                        // Move over file data following the entry.
+                        var to_skip = header.file_size;
+
+                        while (to_skip != 0) {
+                            const math = std.math;
+                            const skipped = math.min(to_skip, math.maxInt(i64));
+
+                            try archive.file_access.skip(@intCast(i64, skipped));
+
+                            to_skip -= skipped;
+                        }
                     }
                 }
@@ -109,7 +119,7 @@ pub const Entry = struct {
         name_buffer: [255]u8 = std.mem.zeroes([255]u8),
         name_length: u8 = 0,
         file_size: u64,
-        file_offset: u64,
+        absolute_offset: u64,
         padding: [232]u8 = std.mem.zeroes([232]u8),

         comptime {

View File

@@ -487,7 +487,7 @@ pub const FileSystem = union(enum) {
                 if (archive_entry.cursor >= archive_entry.header.file_size)
                     return error.FileInaccessible;

-                try file_access.seek(archive_entry.header.file_offset);
+                try file_access.seek(archive_entry.header.absolute_offset);

                 return file_access.read(buffer[0 .. std.math.min(
                     buffer.len, archive_entry.header.file_size)]);