Split event loop into its own file

kayomn 2022-09-24 23:09:02 +01:00
parent 6fb6e63280
commit 94b1d8f67e
6 changed files with 380 additions and 554 deletions

src/errors.zig (deleted)

@@ -1,7 +0,0 @@
///
/// Returns `true` if `value` does not hold an error from `Error`, otherwise `false`.
///
pub fn isOk(comptime Error: type, value: Error!void) bool {
return if (value) |_| true else |_| false;
}
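// Hedged usage sketch for isOk above; `mightFail` is a hypothetical fallible function used only
// to illustrate how the helper collapses an error union into a bool.
fn mightFail() error{Failed}!void {
return error.Failed;
}
test "isOk sketch" {
try @import("std").testing.expect(!isOk(error{Failed}, mightFail()));
}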

src/io.zig

@@ -1,476 +1,6 @@
const stack = @import("./stack.zig");
const std = @import("std");
///
/// File-system path encoded as UTF-8 text.
///
pub const Path = struct {
length: u16,
buffer: [max]u8,
///
/// An empty [Path] with a length of zero.
///
pub const empty = std.mem.zeroes(Path);
///
/// Returns `true` if the text content of `path` is equal to `text`, otherwise `false`.
///
pub fn equalsText(path: Path, text: []const u8) bool {
return std.mem.eql(u8, path.buffer[0 .. path.length], text);
}
///
/// The maximum possible byte-length of a [Path].
///
/// Note that paths are encoded using UTF-8, meaning that a character may be bigger than one
/// byte. Because of this, it is not safe to assume that a path may hold `2048` individual
/// characters.
///
pub const max = 2048;
};
///
/// Uniform resource identifier (URI) that operates atop the operating system to provide a
/// platform-agnostic interface for local and networked I/O access.
///
/// For more information, see [https://en.wikipedia.org/wiki/URI].
///
pub const Uri = struct {
buffer: [max]u8,
scheme_len: u16,
user_info_range: Range,
host_range: Range,
port_number: u16,
path_range: Range,
///
/// Errors that may occur during parsing of a URI from URI-compatible source encoding.
///
/// [ParseError.TooLong] occurs when the provided source data is bigger than the maximum
/// byte-length allowed by [max].
///
/// [ParseError.UnexpectedToken] occurs when the internal tokenization process encounters a
/// URI component token in the wrong order.
///
/// [ParseError.InvalidEncoding] occurs when the source encoding being parsed is not properly
/// encoded in its own format (malformed UTF-8, for example).
///
pub const ParseError = error {
TooLong,
UnexpectedToken,
InvalidEncoding,
};
const Range = struct {
off: u16,
len: u16,
const none = std.mem.zeroes(Range);
};
///
/// Represents an individual component of a URI sequence.
///
pub const Token = union(enum) {
scheme: []const u8,
user_info: []const u8,
host: []const u8,
port: []const u8,
path: []const u8,
query: []const u8,
fragment: []const u8,
};
///
/// Tokenizes the data in [Tokenizer.utf8_sequence] into URI tokens.
///
/// See [Token] for more information on the supported URI components.
///
pub const Tokenizer = struct {
cursor: usize = 0,
utf8_sequence: []const u8,
///
/// Extracts the next [Token] in sequence from `tokenizer` and returns it or `null` if
/// there are no more tokens to be extracted.
///
pub fn next(tokenizer: *Tokenizer) ?Token {
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'A' ... 'Z', 'a' ... 'z' => {
const begin = tokenizer.cursor;
tokenizer.cursor += 1;
var is_scheme = (begin == 0);
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'+', '.', '-', '0' ... '9', 'A' ... 'Z', 'a' ... 'z' =>
tokenizer.cursor += 1,
':' => {
if (is_scheme) {
defer tokenizer.cursor += 1;
return Token{.scheme =
tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
}
tokenizer.cursor += 1;
},
'#', '?' => break,
else => {
tokenizer.cursor += 1;
is_scheme = false;
},
};
return Token{.path =
tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
'@' => {
tokenizer.cursor += 1;
const begin = tokenizer.cursor;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'/', ':' => break,
else => tokenizer.cursor += 1,
};
return Token{.host =
tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
':' => {
tokenizer.cursor += 1;
const begin = tokenizer.cursor;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'/' => break,
else => tokenizer.cursor += 1,
};
return Token{
.port = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
'/' => {
tokenizer.cursor += 1;
if (tokenizer.utf8_sequence[tokenizer.cursor] == '/') {
tokenizer.cursor += 1;
const begin = tokenizer.cursor;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'@' => return Token{.user_info =
tokenizer.utf8_sequence[begin .. tokenizer.cursor]},
':', '/' => break,
else => tokenizer.cursor += 1,
};
return Token{
.host = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
} else {
const begin = (tokenizer.cursor - 1);
tokenizer.cursor += 1;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'?', '#' => break,
else => tokenizer.cursor += 1,
};
return Token{
.path = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
}
},
'?' => {
tokenizer.cursor += 1;
const begin = tokenizer.cursor;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'#' => break,
else => tokenizer.cursor += 1,
};
return Token{
.query = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
'#' => {
tokenizer.cursor += 1;
const begin = tokenizer.cursor;
while (tokenizer.cursor < tokenizer.utf8_sequence.len) tokenizer.cursor += 1;
return Token{
.fragment = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
else => {
const begin = tokenizer.cursor;
tokenizer.cursor += 1;
while (tokenizer.cursor < tokenizer.utf8_sequence.len)
switch (tokenizer.utf8_sequence[tokenizer.cursor]) {
'#', '?' => break,
else => tokenizer.cursor += 1,
};
return Token{
.path = tokenizer.utf8_sequence[begin .. tokenizer.cursor]};
},
};
return null;
}
///
/// A more constrained variant of [next] that accepts an `expected_token` argument to
/// validate the component type of a [Token] before it is returned.
///
/// If the tag of the extracted [Token] is not identical to `expected_token`, it will be
/// discarded and `null` is returned instead.
///
pub fn nextExpect(tokenizer: *Tokenizer, expected_token: std.meta.Tag(Token)) ?Token {
if (tokenizer.next()) |token| {
if (token == expected_token) return token;
}
return null;
}
};
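// Hedged usage sketch for the tokenizer above; the URI literal is illustrative only.
test "uri tokenizer sketch" {
var tokenizer = Tokenizer{.utf8_sequence = "file:/path/to/file"};
try std.testing.expect(tokenizer.nextExpect(.scheme) != null);
try std.testing.expect(tokenizer.nextExpect(.path) != null);
try std.testing.expect(tokenizer.next() == null);
}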
///
/// An empty [Uri] with no scheme, host, or path data.
///
pub const empty = std.mem.zeroes(Uri);
///
/// The maximum possible byte-length of a [Uri].
///
/// Note that a URI character may be encoded using multiple bytes, meaning that a capacity of
/// `2048` bytes is not the same as `2048` URI characters.
///
pub const max = 2048;
///
/// Returns `true` if `uri_scheme` matches the scheme contained in `uri`, otherwise `false`.
///
pub fn isScheme(uri: Uri, uri_scheme: []const u8) bool {
return std.mem.eql(u8, uri.buffer[0 .. uri.scheme_len], uri_scheme);
}
///
/// Attempts to parse and return a [Uri] value out of `utf8_sequence`, otherwise returning a
/// [ParseError] if `utf8_sequence` is invalid in any way.
///
/// [ParseError.InvalidEncoding] occurs if the data encoded in `utf8_sequence` cannot be
/// validated as UTF-8 or it contains an invalid ASCII decimal number encoding for its URL port.
///
/// See [ParseError] for more details on the other errors that may be returned.
///
pub fn parse(utf8_sequence: []const u8) ParseError!Uri {
if (!std.unicode.utf8ValidateSlice(utf8_sequence)) return error.InvalidEncoding;
var uri = Uri.empty;
if (utf8_sequence.len != 0) {
if (utf8_sequence.len > max) return error.TooLong;
var tokenizer = Tokenizer{.utf8_sequence = utf8_sequence};
const scheme_token = tokenizer.nextExpect(.scheme) orelse return error.UnexpectedToken;
var uri_buffer = stack.Unmanaged(u8){.buffer = &uri.buffer};
const uri_writer = uri_buffer.asWriter();
const assert = std.debug.assert;
// These write operations will never fail because the uri_buffer will be known to be big
// enough by this point.
assert(uri_writer.write(scheme_token.scheme) == scheme_token.scheme.len);
assert(uri_writer.writeByte(':'));
// Downcast is safe because utf8_sequence can't be greater than u16 max.
uri.scheme_len = @intCast(u16, scheme_token.scheme.len);
var last_token = scheme_token;
while (tokenizer.next()) |scheme_specific_token| {
switch (scheme_specific_token) {
.scheme => return error.UnexpectedToken,
.user_info => |user_info| {
if (last_token != .scheme) return error.UnexpectedToken;
const delimiter = "//";
assert(uri_writer.write(delimiter) == delimiter.len);
uri.user_info_range = .{
.off = @intCast(u16, uri_buffer.filled),
.len = @intCast(u16, user_info.len),
};
assert(uri_writer.write(user_info) == user_info.len);
assert(uri_writer.writeByte('@'));
},
.host => |host| {
switch (last_token) {
.scheme => {
const delimiter = "//";
assert(uri_writer.write(delimiter) == delimiter.len);
},
.user_info => {},
else => return error.UnexpectedToken,
}
assert(uri_writer.write(host) == host.len);
},
.port => |port| {
if (last_token != .host) return error.UnexpectedToken;
const port_radix = 10;
uri.port_number = std.fmt.parseInt(u16, port, port_radix) catch
return error.InvalidEncoding;
assert(uri_writer.writeByte(':'));
assert(uri_writer.write(port) == port.len);
},
.path => |path| {
if ((last_token != .scheme) and (last_token != .host) and
(last_token != .port)) return error.UnexpectedToken;
uri.path_range = .{
.off = @intCast(u16, uri_buffer.filled),
.len = @intCast(u16, path.len),
};
assert(uri_writer.write(path) == path.len);
},
.query => |query| {
if ((last_token != .scheme) and (last_token != .host) and
(last_token != .port) and (last_token != .path))
return error.UnexpectedToken;
assert(uri_writer.writeByte('?'));
uri.path_range = .{
.off = @intCast(u16, uri_buffer.filled),
.len = @intCast(u16, query.len),
};
assert(uri_writer.write(query) == query.len);
},
.fragment => |fragment| {
if ((last_token != .scheme) and (last_token != .host) and
(last_token != .port) and (last_token != .path) and
(last_token != .query)) return error.UnexpectedToken;
assert(uri_writer.writeByte('#'));
uri.path_range = .{
.off = @intCast(u16, uri_buffer.filled),
.len = @intCast(u16, fragment.len),
};
assert(uri_writer.write(fragment) == fragment.len);
},
}
last_token = scheme_specific_token;
}
}
return uri;
}
///
/// Creates and returns a [Path] value from the path component of `uri`.
///
pub fn toPath(uri: Uri) Path {
var path = Path{
.length = uri.path_range.len,
.buffer = std.mem.zeroes([Path.max]u8),
};
std.mem.copy(u8, path.buffer[0 ..], uri.buffer[uri.path_range.off .. (uri.path_range.off + uri.path_range.len)]);
return path;
}
///
/// Writes the path component of `uri` to `path_writer`, returning `true` if all bytes used to
/// encode the path were successfully written, otherwise `false` if it was partially completed
/// or not at all.
///
pub fn writePath(uri: Uri, path_writer: Writer) bool {
return (path_writer.write(uri.buffer[uri.path_range.off ..
(uri.path_range.off + uri.path_range.len)]) == uri.path_range.len);
}
};
test "uri" {
const testing = @import("std").testing;
const empty_uri = Uri.empty;
try testing.expect(empty_uri.isScheme(""));
try testing.expect(empty_uri.toPath().equalsText(""));
const scheme_only_uri = try Uri.parse("uri:");
try testing.expect(scheme_only_uri.isScheme("uri"));
try testing.expect(scheme_only_uri.toPath().equalsText(""));
const absolute_file_path = "/path/to/file";
const absolute_file_uri = try Uri.parse("file:" ++ absolute_file_path);
try testing.expect(absolute_file_uri.isScheme("file"));
try testing.expect(absolute_file_uri.toPath().equalsText(absolute_file_path));
const relative_file_path = "path/to/file";
const relative_file_uri = try Uri.parse("file:" ++ relative_file_path);
try testing.expect(relative_file_uri.isScheme("file"));
try testing.expect(relative_file_uri.toPath().equalsText(relative_file_path));
}
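// Hedged sketch of writePath in use, mirroring the (now removed) main.zig call sites further
// below; explicit field initialization of stack.Fixed is assumed here because its init() helper
// is removed in this commit.
test "uri write path sketch" {
const file_uri = try Uri.parse("file:/path/to/file");
var path_stack = stack.Fixed(u8, Path.max){.filled = 0, .buffer = undefined};
try std.testing.expect(file_uri.writePath(path_stack.asWriter()));
}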
///
/// Opaque interface to a "writable" resource, such as a block device, memory buffer, or network
/// socket.

src/main.zig

@@ -2,27 +2,10 @@ const ext = @cImport({
@cInclude("SDL2/SDL.h");
});
const errors = @import("./errors.zig");
const io = @import("./io.zig");
const stack = @import("./stack.zig");
const std = @import("std");
const sys = @import("./sys.zig");
const Request = struct {
next: ?*Request = null,
frame: anyframe,
message: union(enum) {
close: struct {
file: *ext.SDL_RWops,
is_closed: *bool,
},
open_readable: struct {
uri: *const io.Uri,
file: ?*ext.SDL_RWops,
},
},
};
///
/// Entry point.
@@ -73,7 +56,7 @@ pub fn main() anyerror!void {
defer ext.SDL_DestroyRenderer(renderer);
-var request_chain = @as(?*Request, null);
+var event_loop = sys.EventLoop{};
var is_running = true;
while (is_running) {
@@ -97,43 +80,7 @@ pub fn main() anyerror!void {
}
ext.SDL_RenderPresent(renderer);
event_loop.tick();
while (request_chain) |request_head| {
const request = request_head;
request_chain = request_head.next;
switch (request.message) {
.close => |*close| close.is_closed.* = (ext.SDL_RWclose(close.file) == 0),
.open_readable => |*open_readable| {
if (open_readable.uri.isScheme("data")) {
var path = stack.Fixed(u8, 4096).init();
// These can never fail as the sum of the potential bytes written will
// always be less than 4096.
path.pushAll("./") catch unreachable;
std.debug.assert(open_readable.uri.writePath(path.asWriter()));
open_readable.file = ext.SDL_RWFromFile(&path.buffer, "r");
} else if (open_readable.uri.isScheme("user")) {
var path = stack.Fixed(u8, 4096).init();
const isOk = errors.isOk;
// Cannot guarantee that the sum of potential bytes written will always be
// less than path max.
if (isOk(stack.FinitePushError, path.pushAll(pref_path)) and
open_readable.uri.writePath(path.asWriter())) {
open_readable.file = ext.SDL_RWFromFile(&path.buffer, "r");
}
}
},
}
resume request.frame;
}
ext.SDL_Delay(1);
}
}

src/mem.zig (new file, 58 lines)

@@ -0,0 +1,58 @@
const std = @import("std");
///
/// State machine for lazily splitting [Spliterator.source] into the components separated by
/// the pattern in [Spliterator.delimiter].
///
pub fn Spliterator(comptime Element: type) type {
return struct {
source: []const Element,
delimiter: []const Element,
const Self = @This();
///
/// Returns `true` if there is more data to be processed, otherwise `false`.
///
/// Note that [Spliterator.next] implicitly calls this function to determine if it should
/// return another slice or `null`.
///
pub fn hasNext(self: Self) bool {
return (self.source.len != 0);
}
///
/// Iterates on `self` and returns the next view of [Spliterator.source] delimited by
/// [Spliterator.delimiter], or `null` if there is no more data to be processed.
///
pub fn next(self: *Self) ?[]const Element {
if (!self.hasNext()) return null;
if (self.delimiter.len == 0) {
defer self.source = self.source[self.source.len ..];
return self.source;
}
if (std.mem.indexOf(Element, self.source, self.delimiter)) |index| {
defer self.source = self.source[(index + self.delimiter.len) ..];
return self.source[0 .. index];
}
// No further delimiter matches: the remaining data is the final component.
defer self.source = self.source[self.source.len ..];
return self.source;
}
};
}
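// Hedged usage sketch: splits a byte sequence on "/" and counts the resulting components.
test "spliterator sketch" {
var spliterator = Spliterator(u8){.source = "path/to/file", .delimiter = "/"};
var component_count = @as(usize, 0);
while (spliterator.next()) |component| {
std.debug.assert(component.len != 0);
component_count += 1;
}
try std.testing.expectEqual(@as(usize, 3), component_count);
}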
///
/// Searches the slice of `Data` referenced by `data` for the first instance of `sought_datum`,
/// returning its index or `null` if it could not be found.
///
pub fn findFirst(comptime Data: type, data: []const Data, sought_datum: Data) ?usize {
for (data) |datum, index| if (datum == sought_datum) return index;
return null;
}
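// Hedged usage sketch: the index of the first ':' in "data:settings" is 4.
test "find first sketch" {
try std.testing.expectEqual(@as(?usize, 4), findFirst(u8, "data:settings", ':'));
}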

src/stack.zig

@@ -57,16 +57,6 @@ pub fn Fixed(comptime Element: type, comptime capacity: usize) type {
return self.filled;
}
///
/// Creates and returns a [Self] value.
///
pub fn init() Self {
return Self{
.filled = 0,
.buffer = undefined,
};
}
///
/// Attempts to pop the tail-end of `self`, returning the element value or `null` if the
/// stack is empty.
@@ -108,7 +98,7 @@ pub fn Fixed(comptime Element: type, comptime capacity: usize) type {
pub fn Unmanaged(comptime Element: type) type {
return struct {
-filled: usize = 0,
+filled: usize,
buffer: []Element,
const Self = @This();
@@ -147,16 +137,6 @@ pub fn Unmanaged(comptime Element: type) type {
return self.filled;
}
///
/// Creates and returns a [Self] value wrapping `buffer` as its writable memory buffer.
///
pub fn init(buffer: []Element) Self {
return Self{
.filled = 0,
.buffer = buffer,
};
}
///
/// Attempts to pop the tail-end of `self`, returning the element value or `null` if the
/// stack is empty.

src/sys.zig (new file, 318 lines)

@@ -0,0 +1,318 @@
const ext = @cImport({
@cInclude("SDL2/SDL.h");
});
const mem = @import("./mem.zig");
const stack = @import("./stack.zig");
const std = @import("std");
///
/// Event loop that queues asynchronous I/O requests and services them during [tick].
///
pub const EventLoop = packed struct {
current_request: ?*Request = null,
///
/// With files typically being backed by a block device, they can produce a variety of errors,
/// from physical to virtual. These are all encapsulated by the API as general
/// [FileError.Inaccessible] errors.
///
pub const FileError = error {
Inaccessible,
};
///
/// [OpenError.NotFound] is used as a catch-all for any hardware or software-specific reason for
/// failing to open a given file. This includes file-system restrictions surrounding a specific
/// file as well as it simply not existing.
///
/// [OpenError.OutOfFiles] occurs when there are no more resources available to open further
/// files. As a result, some open files must be closed before more may be opened.
///
pub const OpenError = error {
NotFound,
OutOfFiles,
};
///
/// Indicates what kind of access the consumer logic has to a file.
///
/// [OpenMode.read] is for reading from an existing file from the start.
///
/// [OpenMode.overwrite] is for deleting the contents of a file, or creating an empty one if no
/// such file exists, and writing to it from the start.
///
/// [OpenMode.append] is for writing additional contents onto the end of a file, creating an
/// empty one if no such file exists.
///
pub const OpenMode = enum {
read,
overwrite,
append,
};
const Request = struct {
next: ?*Request = null,
frame: anyframe,
message: union(enum) {
close: struct {
file: *ext.SDL_RWops,
},
open: struct {
path: *const Path,
file: OpenError!*ext.SDL_RWops,
mode: OpenMode,
},
},
};
const max_files = 512;
///
/// Asynchronously closes `file_access` via `event_loop`.
///
/// *Note* that `file_access` must have been opened by `event_loop` for it to be closed by it,
/// otherwise it will cause undefined behavior.
///
pub fn close(event_loop: *EventLoop, file_access: *FileAccess) void {
var request = Request{
.frame = @frame(),
.message = .{.close = .{.file = @ptrCast(*ext.SDL_RWops, file_access)}},
};
suspend {
if (event_loop.current_request) |current_request| {
// Append to the tail of the chain so that earlier pending requests are not dropped.
var tail_request = current_request;
while (tail_request.next) |next_request| tail_request = next_request;
tail_request.next = &request;
} else {
event_loop.current_request = &request;
}
}
}
///
/// Asynchronously attempts to open access to a file at `path` via `event_loop`, with `mode` as
/// the preferences for how it should be opened.
///
/// A reference to a [FileAccess] representing the bound file is returned if the operation was
/// successful, otherwise an [OpenError] if the file could not be opened.
///
/// *Note* that, regardless of platform, files will always be treated as containing binary data.
///
pub fn open(event_loop: *EventLoop, path: Path, mode: OpenMode) OpenError!*FileAccess {
var request = Request{
.frame = @frame(),
.message = .{
.open = .{
.path = &path,
.file = error.OutOfFiles,
.mode = mode,
},
},
};
suspend {
if (event_loop.current_request) |current_request| {
// Append to the tail of the chain so that earlier pending requests are not dropped.
var tail_request = current_request;
while (tail_request.next) |next_request| tail_request = next_request;
tail_request.next = &request;
} else {
event_loop.current_request = &request;
}
}
return @ptrCast(*FileAccess, try request.message.open.file);
}
///
/// Services every request currently queued on `event_loop`, resuming each suspended frame once
/// its request has been completed.
///
pub fn tick(event_loop: *EventLoop) void {
while (event_loop.current_request) |request| {
switch (request.message) {
.close => |*close| {
// Swallow file close errors.
_ = ext.SDL_RWclose(close.file);
},
.open => |*open| {
open.file = ext.SDL_RWFromFile(&open.path.buffer, switch (open.mode) {
.read => "rb",
.overwrite => "wb",
.append => "ab",
}) orelse error.NotFound;
},
}
// Advance the chain before resuming, as the request lives in the resumed frame and may be
// invalidated once that frame runs to completion.
event_loop.current_request = request.next;
resume request.frame;
}
}
};
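// Hedged usage sketch: how caller code might drive the event loop from a main loop.
// `loadSettings` and the "data:settings.ini" path are illustrative only and not part of the API.
fn loadSettings(event_loop: *EventLoop) !*FileAccess {
return event_loop.open(try Path.parse("data:settings.ini"), .read);
}
// In the main loop (see main.zig above):
//
//   var settings_frame = async loadSettings(&event_loop);
//   event_loop.tick(); // Services the queued request and resumes the suspended frame.
//   const settings_file = try await settings_frame;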
pub const FileAccess = opaque {
};
///
/// Platform-agnostic mechanism for accessing files on any of the virtual file-systems supported by
/// Ona.
///
pub const Path = struct {
locator: Locator,
length: u16,
buffer: [max]u8,
///
/// Virtual file-system locators supported by Ona.
///
pub const Locator = enum(u8) {
relative,
data,
user,
};
///
/// Errors that may occur during parsing of an incorrectly formatted [Path] URI.
///
pub const ParseError = (JoinError || error {
BadLocator,
});
///
/// Errors that may occur while joining components onto a [Path].
///
/// [JoinError.TooLong] occurs when the joined path would exceed the byte-length limit in [max].
///
pub const JoinError = error {
TooLong,
};
///
/// Returns `true` if the length of `path` is empty, otherwise `false`.
///
pub fn isEmpty(path: Path) bool {
return (path.length == 0);
}
///
/// Attempts to lazily join `components` into a new [Path] value derived from `path`, returning
/// it when `components` has no more data or a [JoinError] if the operation failed.
///
/// Any duplicate path components, such as trailing ASCII forward-slashes (`/`) or periods
/// (`.`), will be normalized to a more concise internal representation.
///
/// *Note* that `components` may be mutated during execution of the operation.
///
pub fn joinSpliterator(path: Path, components: *mem.Spliterator(u8)) JoinError!Path {
var joined_path = path;
var path_buffer = stack.Unmanaged(u8){
.buffer = &joined_path.buffer,
.filled = if (joined_path.length == 0) joined_path.length else (joined_path.length - 1),
};
if (components.next()) |component| {
if (component.len == 0) {
if (joined_path.isEmpty()) path_buffer.push('/') catch return error.TooLong;
} else if ((component.len == 1) and (component[0] == '.')) {
if (joined_path.isEmpty()) path_buffer.pushAll("./") catch return error.TooLong;
} else {
if (!joined_path.isEmpty()) path_buffer.push('/') catch return error.TooLong;
path_buffer.pushAll(component) catch return error.TooLong;
if (components.hasNext()) path_buffer.push('/') catch return error.TooLong;
}
}
while (components.next()) |component|
if ((component.len != 0) and !((component.len == 1) and (component[0] == '.'))) {
if (!joined_path.isEmpty()) path_buffer.push('/') catch return error.TooLong;
path_buffer.pushAll(component) catch return error.TooLong;
if (components.hasNext()) path_buffer.push('/') catch return error.TooLong;
};
// No space left over for the null terminator.
if (path_buffer.filled >= max) return error.TooLong;
joined_path.length = @intCast(u16, path_buffer.filled);
return joined_path;
}
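// Hedged usage sketch: joins two components onto a data-rooted path; the component string is
// illustrative only.
test "join spliterator sketch" {
var components = mem.Spliterator(u8){.source = "textures/player.bmp", .delimiter = "/"};
const sprite_path = try Path.from(.data).joinSpliterator(&components);
try std.testing.expect(!sprite_path.isEmpty());
try std.testing.expect(sprite_path.locator == .data);
}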
///
/// Returns `true` if `this` is equal to `that`, otherwise `false`.
///
pub fn equals(this: Path, that: Path) bool {
return (this.locator == that.locator) and
std.mem.eql(u8, this.buffer[0 .. this.length], that.buffer[0 .. that.length]);
}
///
/// Creates and returns an empty [Path] value rooted at the location of `locator`.
///
pub fn from(locator: Locator) Path {
return .{
.locator = locator,
.length = 0,
.buffer = std.mem.zeroes([max]u8),
};
}
///
/// The maximum possible byte-length of a [Path].
///
/// Note that paths are encoded using UTF-8, meaning that a character may be bigger than one
/// byte. Because of this, it is not safe to assume that a path may hold [max] individual
/// characters.
///
pub const max = 1000;
///
/// Attempts to parse the data in `sequence`, returning the [Path] value or an error from
/// [ParseError] if it failed to parse.
///
/// The rules surrounding the encoding of `sequence` are loose, with the only fundamental
/// requirements being:
///
/// * It starts with a locator key followed by an ASCII colon (`data:`, `user:`, etc.)
/// followed by the rest of the path.
///
/// * Each component of the path is separated by an ASCII forward-slash (`/`).
///
/// * A path that begins with an ASCII forward-slash ('/') after the locator key is considered
/// to be relative to the root of the specified locator key instead of relative to the path
/// caller.
///
/// Additional encoding rules are enforced by the underlying file-system being used. For example,
/// Microsoft Windows is case-insensitive while Unix and Linux systems are not. Additionally,
/// Windows has far more reserved characters and sequences which cannot be used when interfacing
/// with files compared to Linux and Unix systems.
///
/// See [ParseError] for more information on the kinds of errors that may be returned.
///
pub fn parse(sequence: []const u8) ParseError!Path {
if (sequence.len == 0) return Path.from(.relative);
if (mem.findFirst(u8, sequence, ':')) |locator_path_delimiter_index| {
const locator = std.meta.stringToEnum(Locator,
sequence[0 .. locator_path_delimiter_index]) orelse return error.BadLocator;
// The component data begins immediately after the locator delimiter.
var components = mem.Spliterator(u8){
.source = sequence[(locator_path_delimiter_index + 1) ..],
.delimiter = "/",
};
return Path.from(locator).joinSpliterator(&components);
}
var components = mem.Spliterator(u8){
.source = sequence,
.delimiter = "/",
};
return Path.from(.relative).joinSpliterator(&components);
}
};
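// Hedged sketch of the locator rules described in Path.parse above; the path literals are
// illustrative only.
test "path parse sketch" {
const user_path = try Path.parse("user:saves/slot_1");
const relative_path = try Path.parse("sprites/player.bmp");
try std.testing.expect(user_path.locator == .user);
try std.testing.expect(relative_path.locator == .relative);
}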