Add VFS syscalls
This commit is contained in:
parent ce051f0bbd
commit 426eb13d46
5 changed files with 855 additions and 24 deletions
filesystem/vfs.zig

@@ -211,13 +211,8 @@ pub const DirNode = struct {
     /// See the documentation for FileSystem.Open
     pub fn open(self: *const DirNode, name: []const u8, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node {
-        var fs = self.fs;
-        var node = self;
-        if (self.mount) |mnt| {
-            fs = mnt.fs;
-            node = mnt;
-        }
-        return fs.open(fs, node, name, flags, args);
+        var node = self.mount orelse self;
+        return node.fs.open(node.fs, node, name, flags, args);
     }

     /// See the documentation for FileSystem.Close

@@ -426,6 +421,20 @@ pub fn open(path: []const u8, follow_symlinks: bool, flags: OpenFlags, args: Ope
     return try traversePath(path, follow_symlinks, flags, args);
 }

+///
+/// Close a node.
+///
+/// Arguments:
+/// IN node: Node - The node to close
+///
+pub fn close(node: Node) void {
+    switch (node) {
+        .Dir => |d| d.close(),
+        .File => |f| f.close(),
+        .Symlink => |s| s.close(),
+    }
+}
+
 ///
 /// Open a file at a path.
 ///

@@ -592,7 +601,7 @@ const TestFS = struct {

     const Self = @This();

-    fn deinit(self: *@This()) void {
+    pub fn deinit(self: *@This()) void {
         self.tree.deinit(self.allocator);
         self.allocator.destroy(self.fs);
     }

@@ -718,7 +727,7 @@ const TestFS = struct {
     }
 };

-fn testInitFs(allocator: Allocator) !*TestFS {
+pub fn testInitFs(allocator: Allocator) !*TestFS {
     const fs = try allocator.create(FileSystem);
     var testfs = try allocator.create(TestFS);
     var root_node = try allocator.create(Node);
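A minimal usage sketch of the new close helper (not part of this commit): it assumes a mounted root filesystem and a hypothetical path, and mirrors how handleOpen in syscalls.zig pairs vfs.open with vfs.close.

// Sketch only: kernel-side caller, hypothetical path "/example.txt".
const vfs = @import("filesystem/vfs.zig");

fn touchAndClose() !void {
    // Open (creating if needed) a file node, then release it with the new close helper.
    const node = try vfs.open("/example.txt", true, vfs.OpenFlags.CREATE_FILE, .{});
    defer vfs.close(node.*);
}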
kmain.zig

@@ -17,6 +17,7 @@ const scheduler = @import("scheduler.zig");
 const vfs = @import("filesystem/vfs.zig");
 const initrd = @import("filesystem/initrd.zig");
 const keyboard = @import("keyboard.zig");
+const syscalls = @import("syscalls.zig");
 const Allocator = std.mem.Allocator;

 comptime {

@@ -96,6 +97,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
         panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
     };

+    syscalls.init(kernel_heap.allocator());
     tty.init(kernel_heap.allocator(), boot_payload);
     var arch_kb = keyboard.init(fixed_allocator.allocator()) catch |e| {
         panic_root.panic(@errorReturnTrace(), "Failed to inititalise keyboard: {}\n", .{e});
scheduler.zig

@@ -27,7 +27,7 @@ extern var KERNEL_STACK_START: []u32;
 extern var KERNEL_STACK_END: []u32;

 /// The current task running
-var current_task: *Task = undefined;
+pub var current_task: *Task = undefined;

 /// Array list of all runnable tasks
 var tasks: TailQueue(*Task) = undefined;
syscalls.zig

@@ -1,14 +1,99 @@
 const std = @import("std");
+const testing = std.testing;
+const is_test = @import("builtin").is_test;
 const scheduler = @import("scheduler.zig");
 const panic = @import("panic.zig").panic;
 const log = std.log.scoped(.syscalls);
 const arch = @import("arch.zig").internals;
+const vfs = @import("filesystem/vfs.zig");
+const task = @import("task.zig");
+const vmm = @import("vmm.zig");
+const mem = @import("mem.zig");
+const pmm = @import("pmm.zig");
+const bitmap = @import("bitmap.zig");

-/// A compilation of all errors that syscall handlers could return.
-pub const Error = error{OutOfMemory};
+var allocator: std.mem.Allocator = undefined;
+
+/// The maximum amount of data to allocate when copying user memory into kernel memory
+pub const USER_MAX_DATA_LEN = 16 * 1024;
+
+pub const Error = error{ NoMoreFSHandles, TooBig, NotAFile };

 /// All implemented syscalls
 pub const Syscall = enum {
+    /// Open a new vfs node
+    ///
+    /// Arguments:
+    /// path_ptr: usize - The user/kernel pointer to the file path to open
+    /// path_len: usize - The length of the file path
+    /// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags
+    /// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs
+    /// ignored: usize - Ignored
+    ///
+    /// Return: usize
+    /// The handle for the opened vfs node
+    ///
+    /// Error:
+    /// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles
+    /// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
+    /// TooBig - The path length is greater than allowed
+    /// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.)
+    /// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value
+    /// Refer to vfs.Error for details on what causes vfs errors
+    ///
+    Open,
+
+    /// Read data from an open vfs file
+    ///
+    /// Arguments:
+    /// node_handle: usize - The file handle returned from the open syscall
+    /// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in
+    /// buff_len: usize - The size of the buffer
+    /// ignored1: usize - Ignored
+    /// ignored2: usize - Ignored
+    ///
+    /// Return: usize
+    /// The number of bytes read and put into the buffer
+    ///
+    /// Error:
+    /// OutOfBounds - The node handle is outside of the maximum per process
+    /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
+    /// NotAFile - The handle does not correspond to a file
+    /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
+    ///
+    Read,
+    /// Write data from to open vfs file
+    ///
+    /// Arguments:
+    /// node_handle: usize - The file handle returned from the open syscall
+    /// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write
+    /// buff_len: usize - The size of the buffer
+    /// ignored1: usize - Ignored
+    /// ignored2: usize - Ignored
+    ///
+    /// Return: usize
+    /// The number of bytes written
+    ///
+    /// Error:
+    /// OutOfBounds - The node handle is outside of the maximum per process
+    /// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
+    /// NotAFile - The handle does not correspond to a file
+    /// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
+    ///
+    Write,
+    ///
+    /// Close an open vfs node. What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed
+    ///
+    /// Arguments:
+    /// node_handle: usize - The handle to close
+    /// ignored1..4: usize - Ignored
+    ///
+    /// Return: void
+    ///
+    /// Error:
+    /// OutOfBounds - The node handle is outside of the maximum per process
+    /// NotOpened - The node handle hasn't been opened
+    Close,
     Test1,
     Test2,
     Test3,
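For illustration only (not from this commit): the doc comments above fix how the five generic syscall arguments are packed for Open. The snippet below mirrors the "handleOpen" test added later in this file; the path is hypothetical and the vfs/VMM test scaffolding from those tests is assumed to be in place.

// Sketch: packing an Open call as pointer, length, flags, OpenArgs pointer, ignored.
fn exampleOpenCall(ctx: *const arch.CpuState) anyerror!usize {
    const path: []const u8 = "/abc.txt"; // hypothetical path
    return handle(.Open, ctx, @ptrToInt(path.ptr), path.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
}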
@@ -24,6 +109,10 @@ pub const Syscall = enum {
     ///
     fn getHandler(self: @This()) Handler {
         return switch (self) {
+            .Open => handleOpen,
+            .Read => handleRead,
+            .Write => handleWrite,
+            .Close => handleClose,
             .Test1 => handleTest1,
             .Test2 => handleTest2,
             .Test3 => handleTest3,

@@ -42,21 +131,26 @@ pub const Syscall = enum {
     pub fn isTest(self: @This()) bool {
         return switch (self) {
             .Test1, .Test2, .Test3 => true,
+            else => false,
         };
     }
 };

 /// A function that can handle a syscall and return a result or an error
-pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize;
+pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize;
+
+pub fn init(alloc: std.mem.Allocator) void {
+    allocator = alloc;
+}

 ///
-/// Convert an error code to an instance of Error. The conversion must be synchronised with toErrorCode
+/// Convert an error code to an instance of anyerror. The conversion must be synchronised with toErrorCode
 /// Passing an error code that does not correspond to an error results in safety-protected undefined behaviour
 ///
 /// Arguments:
 /// IN code: u16 - The erorr code to convert
 ///
-/// Return: Error
+/// Return: anyerror
 /// The error corresponding to the error code
 ///
 pub fn fromErrorCode(code: u16) anyerror {

@@ -64,10 +158,10 @@ pub fn fromErrorCode(code: u16) anyerror {
 }

 ///
-/// Convert an instance of Error to an error code. The conversion must be synchronised with fromErrorCode
+/// Convert an instance of anyerror to an error code. The conversion must be synchronised with fromErrorCode
 ///
 /// Arguments:
-/// IN err: Error - The erorr to convert
+/// IN err: anyerror - The erorr to convert
 ///
 /// Return: u16
 /// The error code corresponding to the error
@@ -86,14 +180,223 @@ pub fn toErrorCode(err: anyerror) u16 {
 /// Return: usize
 /// The syscall result
 ///
-/// Error: Error
+/// Error: anyerror
 /// The error raised by the handler
 ///
-pub fn handle(syscall: Syscall, ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
+pub fn handle(syscall: Syscall, ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
     return try syscall.getHandler()(ctx, arg1, arg2, arg3, arg4, arg5);
 }

-pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
+///
+/// Get a slice containing the data at an address and length. If the current task is a kernel task then a simple pointer to slice conversion is performed,
+/// otherwise the slice is allocated on the heap and the data is copied in from user space.
+///
+/// Arguments:
+/// IN ptr: usize - The slice's address
+/// IN len: usize - The number of bytes
+///
+/// Error: Error || Allocator.Error || VmmError || BitmapError
+/// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
+/// TooBig - The user task requested to have too much data copied
+/// NotAllocated - The pointer hasn't been mapped by the task
+/// OutOfBounds - The pointer and length is out of bounds of the task's VMM
+///
+/// Return: []u8
+/// The slice of data. Will be stack-allocated if the current task is kernel-level, otherwise will be heap-allocated
+///
+fn getData(ptr: usize, len: usize) (Error || std.mem.Allocator.Error || vmm.VmmError || bitmap.BitmapError)![]u8 {
+    if (scheduler.current_task.kernel) {
+        if (try vmm.kernel_vmm.isSet(ptr)) {
+            return @intToPtr([*]u8, ptr)[0..len];
+        } else {
+            return error.NotAllocated;
+        }
+    } else {
+        if (len > USER_MAX_DATA_LEN) {
+            return Error.TooBig;
+        }
+        var buff = try allocator.alloc(u8, len);
+        errdefer allocator.free(buff);
+        try vmm.kernel_vmm.copyData(scheduler.current_task.vmm, false, buff, ptr);
+        return buff;
+    }
+}
+
+/// Open a new vfs node
+///
+/// Arguments:
+/// path_ptr: usize - The user/kernel pointer to the file path to open
+/// path_len: usize - The length of the file path
+/// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags
+/// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs
+/// ignored: usize - Ignored
+///
+/// Return: usize
+/// The handle for the opened vfs node
+///
+/// Error:
+/// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles
+/// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
+/// TooBig - The path length is greater than allowed
+/// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.)
+/// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value
+/// Refer to vfs.Error for details on what causes vfs errors
+///
+fn handleOpen(ctx: *const arch.CpuState, path_ptr: usize, path_len: usize, flags: usize, args: usize, ignored: usize) anyerror!usize {
+    _ = ctx;
+    _ = ignored;
+    const current_task = scheduler.current_task;
+    if (!current_task.hasFreeVFSHandle()) {
+        return Error.NoMoreFSHandles;
+    }
+
+    // Fetch the open arguments from user/kernel memory
+    var open_args: vfs.OpenArgs = if (args == 0) .{} else blk: {
+        const data = try getData(args, @sizeOf(vfs.OpenArgs));
+        defer if (!current_task.kernel) allocator.free(data);
+        break :blk std.mem.bytesAsValue(vfs.OpenArgs, data[0..@sizeOf(vfs.OpenArgs)]).*;
+    };
+    // The symlink target could refer to a location in user memory so convert that too
+    if (open_args.symlink_target) |target| {
+        open_args.symlink_target = try getData(@ptrToInt(target.ptr), target.len);
+    }
+    defer if (!current_task.kernel) if (open_args.symlink_target) |target| allocator.free(target);
+
+    const open_flags = std.meta.intToEnum(vfs.OpenFlags, flags) catch return error.InvalidFlags;
+    const path = try getData(path_ptr, path_len);
+    defer if (!current_task.kernel) allocator.free(path);
+
+    const node = try vfs.open(path, true, open_flags, open_args);
+    errdefer vfs.close(node.*);
+    return (try current_task.addVFSHandle(node)) orelse panic(null, "Failed to add a VFS handle to current_task\n", .{});
+}
+
+/// Read data from an open vfs file
+///
+/// Arguments:
+/// node_handle: usize - The file handle returned from the open syscall
+/// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in
+/// buff_len: usize - The size of the buffer
+/// ignored1: usize - Ignored
+/// ignored2: usize - Ignored
+///
+/// Return: usize
+/// The number of bytes read and put into the buffer
+///
+/// Error:
+/// OutOfBounds - The node handle is outside of the maximum per process
+/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
+/// NotAFile - The handle does not correspond to a file
+/// NotOpened - The handle doesn't correspond to an opened file
+/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
+///
+fn handleRead(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize {
+    _ = ctx;
+    _ = ignored1;
+    _ = ignored2;
+    if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
+        return error.OutOfBounds;
+    const real_handle = @intCast(task.Handle, node_handle);
+    if (buff_len > USER_MAX_DATA_LEN) {
+        return Error.TooBig;
+    }
+
+    const current_task = scheduler.current_task;
+    const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
+    if (node_opt) |node| {
+        const file = switch (node.*) {
+            .File => |*f| f,
+            else => return error.NotAFile,
+        };
+        var buff = if (current_task.kernel) @intToPtr([*]u8, buff_ptr)[0..buff_len] else try allocator.alloc(u8, buff_len);
+        defer if (!current_task.kernel) allocator.free(buff);
+
+        const bytes_read = try file.read(buff);
+        // TODO: A more performant method would be mapping in the user memory and using that directly. Then we wouldn't need to allocate or copy the buffer
+        if (!current_task.kernel) try vmm.kernel_vmm.copyData(current_task.vmm, true, buff, buff_ptr);
+        return bytes_read;
+    }
+
+    return error.NotOpened;
+}
+
+/// Write data from to open vfs file
+///
+/// Arguments:
+/// node_handle: usize - The file handle returned from the open syscall
+/// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write
+/// buff_len: usize - The size of the buffer
+/// ignored1: usize - Ignored
+/// ignored2: usize - Ignored
+///
+/// Return: usize
+/// The number of bytes written
+///
+/// Error:
+/// OutOfBounds - The node handle is outside of the maximum per process
+/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
+/// NotAFile - The handle does not correspond to a file
+/// NotOpened - The handle doesn't correspond to an opened file
+/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
+///
+fn handleWrite(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize {
+    _ = ctx;
+    _ = ignored1;
+    _ = ignored2;
+    if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
+        return error.OutOfBounds;
+    const real_handle = @intCast(task.Handle, node_handle);
+
+    const current_task = scheduler.current_task;
+    const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
+    if (node_opt) |node| {
+        const file = switch (node.*) {
+            .File => |*f| f,
+            else => return error.NotAFile,
+        };
+
+        // TODO: A more performant method would be mapping in the user memory and using that directly. Then we wouldn't need to allocate or copy the buffer
+        var buff = try getData(buff_ptr, buff_len);
+        defer if (!current_task.kernel) allocator.free(buff);
+        return try file.write(buff);
+    }
+
+    return error.NotOpened;
+}
+
+///
+/// Close an open vfs node. What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed
+///
+/// Arguments:
+/// node_handle: usize - The handle to close
+/// ignored1..4: usize - Ignored
+///
+/// Return: void
+///
+/// Error:
+/// OutOfBounds - The node handle is outside of the maximum per process
+/// NotOpened - The node handle hasn't been opened
+fn handleClose(ctx: *const arch.CpuState, node_handle: usize, ignored1: usize, ignored2: usize, ignored3: usize, ignored4: usize) anyerror!usize {
+    _ = ctx;
+    _ = ignored1;
+    _ = ignored2;
+    _ = ignored3;
+    _ = ignored4;
+    if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
+        return error.OutOfBounds;
+    const real_handle = @intCast(task.Handle, node_handle);
+    const current_task = scheduler.current_task;
+    const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
+    if (node_opt) |node| {
+        current_task.clearVFSHandle(real_handle) catch |e| return switch (e) {
+            error.VFSHandleNotSet, error.OutOfBounds => error.NotOpened,
+        };
+        vfs.close(node.*);
+    }
+    return error.NotOpened;
+}
+
+pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
     // Suppress unused variable warnings
     _ = ctx;
     _ = arg1;
@@ -104,12 +407,12 @@ pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: us
     return 0;
 }

-pub fn handleTest2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
+pub fn handleTest2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
     _ = ctx;
     return arg1 + arg2 + arg3 + arg4 + arg5;
 }

-pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
+pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
     // Suppress unused variable warnings
     _ = ctx;
     _ = arg1;
@@ -117,18 +420,351 @@ pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: us
     _ = arg3;
     _ = arg4;
     _ = arg5;
-    return std.mem.Allocator.Error.OutOfMemory;
+    return error.OutOfMemory;
+}
+
+fn testInitMem(comptime num_vmm_entries: usize, alloc: std.mem.Allocator, map_all: bool) !std.heap.FixedBufferAllocator {
+    // handleOpen requires that the name passed is mapped in the VMM
+    // Allocate them within a buffer so we know the start and end address to give to the VMM
+    var buffer = try alloc.alloc(u8, num_vmm_entries * vmm.BLOCK_SIZE);
+    var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(buffer[0..]);
+
+    vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(fixed_buffer_allocator.buffer.ptr), @ptrToInt(fixed_buffer_allocator.buffer.ptr) + buffer.len, alloc, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
+    // The PMM is required as well
+    const mem_profile = mem.MemProfile{
+        .vaddr_end = undefined,
+        .vaddr_start = undefined,
+        .physaddr_start = undefined,
+        .physaddr_end = undefined,
+        .mem_kb = num_vmm_entries * vmm.BLOCK_SIZE / 1024,
+        .fixed_allocator = undefined,
+        .virtual_reserved = &[_]mem.Map{},
+        .physical_reserved = &[_]mem.Range{},
+        .modules = &[_]mem.Module{},
+    };
+    pmm.init(&mem_profile, alloc);
+    // Set the whole VMM space as mapped so all address within the buffer allocator will be considered valid
+    if (map_all) _ = try vmm.kernel_vmm.alloc(num_vmm_entries, null, .{ .kernel = true, .writable = true, .cachable = true });
+    return fixed_buffer_allocator;
+}
+
+fn testDeinitMem(alloc: std.mem.Allocator, buffer_allocator: std.heap.FixedBufferAllocator) void {
+    alloc.free(buffer_allocator.buffer);
+    vmm.kernel_vmm.deinit();
+    pmm.deinit();
 }

 test "getHandler" {
     try std.testing.expectEqual(Syscall.Test1.getHandler(), handleTest1);
     try std.testing.expectEqual(Syscall.Test2.getHandler(), handleTest2);
     try std.testing.expectEqual(Syscall.Test3.getHandler(), handleTest3);
+    try std.testing.expectEqual(Syscall.Open.getHandler(), handleOpen);
+    try std.testing.expectEqual(Syscall.Close.getHandler(), handleClose);
+    try std.testing.expectEqual(Syscall.Read.getHandler(), handleRead);
+    try std.testing.expectEqual(Syscall.Write.getHandler(), handleWrite);
 }

 test "handle" {
     const state = arch.CpuState.empty();
     try std.testing.expectEqual(@as(usize, 0), try handle(.Test1, &state, 0, 0, 0, 0, 0));
     try std.testing.expectEqual(@as(usize, 1 + 2 + 3 + 4 + 5), try handle(.Test2, &state, 1, 2, 3, 4, 5));
-    try std.testing.expectError(Error.OutOfMemory, handle(.Test3, &state, 0, 0, 0, 0, 0));
+    try std.testing.expectError(error.OutOfMemory, handle(.Test3, &state, 0, 0, 0, 0, 0));
+}
+
+test "handleOpen" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    defer allocator.destroy(testfs);
+    defer testfs.deinit();
+
+    testfs.instance = 1;
+    try vfs.setRoot(testfs.tree.val);
+
+    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
+    var buffer_allocator = fixed_buffer_allocator.allocator();
+    defer testDeinitMem(allocator, fixed_buffer_allocator);
+
+    scheduler.current_task = try task.Task.create(0, true, undefined, allocator, true);
+    defer scheduler.current_task.destroy(allocator);
+    var current_task = scheduler.current_task;
+
+    const empty = arch.CpuState.empty();
+
+    // Creating a file
+    var name1 = try buffer_allocator.dupe(u8, "/abc.txt");
+    var test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name1.ptr), name1.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
+    var test_node = (try current_task.getVFSHandle(test_handle)).?;
+    try testing.expectEqual(testfs.tree.children.items.len, 1);
+    var tree = testfs.tree.children.items[0];
+    try testing.expect(tree.val.isFile() and test_node.isFile());
+    try testing.expectEqual(&test_node.File, &tree.val.File);
+    try testing.expect(std.mem.eql(u8, tree.name, "abc.txt"));
+    try testing.expectEqual(tree.data, null);
+    try testing.expectEqual(tree.children.items.len, 0);
+
+    // Creating a dir
+    var name2 = try buffer_allocator.dupe(u8, "/def");
+    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined));
+    test_node = (try current_task.getVFSHandle(test_handle)).?;
+    try testing.expectEqual(testfs.tree.children.items.len, 2);
+    tree = testfs.tree.children.items[1];
+    try testing.expect(tree.val.isDir() and test_node.isDir());
+    try testing.expectEqual(&test_node.Dir, &tree.val.Dir);
+    try testing.expect(std.mem.eql(u8, tree.name, "def"));
+    try testing.expectEqual(tree.data, null);
+    try testing.expectEqual(tree.children.items.len, 0);
+
+    // Creating a file under a new dir
+    var name3 = try buffer_allocator.dupe(u8, "/def/ghi.zig");
+    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
+    test_node = (try current_task.getVFSHandle(test_handle)).?;
+    try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
+    tree = testfs.tree.children.items[1].children.items[0];
+    try testing.expect(tree.val.isFile() and test_node.isFile());
+    try testing.expectEqual(&test_node.File, &tree.val.File);
+    try testing.expect(std.mem.eql(u8, tree.name, "ghi.zig"));
+    try testing.expectEqual(tree.data, null);
+    try testing.expectEqual(tree.children.items.len, 0);
+
+    // Opening an existing file
+    test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.NO_CREATION), 0, undefined));
+    test_node = (try current_task.getVFSHandle(test_handle)).?;
+    try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
+    try testing.expect(test_node.isFile());
+    try testing.expectEqual(&test_node.File, &tree.val.File);
+}
+
+test "handleRead" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    defer allocator.destroy(testfs);
+    defer testfs.deinit();
+
+    testfs.instance = 1;
+    try vfs.setRoot(testfs.tree.val);
+
+    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
+    var buffer_allocator = fixed_buffer_allocator.allocator();
+    defer testDeinitMem(allocator, fixed_buffer_allocator);
+    scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
+    defer scheduler.current_task.destroy(allocator);
+    _ = scheduler.current_task;
+
+    const empty = arch.CpuState.empty();
+
+    var test_file_path = try buffer_allocator.dupe(u8, "/foo.txt");
+    var test_file = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(test_file_path.ptr), test_file_path.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
+    var f_data = &testfs.tree.children.items[0].data;
+    var str = "test123";
+    f_data.* = try testing.allocator.dupe(u8, str);
+
+    var buffer: [str.len]u8 = undefined;
+    {
+        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
+    }
+
+    {
+        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 1, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
+    }
+
+    {
+        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 3, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
+    }
+
+    {
+        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len - 1, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str[0 .. str.len - 1], buffer[0..length]));
+    }
+
+    {
+        const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), 0, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str[0..0], buffer[0..length]));
+    }
+    // Try reading from a symlink
+    var args = try buffer_allocator.create(vfs.OpenArgs);
+    args.* = vfs.OpenArgs{ .symlink_target = test_file_path };
+    var link = try buffer_allocator.dupe(u8, "/link");
+    var test_link = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(link.ptr), link.len, @enumToInt(vfs.OpenFlags.CREATE_SYMLINK), @ptrToInt(args), undefined));
+    {
+        const length = try handleRead(&empty, test_link, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
+        try testing.expect(std.mem.eql(u8, str[0..str.len], buffer[0..length]));
+    }
+}
+
+test "handleRead errors" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    {
+        defer allocator.destroy(testfs);
+        defer testfs.deinit();
+
+        testfs.instance = 1;
+        try vfs.setRoot(testfs.tree.val);
+
+        const empty = arch.CpuState.empty();
+
+        // The data we pass to handleRead needs to be mapped within the VMM, so we need to know their address
+        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
+        var fixed_buffer_allocator = try testInitMem(3, allocator, true);
+        var buffer_allocator = fixed_buffer_allocator.allocator();
+        defer testDeinitMem(allocator, fixed_buffer_allocator);
+
+        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
+        defer scheduler.current_task.destroy(allocator);
+
+        // Invalid file handle
+        try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
+        try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
+
+        // Unopened file
+        try testing.expectError(error.NotOpened, handleRead(&empty, 0, 0, 0, 0, 0));
+        try testing.expectError(error.NotOpened, handleRead(&empty, 1, 0, 0, 0, 0));
+        try testing.expectError(error.NotOpened, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
+
+        // Reading from a dir
+        const name = try buffer_allocator.dupe(u8, "/dir");
+        const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
+        try testing.expectError(error.NotAFile, handleRead(&empty, node, 0, 0, 0, 0));
+
+        // User buffer is too big
+        const name2 = try buffer_allocator.dupe(u8, "/file.txt");
+        const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
+        scheduler.current_task.kernel = false;
+        try testing.expectError(Error.TooBig, handleRead(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
+    }
+    try testing.expect(!testing.allocator_instance.detectLeaks());
+}
+
+test "handleWrite" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    defer allocator.destroy(testfs);
+    defer testfs.deinit();
+
+    testfs.instance = 1;
+    try vfs.setRoot(testfs.tree.val);
+
+    var fixed_buffer_allocator = try testInitMem(1, allocator, true);
+    var buffer_allocator = fixed_buffer_allocator.allocator();
+    defer testDeinitMem(allocator, fixed_buffer_allocator);
+
+    scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
+    defer scheduler.current_task.destroy(allocator);
+
+    const empty = arch.CpuState.empty();
+
+    // Open test file
+    const name = try buffer_allocator.dupe(u8, "/abc.txt");
+    const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
+
+    // Write
+    const data = try buffer_allocator.dupe(u8, "test_data 123");
+    const res = try handleWrite(&empty, node, @ptrToInt(data.ptr), data.len, 0, 0);
+    try testing.expectEqual(res, data.len);
+    try testing.expectEqualSlices(u8, data, testfs.tree.children.items[0].data.?);
+
+    // Write to a file in a folder
+    const name2 = try buffer_allocator.dupe(u8, "/dir");
+    _ = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined);
+    const name3 = try buffer_allocator.dupe(u8, "/dir/def.txt");
+    const node3 = try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
+    const data2 = try buffer_allocator.dupe(u8, "some more test data!");
+    const res2 = try handleWrite(&empty, node3, @ptrToInt(data2.ptr), data2.len, 0, 0);
+    try testing.expectEqual(res2, data2.len);
+    try testing.expectEqualSlices(u8, data2, testfs.tree.children.items[1].children.items[0].data.?);
+}
+
+test "handleWrite errors" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    {
+        defer allocator.destroy(testfs);
+        defer testfs.deinit();
+
+        testfs.instance = 1;
+        try vfs.setRoot(testfs.tree.val);
+
+        const empty = arch.CpuState.empty();
+
+        // The data we pass to handleWrite needs to be mapped within the VMM, so we need to know their address
+        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
+        var fixed_buffer_allocator = try testInitMem(3, allocator, true);
+        var buffer_allocator = fixed_buffer_allocator.allocator();
+        defer testDeinitMem(allocator, fixed_buffer_allocator);
+
+        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
+        defer scheduler.current_task.destroy(allocator);
+
+        // Invalid file handle
+        try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
+        try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
+
+        // Unopened file
+        try testing.expectError(error.NotOpened, handleWrite(&empty, 0, 0, 0, 0, 0));
+        try testing.expectError(error.NotOpened, handleWrite(&empty, 1, 0, 0, 0, 0));
+        try testing.expectError(error.NotOpened, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
+
+        // Writing to a dir
+        const name = try buffer_allocator.dupe(u8, "/dir");
+        const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
+        try testing.expectError(error.NotAFile, handleWrite(&empty, node, 0, 0, 0, 0));
+
+        // User buffer is too big
+        const name2 = try buffer_allocator.dupe(u8, "/file.txt");
+        const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
+        scheduler.current_task.kernel = false;
+        try testing.expectError(Error.TooBig, handleWrite(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
+    }
+    try testing.expect(!testing.allocator_instance.detectLeaks());
+}
+
+test "handleOpen errors" {
+    allocator = std.testing.allocator;
+    var testfs = try vfs.testInitFs(allocator);
+    {
+        defer allocator.destroy(testfs);
+        defer testfs.deinit();
+
+        testfs.instance = 1;
+        try vfs.setRoot(testfs.tree.val);
+
+        const empty = arch.CpuState.empty();
+
+        // The data we pass to handleOpen needs to be mapped within the VMM, so we need to know their address
+        // Allocating the data within a fixed buffer allocator is the best way to know the address of the data
+        var fixed_buffer_allocator = try testInitMem(3, allocator, false);
+        var buffer_allocator = fixed_buffer_allocator.allocator();
+        defer testDeinitMem(allocator, fixed_buffer_allocator);
+
+        scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
+        defer scheduler.current_task.destroy(allocator);
+
+        // Check opening with no free file handles left
+        const free_handles = scheduler.current_task.file_handles.num_free_entries;
+        scheduler.current_task.file_handles.num_free_entries = 0;
+        try testing.expectError(Error.NoMoreFSHandles, handleOpen(&empty, 0, 0, 0, 0, 0));
+        scheduler.current_task.file_handles.num_free_entries = free_handles;
+
+        // Using a path that is too long
+        scheduler.current_task.kernel = false;
+        try testing.expectError(Error.TooBig, handleOpen(&empty, 0, USER_MAX_DATA_LEN + 1, 0, 0, 0));
+
+        // Unallocated user address
+        const test_alloc = try buffer_allocator.alloc(u8, 1);
+        // The kernel VMM and task VMM need to have their buffers mapped, so we'll temporarily use the buffer allocator since it operates within a known address space
+        allocator = buffer_allocator;
+        try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
+        allocator = std.testing.allocator;
+
+        // Unallocated kernel address
+        scheduler.current_task.kernel = true;
+        try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
+
+        // Invalid flag enum value
+        try testing.expectError(error.InvalidFlags, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 999, 0, 0));
+    }
+    try testing.expect(!testing.allocator_instance.detectLeaks());
 }
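The commit adds the dispatcher but not the architecture glue that feeds it. A rough sketch of what that glue could look like, using only names introduced above; the interrupt-side signature and the "unknown syscall" result are assumptions, not part of this commit.

// Sketch only: hypothetical arch-side entry that forwards a raw syscall number.
fn onSyscallInterrupt(ctx: *const arch.CpuState, num: usize, a1: usize, a2: usize, a3: usize, a4: usize, a5: usize) usize {
    // Map the raw number onto the Syscall enum; an unknown number yields a sentinel result (assumed convention).
    const syscall = std.meta.intToEnum(Syscall, num) catch return std.math.maxInt(usize);
    // Dispatch and flatten any error into its u16 code via toErrorCode.
    return handle(syscall, ctx, a1, a2, a3, a4, a5) catch |e| toErrorCode(e);
}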
task.zig

@@ -1,6 +1,7 @@
 const std = @import("std");
 const expectEqual = std.testing.expectEqual;
 const expectError = std.testing.expectError;
+const expect = std.testing.expect;
 const builtin = @import("builtin");
 const is_test = builtin.is_test;
 const build_options = @import("build_options");

@@ -11,6 +12,7 @@ const pmm = @import("pmm.zig");
 const mem = @import("mem.zig");
 const elf = @import("elf.zig");
 const bitmap = @import("bitmap.zig");
+const vfs = @import("filesystem/vfs.zig");
 const Allocator = std.mem.Allocator;
 const log = std.log.scoped(.task);

@@ -18,6 +20,12 @@ const log = std.log.scoped(.task);
 /// as we cannot deallocate this.
 extern var KERNEL_STACK_START: *u32;

+/// The number of vfs handles that a process can have
+pub const VFS_HANDLES_PER_PROCESS = std.math.maxInt(Handle);
+
+/// A vfs handle. 65k is probably a good limit for the number of files a task can have open at once so we use u16 as the type
+pub const Handle = u16;
+
 /// The function type for the entry point.
 pub const EntryPoint = usize;

@@ -27,11 +35,18 @@ const PidBitmap = bitmap.Bitmap(1024, usize);
 /// The list of PIDs that have been allocated.
 var all_pids = PidBitmap.init(1024, null) catch unreachable;

+const FileHandleBitmap = bitmap.Bitmap(1024, usize);
+
 /// The default stack size of a task. Currently this is set to a page size.
 pub const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(u32);

 /// The task control block for storing all the information needed to save and restore a task.
 pub const Task = struct {
+    pub const Error = error{
+        /// The supplied vfs handle hasn't been allocated
+        VFSHandleNotSet,
+    };
+
     const Self = @This();

     /// The unique task identifier

@@ -52,6 +67,12 @@ pub const Task = struct {
     /// The virtual memory manager belonging to the task
     vmm: *vmm.VirtualMemoryManager(arch.VmmPayload),

+    /// The list of file handles for this process
+    file_handles: FileHandleBitmap,
+
+    /// The mapping between file handles and file nodes
+    file_handle_mapping: std.hash_map.AutoHashMap(Handle, *vfs.Node),
+
     ///
     /// Create a task. This will allocate a PID and the stack. The stack will be set up as a
     /// kernel task. As this is a new task, the stack will need to be initialised with the CPU

@@ -90,6 +111,8 @@ pub const Task = struct {
             .stack_pointer = if (!alloc_kernel_stack) 0 else @ptrToInt(&k_stack[STACK_SIZE - 1]),
             .kernel = kernel,
             .vmm = task_vmm,
+            .file_handles = FileHandleBitmap.init(null, null) catch unreachable,
+            .file_handle_mapping = std.hash_map.AutoHashMap(Handle, *vfs.Node).init(allocator),
         };

         try arch.initTask(task, entry_point, allocator, alloc_kernel_stack);

@@ -146,8 +169,97 @@ pub const Task = struct {
         if (!self.kernel) {
             allocator.free(self.user_stack);
         }
+        self.file_handle_mapping.deinit();
         allocator.destroy(self);
     }

+    ///
+    /// Get the VFS node associated with a VFS handle.
+    ///
+    /// Arguments:
+    /// IN self: *Self - The pointer to self.
+    /// IN handle: Handle - The handle to get the node for. Must have been returned from addVFSHandle.
+    ///
+    /// Return: *vfs.Node
+    /// The node associated with the handle.
+    ///
+    /// Error: bitmap.BitmapError
+    /// See Bitmap.
+    ///
+    pub fn getVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!?*vfs.Node {
+        return self.file_handle_mapping.get(handle);
+    }
+
+    ///
+    /// Check if the task has free handles to allocate.
+    ///
+    /// Arguments:
+    /// IN self: Self - The self.
+    ///
+    /// Return: bool
+    /// True if there are free handles, else false.
+    ///
+    pub fn hasFreeVFSHandle(self: Self) bool {
+        return self.file_handles.num_free_entries > 0;
+    }
+
+    ///
+    /// Add a handle associated with a node. The node can later be retrieved with getVFSHandle.
+    ///
+    /// Arguments:
+    /// IN self: *Self - The pointer to self.
+    /// IN node: *vfs.Node - The node to associate with the returned handle.
+    ///
+    /// Return: Handle
+    /// The handle now associated with the vfs node.
+    ///
+    /// Error: std.mem.Allocator.Error
+    ///
+    pub fn addVFSHandle(self: *Self, node: *vfs.Node) std.mem.Allocator.Error!?Handle {
+        if (self.file_handles.setFirstFree()) |handle| {
+            const real_handle = @intCast(Handle, handle);
+            try self.file_handle_mapping.put(real_handle, node);
+            return real_handle;
+        }
+        return null;
+    }
+
+    ///
+    /// Check if the task has a certain handle registered.
+    ///
+    /// Arguments:
+    /// IN self: Self - The self.
+    /// IN handle: Handle - The handle to check.
+    ///
+    /// Return: bool
+    /// True if the handle has been registered to this task, else false.
+    ///
+    /// Error: bitmap.BitmapError
+    /// See Bitmap.
+    ///
+    pub fn hasVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!bool {
+        return self.file_handles.isSet(handle);
+    }
+
+    ///
+    /// Clear a registered handle and de-associate the node from it.
+    ///
+    /// Arguments:
+    /// IN self: *Self - The pointer to self.
+    /// IN handle: Handle - The handle to clear. Must have been registered before.
+    ///
+    /// Error: bitmap.BitmapError || Error
+    /// bitmap.BitmapError.* - See bitmap.BitmapError
+    /// Error.VFSHandleNotSet - The handle has not previously been registered
+    ///
+    pub fn clearVFSHandle(self: *Self, handle: Handle) (bitmap.BitmapError || Error)!void {
+        if (try self.hasVFSHandle(handle)) {
+            try self.file_handles.clearEntry(handle);
+            _ = self.file_handle_mapping.remove(handle);
+        } else {
+            return Error.VFSHandleNotSet;
+        }
+    }
 };

 ///

@@ -381,3 +493,75 @@ test "create doesn't allocate kernel stack" {
     try std.testing.expectEqualSlices(usize, task.kernel_stack, &[_]usize{});
     try std.testing.expectEqual(task.stack_pointer, 0);
 }
+
+test "addVFSHandle" {
+    var task = try Task.create(0, true, undefined, std.testing.allocator, false);
+    defer task.destroy(std.testing.allocator);
+    var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
+    var node2 = vfs.Node{ .File = .{ .fs = undefined } };
+
+    const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
+    try expectEqual(handle1, 0);
+    try expectEqual(&node1, task.file_handle_mapping.get(handle1).?);
+    try expectEqual(true, try task.file_handles.isSet(handle1));
+
+    const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
+    try expectEqual(handle2, 1);
+    try expectEqual(&node2, task.file_handle_mapping.get(handle2).?);
+    try expectEqual(true, try task.file_handles.isSet(handle2));
+}
+
+test "hasFreeVFSHandle" {
+    var task = try Task.create(0, true, undefined, std.testing.allocator, false);
+    defer task.destroy(std.testing.allocator);
+    var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
+
+    try expect(task.hasFreeVFSHandle());
+
+    _ = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
+    try expect(task.hasFreeVFSHandle());
+
+    var i: usize = 0;
+    const free_entries = task.file_handles.num_free_entries;
+    while (i < free_entries) : (i += 1) {
+        try expect(task.hasFreeVFSHandle());
+        _ = task.file_handles.setFirstFree();
+    }
+    try expect(!task.hasFreeVFSHandle());
+}
+
+test "getVFSHandle" {
+    var task = try Task.create(0, true, undefined, std.testing.allocator, false);
+    defer task.destroy(std.testing.allocator);
+    var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
+    var node2 = vfs.Node{ .File = .{ .fs = undefined } };
+
+    const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
+    try expectEqual(&node1, (try task.getVFSHandle(handle1)).?);
+
+    const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
+    try expectEqual(&node2, (try task.getVFSHandle(handle2)).?);
+    try expectEqual(&node1, (try task.getVFSHandle(handle1)).?);
+
+    try expectEqual(task.getVFSHandle(handle2 + 1), null);
+}
+
+test "clearVFSHandle" {
+    var task = try Task.create(0, true, undefined, std.testing.allocator, false);
+    defer task.destroy(std.testing.allocator);
+    var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
+    var node2 = vfs.Node{ .File = .{ .fs = undefined } };
+
+    const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
+    const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
+
+    try task.clearVFSHandle(handle1);
+    try expectEqual(false, try task.hasVFSHandle(handle1));
+
+    try task.clearVFSHandle(handle2);
+    try expectEqual(false, try task.hasVFSHandle(handle2));
+
+    try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2 + 1));
+    try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2));
+    try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle1));
+}
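A short sketch of the per-task handle lifecycle introduced above (not part of this commit); the wrapper function and its error name are hypothetical, while the Task methods are the ones added in this file.

// Sketch: register an already-opened vfs node with a task, look it up, then release it.
fn trackNode(t: *Task, node: *vfs.Node) !void {
    if (!t.hasFreeVFSHandle()) return error.NoFreeHandles; // hypothetical caller-side error
    const handle = (try t.addVFSHandle(node)) orelse unreachable; // a free slot was checked just above
    std.debug.assert((try t.getVFSHandle(handle)).? == node); // the same node comes back for the handle
    try t.clearVFSHandle(handle); // frees the bitmap slot and removes the handle->node mapping
}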