Compare commits

...

11 commits

Author SHA1 Message Date
Imbus
66d15e8945 No brakes on the fix train
2024-06-24 21:10:31 +02:00
Imbus
98d876b18c Autofixes 2024-06-24 20:47:54 +02:00
Imbus
d3f278fc59 Build steps now silent, unclear if fixed 2024-06-24 20:47:27 +02:00
Imbus
6b03e8dfae Ignore .zig-cache 2024-06-24 20:46:59 +02:00
Sam Tebbs
426eb13d46 Add VFS syscalls 2023-01-19 22:09:48 +00:00
Sam Tebbs
ce051f0bbd
Merge pull request #329 from ZystemOS/bugfix/revert-packages
Revert "Split the project into packages."
2023-01-18 22:05:59 +00:00
Sam Tebbs
a023fadc24 Revert "Split the project into packages."
This reverts commit ab93a33bbd.
2023-01-18 21:38:25 +00:00
Sam Tebbs
b1718addde
Merge pull request #323 from ZystemOS/f/create-task-0-normally
Create task 0 like other tasks
2023-01-18 21:37:08 +00:00
Sam Tebbs
6156f1b30a Create task 0 like other tasks 2023-01-18 21:27:54 +00:00
Sam Tebbs
474073a695
Merge pull request #324 from Dawid33/arch-package
Putting arch file into a package.
2022-06-18 19:33:29 +01:00
Dawid Sobczak
ab93a33bbd Split the project into packages. 2022-06-18 17:15:46 +01:00
11 changed files with 969 additions and 131 deletions
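The build-script and kernel changes below largely track a newer Zig toolchain: std.build.Builder becomes std.Build, std.zig.CrossTarget becomes std.Target.Query, the .i386 architecture tag becomes .x86, and builtins such as @fieldParentPtr, @intCast and @truncate now take a single operand and infer their result type from the destination. As a rough illustration of the new builtin style only (simplified stand-ins for the structs in the diff, not part of the diff itself):

const Step = struct { name: []const u8 };
const RamdiskStep = struct { step: Step, size: usize };

fn fromStep(step: *Step) *RamdiskStep {
    // New style: the result type comes from the annotated destination, so
    // @fieldParentPtr only needs the field name and the field pointer.
    const self: *RamdiskStep = @fieldParentPtr("step", step);
    return self;
}

fn narrow(len: usize) u32 {
    // @intCast and @truncate likewise infer the destination type.
    const narrowed: u32 = @truncate(len);
    return narrowed;
}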

.gitignore vendored

@ -6,6 +6,7 @@
# Zig ignore
**/zig-cache/
**/.zig-cache/
**/zig-out/
**/build/
**/build-*/


@ -4,10 +4,10 @@ const builtin = @import("builtin");
const rt = @import("test/runtime_test.zig");
const RuntimeStep = rt.RuntimeStep;
const Allocator = std.mem.Allocator;
const Builder = std.build.Builder;
// const Builder = std.build.Builder;
const Step = std.build.Step;
const Target = std.Target;
const CrossTarget = std.zig.CrossTarget;
const CrossTarget = std.Target.Query;
const fs = std.fs;
const File = fs.File;
const Mode = std.builtin.Mode;
@ -16,12 +16,12 @@ const ArrayList = std.ArrayList;
const Fat32 = @import("mkfat32.zig").Fat32;
const x86_i686 = CrossTarget{
.cpu_arch = .i386,
.cpu_arch = .x86,
.os_tag = .freestanding,
.cpu_model = .{ .explicit = &Target.x86.cpu._i686 },
.cpu_model = .{ .explicit = &Target.x86.cpu.i686 },
};
pub fn build(b: *Builder) !void {
pub fn build(b: *std.Build) !void {
const target = b.standardTargetOptions(.{ .whitelist = &[_]CrossTarget{x86_i686}, .default_target = x86_i686 });
const arch = switch (target.getCpuArch()) {
.i386 => "x86",
@ -156,7 +156,7 @@ pub fn build(b: *Builder) !void {
try qemu_args_al.append("none");
}
var qemu_args = qemu_args_al.toOwnedSlice();
const qemu_args = qemu_args_al.toOwnedSlice();
const rt_step = RuntimeStep.create(b, test_mode, qemu_args);
rt_step.step.dependOn(&make_iso.step);
@ -197,7 +197,7 @@ const Fat32BuilderStep = struct {
step: Step,
/// The builder pointer, also all you need to know
builder: *Builder,
builder: *std.Build,
/// The path to where the ramdisk will be written to.
out_file_path: []const u8,
@ -217,7 +217,7 @@ const Fat32BuilderStep = struct {
/// Fat32.Error - If there was an error creating the FAT image. This will be invalid options.
///
fn make(step: *Step) (error{EndOfStream} || File.OpenError || File.ReadError || File.WriteError || File.SeekError || Fat32.Error)!void {
const self = @fieldParentPtr(Fat32BuilderStep, "step", step);
const self: *Fat32BuilderStep = @fieldParentPtr("step", step);
// Open the out file
const image = try std.fs.cwd().createFile(self.out_file_path, .{ .read = true });
@ -237,7 +237,7 @@ const Fat32BuilderStep = struct {
/// Return: *Fat32BuilderStep
/// The FAT32 builder step pointer to add to the build process.
///
pub fn create(builder: *Builder, options: Fat32.Options, out_file_path: []const u8) *Fat32BuilderStep {
pub fn create(builder: *std.Build, options: Fat32.Options, out_file_path: []const u8) *Fat32BuilderStep {
const fat32_builder_step = builder.allocator.create(Fat32BuilderStep) catch unreachable;
fat32_builder_step.* = .{
.step = Step.init(.custom, builder.fmt("Fat32BuilderStep", .{}), builder.allocator, make),
@ -255,7 +255,7 @@ const RamdiskStep = struct {
step: Step,
/// The builder pointer, also all you need to know
builder: *Builder,
builder: *std.Build,
/// The target for the build
target: CrossTarget,
@ -293,7 +293,7 @@ const RamdiskStep = struct {
// First write the number of files/headers
std.debug.assert(self.files.len < std.math.maxInt(Usize));
try ramdisk.writer().writeInt(Usize, @truncate(Usize, self.files.len), endian);
try ramdisk.writer().writeInt(Usize, @truncate(self.files.len), endian);
var current_offset: usize = 0;
for (self.files) |file_path| {
// Open, and read the file. Can get the size from this as well
@ -305,14 +305,14 @@ const RamdiskStep = struct {
// Write the header and file content to the ramdisk
// Name length
std.debug.assert(file_path[file_name_index..].len < std.math.maxInt(Usize));
try ramdisk.writer().writeInt(Usize, @truncate(Usize, file_path[file_name_index..].len), endian);
try ramdisk.writer().writeInt(Usize, @truncate(file_path[file_name_index..].len), endian);
// Name
try ramdisk.writer().writeAll(file_path[file_name_index..]);
// Length
std.debug.assert(file_content.len < std.math.maxInt(Usize));
try ramdisk.writer().writeInt(Usize, @truncate(Usize, file_content.len), endian);
try ramdisk.writer().writeInt(Usize, @truncate(file_content.len), endian);
// File content
try ramdisk.writer().writeAll(file_content);
@ -333,7 +333,7 @@ const RamdiskStep = struct {
/// Errors for opening, reading and writing to and from files and for allocating memory.
///
fn make(step: *Step) Error!void {
const self = @fieldParentPtr(RamdiskStep, "step", step);
const self: *RamdiskStep = @fieldParentPtr("step", step);
switch (self.target.getCpuArch()) {
.i386 => try writeRamdisk(u32, self),
else => unreachable,
@ -352,7 +352,7 @@ const RamdiskStep = struct {
/// Return: *RamdiskStep
/// The ramdisk step pointer to add to the build process.
///
pub fn create(builder: *Builder, target: CrossTarget, files: []const []const u8, out_file_path: []const u8) *RamdiskStep {
pub fn create(builder: *std.Build, target: CrossTarget, files: []const []const u8, out_file_path: []const u8) *RamdiskStep {
const ramdisk_step = builder.allocator.create(RamdiskStep) catch unreachable;
ramdisk_step.* = .{
.step = Step.init(.custom, builder.fmt("Ramdisk", .{}), builder.allocator, make),


@ -311,13 +311,13 @@ pub const Fat32 = struct {
return switch (image_size) {
0...35 * 512 - 1 => Error.TooSmall,
35 * 512...64 * MB - 1 => @intCast(u8, std.math.max(512, bytes_per_sector) / bytes_per_sector),
64 * MB...128 * MB - 1 => @intCast(u8, std.math.max(1024, bytes_per_sector) / bytes_per_sector),
128 * MB...256 * MB - 1 => @intCast(u8, std.math.max(2048, bytes_per_sector) / bytes_per_sector),
256 * MB...8 * GB - 1 => @intCast(u8, std.math.max(4096, bytes_per_sector) / bytes_per_sector),
8 * GB...16 * GB - 1 => @intCast(u8, std.math.max(8192, bytes_per_sector) / bytes_per_sector),
16 * GB...32 * GB - 1 => @intCast(u8, std.math.max(16384, bytes_per_sector) / bytes_per_sector),
32 * GB...2 * TB - 1 => @intCast(u8, std.math.max(32768, bytes_per_sector) / bytes_per_sector),
35 * 512...64 * MB - 1 => @intCast(std.math.max(512, bytes_per_sector) / bytes_per_sector),
64 * MB...128 * MB - 1 => @intCast(std.math.max(1024, bytes_per_sector) / bytes_per_sector),
128 * MB...256 * MB - 1 => @intCast(std.math.max(2048, bytes_per_sector) / bytes_per_sector),
256 * MB...8 * GB - 1 => @intCast(std.math.max(4096, bytes_per_sector) / bytes_per_sector),
8 * GB...16 * GB - 1 => @intCast(std.math.max(8192, bytes_per_sector) / bytes_per_sector),
16 * GB...32 * GB - 1 => @intCast(std.math.max(16384, bytes_per_sector) / bytes_per_sector),
32 * GB...2 * TB - 1 => @intCast(std.math.max(32768, bytes_per_sector) / bytes_per_sector),
else => Error.TooLarge,
};
}
@ -354,7 +354,7 @@ pub const Fat32 = struct {
fn createSerialNumber() u32 {
// TODO: Get the actual date. Currently there is no std lib for human readable date.
const year = 2020;
const month = 09;
const month = 9;
const day = 27;
const hour = 13;
const minute = 46;
@ -541,14 +541,14 @@ pub const Fat32 = struct {
}
// See: https://board.flatassembler.net/topic.php?t=12680
var sectors_per_fat = @intCast(u32, (image_size - getReservedSectors() + (2 * options.cluster_size)) / ((options.cluster_size * (options.sector_size / 4)) + 2));
var sectors_per_fat: u8 = @intCast((image_size - getReservedSectors() + (2 * options.cluster_size)) / ((options.cluster_size * (options.sector_size / 4)) + 2));
// round up sectors
sectors_per_fat = (sectors_per_fat + options.sector_size - 1) / options.sector_size;
return Header{
.bytes_per_sector = options.sector_size,
.sectors_per_cluster = options.cluster_size,
.total_sectors = @intCast(u32, @divExact(image_size, options.sector_size)),
.total_sectors = @intCast(@divExact(image_size, options.sector_size)),
.sectors_per_fat = sectors_per_fat,
.serial_number = createSerialNumber(),
.volume_label = options.volume_name,


@ -530,8 +530,8 @@ pub fn initKeyboard(allocator: Allocator) Allocator.Error!*Keyboard {
}
///
/// Initialise a stack used for creating a task.
/// Currently only support fn () noreturn functions for the entry point.
/// Initialise a stack and vmm payload used for creating a task.
/// Currently only supports fn () noreturn functions for the entry point.
///
/// Arguments:
/// IN task: *Task - The task to be initialised. The function will only modify whatever
@ -540,62 +540,66 @@ pub fn initKeyboard(allocator: Allocator) Allocator.Error!*Keyboard {
/// IN entry_point: usize - The pointer to the entry point of the function. Functions only
/// supported is fn () noreturn
/// IN allocator: Allocator - The allocator use for allocating a stack.
/// IN set_up_stack: bool - Set up the kernel and user stacks (register values, PC etc.) for task entry
///
/// Error: Allocator.Error
/// OutOfMemory - Unable to allocate space for the stack.
///
pub fn initTask(task: *Task, entry_point: usize, allocator: Allocator) Allocator.Error!void {
const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11;
// Setting the bottom two bits of the code offset designates that this is a ring 3 task
const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11;
// Ring switches push and pop two extra values on interrupt: user_esp and user_ss
const kernel_stack_bottom = if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20;
var stack = &task.kernel_stack;
pub fn initTask(task: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
// TODO Will need to add the exit point
// Set up everything as a kernel task
task.vmm.payload = &paging.kernel_directory;
stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
stack.*[kernel_stack_bottom + 1] = data_offset; // gs
stack.*[kernel_stack_bottom + 2] = data_offset; // fs
stack.*[kernel_stack_bottom + 3] = data_offset; // es
stack.*[kernel_stack_bottom + 4] = data_offset; // ds
stack.*[kernel_stack_bottom + 5] = 0; // edi
stack.*[kernel_stack_bottom + 6] = 0; // esi
// End of the stack
stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp
stack.*[kernel_stack_bottom + 8] = 0; // esp (temp) this won't be popped by popa bc intel is dump XD
var stack = &task.kernel_stack;
const kernel_stack_bottom = if (!set_up_stack) 0 else if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20;
if (set_up_stack) {
const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11;
// Setting the bottom two bits of the code offset designates that this is a ring 3 task
const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11;
// Ring switches push and pop two extra values on interrupt: user_esp and user_ss
stack.*[kernel_stack_bottom + 9] = 0; // ebx
stack.*[kernel_stack_bottom + 10] = 0; // edx
stack.*[kernel_stack_bottom + 11] = 0; // ecx
stack.*[kernel_stack_bottom + 12] = 0; // eax
stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
stack.*[kernel_stack_bottom + 1] = data_offset; // gs
stack.*[kernel_stack_bottom + 2] = data_offset; // fs
stack.*[kernel_stack_bottom + 3] = data_offset; // es
stack.*[kernel_stack_bottom + 4] = data_offset; // ds
stack.*[kernel_stack_bottom + 13] = 0; // int_num
stack.*[kernel_stack_bottom + 14] = 0; // error_code
stack.*[kernel_stack_bottom + 5] = 0; // edi
stack.*[kernel_stack_bottom + 6] = 0; // esi
// End of the stack
stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp
stack.*[kernel_stack_bottom + 8] = 0; // esp (temp) this won't be popped by popa bc intel is dump XD
stack.*[kernel_stack_bottom + 15] = entry_point; // eip
stack.*[kernel_stack_bottom + 16] = code_offset; // cs
stack.*[kernel_stack_bottom + 17] = 0x202; // eflags
stack.*[kernel_stack_bottom + 9] = 0; // ebx
stack.*[kernel_stack_bottom + 10] = 0; // edx
stack.*[kernel_stack_bottom + 11] = 0; // ecx
stack.*[kernel_stack_bottom + 12] = 0; // eax
if (!task.kernel) {
// Put the extra values on the kernel stack needed when changing privilege levels
stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp
stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss
stack.*[kernel_stack_bottom + 13] = 0; // int_num
stack.*[kernel_stack_bottom + 14] = 0; // error_code
if (!builtin.is_test) {
// Create a new page directory for the user task by mirroring the kernel directory
// We need kernel mem mapped so we don't get a page fault when entering kernel code from an interrupt
task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0];
task.vmm.payload.* = paging.kernel_directory.copy();
stack.*[kernel_stack_bottom + 15] = entry_point; // eip
stack.*[kernel_stack_bottom + 16] = code_offset; // cs
stack.*[kernel_stack_bottom + 17] = 0x202; // eflags
if (!task.kernel) {
// Put the extra values on the kernel stack needed when changing privilege levels
stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp
stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss
}
task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]);
}
if (!task.kernel and !builtin.is_test) {
// Create a new page directory for the user task by mirroring the kernel directory
// We need kernel mem mapped so we don't get a page fault when entering kernel code from an interrupt
task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0];
task.vmm.payload.* = paging.kernel_directory.copy();
if (set_up_stack) {
stack.*[kernel_stack_bottom] = vmm.kernel_vmm.virtToPhys(@ptrToInt(task.vmm.payload)) catch |e| {
panic(@errorReturnTrace(), "Failed to get the physical address of the user task's page directory: {}\n", .{e});
};
}
}
task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]);
}
///


@ -211,13 +211,8 @@ pub const DirNode = struct {
/// See the documentation for FileSystem.Open
pub fn open(self: *const DirNode, name: []const u8, flags: OpenFlags, args: OpenArgs) (Allocator.Error || Error)!*Node {
var fs = self.fs;
var node = self;
if (self.mount) |mnt| {
fs = mnt.fs;
node = mnt;
}
return fs.open(fs, node, name, flags, args);
var node = self.mount orelse self;
return node.fs.open(node.fs, node, name, flags, args);
}
/// See the documentation for FileSystem.Close
@ -426,6 +421,20 @@ pub fn open(path: []const u8, follow_symlinks: bool, flags: OpenFlags, args: Ope
return try traversePath(path, follow_symlinks, flags, args);
}
///
/// Close a node.
///
/// Arguments:
/// IN node: Node - The node to close
///
pub fn close(node: Node) void {
switch (node) {
.Dir => |d| d.close(),
.File => |f| f.close(),
.Symlink => |s| s.close(),
}
}
///
/// Open a file at a path.
///
@ -592,7 +601,7 @@ const TestFS = struct {
const Self = @This();
fn deinit(self: *@This()) void {
pub fn deinit(self: *@This()) void {
self.tree.deinit(self.allocator);
self.allocator.destroy(self.fs);
}
@ -718,7 +727,7 @@ const TestFS = struct {
}
};
fn testInitFs(allocator: Allocator) !*TestFS {
pub fn testInitFs(allocator: Allocator) !*TestFS {
const fs = try allocator.create(FileSystem);
var testfs = try allocator.create(TestFS);
var root_node = try allocator.create(Node);


@ -17,6 +17,7 @@ const scheduler = @import("scheduler.zig");
const vfs = @import("filesystem/vfs.zig");
const initrd = @import("filesystem/initrd.zig");
const keyboard = @import("keyboard.zig");
const syscalls = @import("syscalls.zig");
const Allocator = std.mem.Allocator;
comptime {
@ -96,6 +97,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
};
syscalls.init(kernel_heap.allocator());
tty.init(kernel_heap.allocator(), boot_payload);
var arch_kb = keyboard.init(fixed_allocator.allocator()) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to inititalise keyboard: {}\n", .{e});
@ -144,7 +146,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
kmain_log.info("Creating init2\n", .{});
// Create a init2 task
var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, kernel_heap.allocator()) catch |e| {
var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, kernel_heap.allocator(), true) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to create init stage 2 task: {}\n", .{e});
};
scheduler.scheduleTask(stage2_task, kernel_heap.allocator()) catch |e| {


@ -27,7 +27,7 @@ extern var KERNEL_STACK_START: []u32;
extern var KERNEL_STACK_END: []u32;
/// The current task running
var current_task: *Task = undefined;
pub var current_task: *Task = undefined;
/// Array list of all runnable tasks
var tasks: TailQueue(*Task) = undefined;
@ -137,15 +137,13 @@ pub fn init(allocator: Allocator, mem_profile: *const mem.MemProfile) Allocator.
// Init the task list for round robin
tasks = TailQueue(*Task){};
// Set up the init task to continue execution
current_task = try allocator.create(Task);
// Set up the init task to continue execution.
// The kernel stack will point to the stack section rather than the heap
current_task = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
errdefer allocator.destroy(current_task);
// PID 0
current_task.pid = 0;
const kernel_stack_size = @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START);
current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
current_task.user_stack = &[_]usize{};
current_task.kernel = true;
// ESP will be saved on next schedule
// Run the runtime tests here
@ -155,7 +153,7 @@ pub fn init(allocator: Allocator, mem_profile: *const mem.MemProfile) Allocator.
}
// Create the idle task when there are no more tasks left
var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator);
var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator, true);
errdefer idle_task.destroy(allocator);
try scheduleTask(idle_task, allocator);
@ -195,21 +193,21 @@ test "pickNextTask" {
tasks = TailQueue(*Task){};
// Set up a current task
var first = try allocator.create(Task);
var first = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
// We use an intermediary variable to avoid a double-free.
// Deferring freeing current_task will free whatever current_task points to at the end
defer allocator.destroy(first);
defer first.destroy(allocator);
current_task = first;
current_task.pid = 0;
current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
current_task.stack_pointer = @ptrToInt(&KERNEL_STACK_START);
// Create two tasks and schedule them
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
defer test_fn1_task.destroy(allocator);
try scheduleTask(test_fn1_task, allocator);
var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator);
var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator, true);
defer test_fn2_task.destroy(allocator);
try scheduleTask(test_fn2_task, allocator);
@ -254,7 +252,7 @@ test "createNewTask add new task" {
// Init the task list
tasks = TailQueue(*Task){};
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
defer test_fn1_task.destroy(allocator);
try scheduleTask(test_fn1_task, allocator);
@ -309,7 +307,7 @@ fn rt_variable_preserved(allocator: Allocator) void {
defer allocator.destroy(is_set);
is_set.* = true;
var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator, true) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
scheduleTask(test_task, allocator) catch |e| panic(@errorReturnTrace(), "Failed to schedule a task in rt_variable_preserved: {}\n", .{e});
// TODO: Need to add the ability to remove tasks
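For reference, the scheduler changes above thread a new boolean through Task.create so the init task (PID 0), which runs on the linker-provided kernel stack, skips heap stack allocation and stack setup, while every other task still gets a freshly initialised stack. The two call patterns, taken straight from the diff (illustrative recap only):

// Init task: reuse the linker-script kernel stack, no stack setup.
current_task = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);

// Ordinary kernel task: allocate and initialise a fresh kernel stack.
var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator, true);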


@ -1,14 +1,99 @@
const std = @import("std");
const testing = std.testing;
const is_test = @import("builtin").is_test;
const scheduler = @import("scheduler.zig");
const panic = @import("panic.zig").panic;
const log = std.log.scoped(.syscalls);
const arch = @import("arch.zig").internals;
const vfs = @import("filesystem/vfs.zig");
const task = @import("task.zig");
const vmm = @import("vmm.zig");
const mem = @import("mem.zig");
const pmm = @import("pmm.zig");
const bitmap = @import("bitmap.zig");
/// A compilation of all errors that syscall handlers could return.
pub const Error = error{OutOfMemory};
var allocator: std.mem.Allocator = undefined;
/// The maximum amount of data to allocate when copying user memory into kernel memory
pub const USER_MAX_DATA_LEN = 16 * 1024;
pub const Error = error{ NoMoreFSHandles, TooBig, NotAFile };
/// All implemented syscalls
pub const Syscall = enum {
/// Open a new vfs node
///
/// Arguments:
/// path_ptr: usize - The user/kernel pointer to the file path to open
/// path_len: usize - The length of the file path
/// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags
/// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs
/// ignored: usize - Ignored
///
/// Return: usize
/// The handle for the opened vfs node
///
/// Error:
/// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles
/// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
/// TooBig - The path length is greater than allowed
/// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.)
/// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value
/// Refer to vfs.Error for details on what causes vfs errors
///
Open,
/// Read data from an open vfs file
///
/// Arguments:
/// node_handle: usize - The file handle returned from the open syscall
/// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in
/// buff_len: usize - The size of the buffer
/// ignored1: usize - Ignored
/// ignored2: usize - Ignored
///
/// Return: usize
/// The number of bytes read and put into the buffer
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
/// NotAFile - The handle does not correspond to a file
/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
///
Read,
/// Write data to an open vfs file
///
/// Arguments:
/// node_handle: usize - The file handle returned from the open syscall
/// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write
/// buff_len: usize - The size of the buffer
/// ignored1: usize - Ignored
/// ignored2: usize - Ignored
///
/// Return: usize
/// The number of bytes written
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
/// NotAFile - The handle does not correspond to a file
/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
///
Write,
///
/// Close an open vfs node. What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed
///
/// Arguments:
/// node_handle: usize - The handle to close
/// ignored1..4: usize - Ignored
///
/// Return: void
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// NotOpened - The node handle hasn't been opened
Close,
Test1,
Test2,
Test3,
@ -24,6 +109,10 @@ pub const Syscall = enum {
///
fn getHandler(self: @This()) Handler {
return switch (self) {
.Open => handleOpen,
.Read => handleRead,
.Write => handleWrite,
.Close => handleClose,
.Test1 => handleTest1,
.Test2 => handleTest2,
.Test3 => handleTest3,
@ -42,21 +131,26 @@ pub const Syscall = enum {
pub fn isTest(self: @This()) bool {
return switch (self) {
.Test1, .Test2, .Test3 => true,
else => false,
};
}
};
/// A function that can handle a syscall and return a result or an error
pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize;
pub const Handler = fn (ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize;
pub fn init(alloc: std.mem.Allocator) void {
allocator = alloc;
}
///
/// Convert an error code to an instance of Error. The conversion must be synchronised with toErrorCode
/// Convert an error code to an instance of anyerror. The conversion must be synchronised with toErrorCode
/// Passing an error code that does not correspond to an error results in safety-protected undefined behaviour
///
/// Arguments:
/// IN code: u16 - The error code to convert
///
/// Return: Error
/// Return: anyerror
/// The error corresponding to the error code
///
pub fn fromErrorCode(code: u16) anyerror {
@ -64,10 +158,10 @@ pub fn fromErrorCode(code: u16) anyerror {
}
///
/// Convert an instance of Error to an error code. The conversion must be synchronised with fromErrorCode
/// Convert an instance of anyerror to an error code. The conversion must be synchronised with fromErrorCode
///
/// Arguments:
/// IN err: Error - The erorr to convert
/// IN err: anyerror - The error to convert
///
/// Return: u16
/// The error code corresponding to the error
@ -86,14 +180,223 @@ pub fn toErrorCode(err: anyerror) u16 {
/// Return: usize
/// The syscall result
///
/// Error: Error
/// Error: anyerror
/// The error raised by the handler
///
pub fn handle(syscall: Syscall, ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
pub fn handle(syscall: Syscall, ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
return try syscall.getHandler()(ctx, arg1, arg2, arg3, arg4, arg5);
}
pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
///
/// Get a slice containing the data at an address and length. If the current task is a kernel task then a simple pointer to slice conversion is performed,
/// otherwise the slice is allocated on the heap and the data is copied in from user space.
///
/// Arguments:
/// IN ptr: usize - The slice's address
/// IN len: usize - The number of bytes
///
/// Error: Error || Allocator.Error || VmmError || BitmapError
/// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
/// TooBig - The user task requested to have too much data copied
/// NotAllocated - The pointer hasn't been mapped by the task
/// OutOfBounds - The pointer and length is out of bounds of the task's VMM
///
/// Return: []u8
/// The slice of data. Will be stack-allocated if the current task is kernel-level, otherwise will be heap-allocated
///
fn getData(ptr: usize, len: usize) (Error || std.mem.Allocator.Error || vmm.VmmError || bitmap.BitmapError)![]u8 {
if (scheduler.current_task.kernel) {
if (try vmm.kernel_vmm.isSet(ptr)) {
return @intToPtr([*]u8, ptr)[0..len];
} else {
return error.NotAllocated;
}
} else {
if (len > USER_MAX_DATA_LEN) {
return Error.TooBig;
}
var buff = try allocator.alloc(u8, len);
errdefer allocator.free(buff);
try vmm.kernel_vmm.copyData(scheduler.current_task.vmm, false, buff, ptr);
return buff;
}
}
/// Open a new vfs node
///
/// Arguments:
/// path_ptr: usize - The user/kernel pointer to the file path to open
/// path_len: usize - The length of the file path
/// flags: usize - The flag specifying what to do with the opened node. Use the integer value of vfs.OpenFlags
/// args: usize - The user/kernel pointer to the structure holding the vfs.OpenArgs
/// ignored: usize - Ignored
///
/// Return: usize
/// The handle for the opened vfs node
///
/// Error:
/// NoMoreFSHandles - The task has reached the maximum number of allowed vfs handles
/// OutOfMemory - There wasn't enough kernel (heap or VMM) memory left to fulfill the request.
/// TooBig - The path length is greater than allowed
/// InvalidAddress - A pointer that the user task passed is invalid (not mapped, out of bounds etc.)
/// InvalidFlags - The flags provided don't correspond to a vfs.OpenFlags value
/// Refer to vfs.Error for details on what causes vfs errors
///
fn handleOpen(ctx: *const arch.CpuState, path_ptr: usize, path_len: usize, flags: usize, args: usize, ignored: usize) anyerror!usize {
_ = ctx;
_ = ignored;
const current_task = scheduler.current_task;
if (!current_task.hasFreeVFSHandle()) {
return Error.NoMoreFSHandles;
}
// Fetch the open arguments from user/kernel memory
var open_args: vfs.OpenArgs = if (args == 0) .{} else blk: {
const data = try getData(args, @sizeOf(vfs.OpenArgs));
defer if (!current_task.kernel) allocator.free(data);
break :blk std.mem.bytesAsValue(vfs.OpenArgs, data[0..@sizeOf(vfs.OpenArgs)]).*;
};
// The symlink target could refer to a location in user memory so convert that too
if (open_args.symlink_target) |target| {
open_args.symlink_target = try getData(@ptrToInt(target.ptr), target.len);
}
defer if (!current_task.kernel) if (open_args.symlink_target) |target| allocator.free(target);
const open_flags = std.meta.intToEnum(vfs.OpenFlags, flags) catch return error.InvalidFlags;
const path = try getData(path_ptr, path_len);
defer if (!current_task.kernel) allocator.free(path);
const node = try vfs.open(path, true, open_flags, open_args);
errdefer vfs.close(node.*);
return (try current_task.addVFSHandle(node)) orelse panic(null, "Failed to add a VFS handle to current_task\n", .{});
}
/// Read data from an open vfs file
///
/// Arguments:
/// node_handle: usize - The file handle returned from the open syscall
/// buff_ptr: usize ` - The user/kernel address of the buffer to put the read data in
/// buff_len: usize - The size of the buffer
/// ignored1: usize - Ignored
/// ignored2: usize - Ignored
///
/// Return: usize
/// The number of bytes read and put into the buffer
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
/// NotAFile - The handle does not correspond to a file
/// NotOpened - The handle doesn't correspond to an opened file
/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
///
fn handleRead(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize {
_ = ctx;
_ = ignored1;
_ = ignored2;
if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
return error.OutOfBounds;
const real_handle = @intCast(task.Handle, node_handle);
if (buff_len > USER_MAX_DATA_LEN) {
return Error.TooBig;
}
const current_task = scheduler.current_task;
const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
if (node_opt) |node| {
const file = switch (node.*) {
.File => |*f| f,
else => return error.NotAFile,
};
var buff = if (current_task.kernel) @intToPtr([*]u8, buff_ptr)[0..buff_len] else try allocator.alloc(u8, buff_len);
defer if (!current_task.kernel) allocator.free(buff);
const bytes_read = try file.read(buff);
// TODO: A more performant method would be mapping in the user memory and using that directly. Then we wouldn't need to allocate or copy the buffer
if (!current_task.kernel) try vmm.kernel_vmm.copyData(current_task.vmm, true, buff, buff_ptr);
return bytes_read;
}
return error.NotOpened;
}
/// Write data to an open vfs file
///
/// Arguments:
/// node_handle: usize - The file handle returned from the open syscall
/// buff_ptr: usize ` - The user/kernel address of the buffer containing the data to write
/// buff_len: usize - The size of the buffer
/// ignored1: usize - Ignored
/// ignored2: usize - Ignored
///
/// Return: usize
/// The number of bytes written
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// TooBig - The buffer is bigger than what a user process is allowed to give the kernel
/// NotAFile - The handle does not correspond to a file
/// NotOpened - The handle doesn't correspond to an opened file
/// Refer to vfs.FileNode.read and vmm.VirtualMemoryManager.copyData for details on what causes other errors
///
fn handleWrite(ctx: *const arch.CpuState, node_handle: usize, buff_ptr: usize, buff_len: usize, ignored1: usize, ignored2: usize) anyerror!usize {
_ = ctx;
_ = ignored1;
_ = ignored2;
if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
return error.OutOfBounds;
const real_handle = @intCast(task.Handle, node_handle);
const current_task = scheduler.current_task;
const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
if (node_opt) |node| {
const file = switch (node.*) {
.File => |*f| f,
else => return error.NotAFile,
};
// TODO: A more performant method would be mapping in the user memory and using that directly. Then we wouldn't need to allocate or copy the buffer
var buff = try getData(buff_ptr, buff_len);
defer if (!current_task.kernel) allocator.free(buff);
return try file.write(buff);
}
return error.NotOpened;
}
///
/// Close an open vfs node. What it means to "close" depends on the underlying file system, but often it will cause the file to be committed to disk or for a network socket to be closed
///
/// Arguments:
/// node_handle: usize - The handle to close
/// ignored1..4: usize - Ignored
///
/// Return: void
///
/// Error:
/// OutOfBounds - The node handle is outside of the maximum per process
/// NotOpened - The node handle hasn't been opened
fn handleClose(ctx: *const arch.CpuState, node_handle: usize, ignored1: usize, ignored2: usize, ignored3: usize, ignored4: usize) anyerror!usize {
_ = ctx;
_ = ignored1;
_ = ignored2;
_ = ignored3;
_ = ignored4;
if (node_handle >= task.VFS_HANDLES_PER_PROCESS)
return error.OutOfBounds;
const real_handle = @intCast(task.Handle, node_handle);
const current_task = scheduler.current_task;
const node_opt = current_task.getVFSHandle(real_handle) catch panic(@errorReturnTrace(), "Failed to get VFS node for handle {}\n", .{real_handle});
if (node_opt) |node| {
current_task.clearVFSHandle(real_handle) catch |e| return switch (e) {
error.VFSHandleNotSet, error.OutOfBounds => error.NotOpened,
};
vfs.close(node.*);
}
return error.NotOpened;
}
pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
// Suppress unused variable warnings
_ = ctx;
_ = arg1;
@ -104,12 +407,12 @@ pub fn handleTest1(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: us
return 0;
}
pub fn handleTest2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
pub fn handleTest2(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
_ = ctx;
return arg1 + arg2 + arg3 + arg4 + arg5;
}
pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) Error!usize {
pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: usize, arg4: usize, arg5: usize) anyerror!usize {
// Suppress unused variable warnings
_ = ctx;
_ = arg1;
@ -117,18 +420,351 @@ pub fn handleTest3(ctx: *const arch.CpuState, arg1: usize, arg2: usize, arg3: us
_ = arg3;
_ = arg4;
_ = arg5;
return std.mem.Allocator.Error.OutOfMemory;
return error.OutOfMemory;
}
fn testInitMem(comptime num_vmm_entries: usize, alloc: std.mem.Allocator, map_all: bool) !std.heap.FixedBufferAllocator {
// handleOpen requires that the name passed is mapped in the VMM
// Allocate them within a buffer so we know the start and end address to give to the VMM
var buffer = try alloc.alloc(u8, num_vmm_entries * vmm.BLOCK_SIZE);
var fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(buffer[0..]);
vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(fixed_buffer_allocator.buffer.ptr), @ptrToInt(fixed_buffer_allocator.buffer.ptr) + buffer.len, alloc, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
// The PMM is required as well
const mem_profile = mem.MemProfile{
.vaddr_end = undefined,
.vaddr_start = undefined,
.physaddr_start = undefined,
.physaddr_end = undefined,
.mem_kb = num_vmm_entries * vmm.BLOCK_SIZE / 1024,
.fixed_allocator = undefined,
.virtual_reserved = &[_]mem.Map{},
.physical_reserved = &[_]mem.Range{},
.modules = &[_]mem.Module{},
};
pmm.init(&mem_profile, alloc);
// Set the whole VMM space as mapped so all address within the buffer allocator will be considered valid
if (map_all) _ = try vmm.kernel_vmm.alloc(num_vmm_entries, null, .{ .kernel = true, .writable = true, .cachable = true });
return fixed_buffer_allocator;
}
fn testDeinitMem(alloc: std.mem.Allocator, buffer_allocator: std.heap.FixedBufferAllocator) void {
alloc.free(buffer_allocator.buffer);
vmm.kernel_vmm.deinit();
pmm.deinit();
}
test "getHandler" {
try std.testing.expectEqual(Syscall.Test1.getHandler(), handleTest1);
try std.testing.expectEqual(Syscall.Test2.getHandler(), handleTest2);
try std.testing.expectEqual(Syscall.Test3.getHandler(), handleTest3);
try std.testing.expectEqual(Syscall.Open.getHandler(), handleOpen);
try std.testing.expectEqual(Syscall.Close.getHandler(), handleClose);
try std.testing.expectEqual(Syscall.Read.getHandler(), handleRead);
try std.testing.expectEqual(Syscall.Write.getHandler(), handleWrite);
}
test "handle" {
const state = arch.CpuState.empty();
try std.testing.expectEqual(@as(usize, 0), try handle(.Test1, &state, 0, 0, 0, 0, 0));
try std.testing.expectEqual(@as(usize, 1 + 2 + 3 + 4 + 5), try handle(.Test2, &state, 1, 2, 3, 4, 5));
try std.testing.expectError(Error.OutOfMemory, handle(.Test3, &state, 0, 0, 0, 0, 0));
try std.testing.expectError(error.OutOfMemory, handle(.Test3, &state, 0, 0, 0, 0, 0));
}
test "handleOpen" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
var fixed_buffer_allocator = try testInitMem(1, allocator, true);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, undefined, allocator, true);
defer scheduler.current_task.destroy(allocator);
var current_task = scheduler.current_task;
const empty = arch.CpuState.empty();
// Creating a file
var name1 = try buffer_allocator.dupe(u8, "/abc.txt");
var test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name1.ptr), name1.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
var test_node = (try current_task.getVFSHandle(test_handle)).?;
try testing.expectEqual(testfs.tree.children.items.len, 1);
var tree = testfs.tree.children.items[0];
try testing.expect(tree.val.isFile() and test_node.isFile());
try testing.expectEqual(&test_node.File, &tree.val.File);
try testing.expect(std.mem.eql(u8, tree.name, "abc.txt"));
try testing.expectEqual(tree.data, null);
try testing.expectEqual(tree.children.items.len, 0);
// Creating a dir
var name2 = try buffer_allocator.dupe(u8, "/def");
test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined));
test_node = (try current_task.getVFSHandle(test_handle)).?;
try testing.expectEqual(testfs.tree.children.items.len, 2);
tree = testfs.tree.children.items[1];
try testing.expect(tree.val.isDir() and test_node.isDir());
try testing.expectEqual(&test_node.Dir, &tree.val.Dir);
try testing.expect(std.mem.eql(u8, tree.name, "def"));
try testing.expectEqual(tree.data, null);
try testing.expectEqual(tree.children.items.len, 0);
// Creating a file under a new dir
var name3 = try buffer_allocator.dupe(u8, "/def/ghi.zig");
test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
test_node = (try current_task.getVFSHandle(test_handle)).?;
try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
tree = testfs.tree.children.items[1].children.items[0];
try testing.expect(tree.val.isFile() and test_node.isFile());
try testing.expectEqual(&test_node.File, &tree.val.File);
try testing.expect(std.mem.eql(u8, tree.name, "ghi.zig"));
try testing.expectEqual(tree.data, null);
try testing.expectEqual(tree.children.items.len, 0);
// Opening an existing file
test_handle = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.NO_CREATION), 0, undefined));
test_node = (try current_task.getVFSHandle(test_handle)).?;
try testing.expectEqual(testfs.tree.children.items[1].children.items.len, 1);
try testing.expect(test_node.isFile());
try testing.expectEqual(&test_node.File, &tree.val.File);
}
test "handleRead" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
var fixed_buffer_allocator = try testInitMem(1, allocator, true);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
defer scheduler.current_task.destroy(allocator);
_ = scheduler.current_task;
const empty = arch.CpuState.empty();
var test_file_path = try buffer_allocator.dupe(u8, "/foo.txt");
var test_file = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(test_file_path.ptr), test_file_path.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined));
var f_data = &testfs.tree.children.items[0].data;
var str = "test123";
f_data.* = try testing.allocator.dupe(u8, str);
var buffer: [str.len]u8 = undefined;
{
const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
}
{
const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 1, 0, undefined);
try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
}
{
const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len + 3, 0, undefined);
try testing.expect(std.mem.eql(u8, str, buffer[0..length]));
}
{
const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), buffer.len - 1, 0, undefined);
try testing.expect(std.mem.eql(u8, str[0 .. str.len - 1], buffer[0..length]));
}
{
const length = try handleRead(&empty, test_file, @ptrToInt(&buffer[0]), 0, 0, undefined);
try testing.expect(std.mem.eql(u8, str[0..0], buffer[0..length]));
}
// Try reading from a symlink
var args = try buffer_allocator.create(vfs.OpenArgs);
args.* = vfs.OpenArgs{ .symlink_target = test_file_path };
var link = try buffer_allocator.dupe(u8, "/link");
var test_link = @intCast(task.Handle, try handleOpen(&empty, @ptrToInt(link.ptr), link.len, @enumToInt(vfs.OpenFlags.CREATE_SYMLINK), @ptrToInt(args), undefined));
{
const length = try handleRead(&empty, test_link, @ptrToInt(&buffer[0]), buffer.len, 0, undefined);
try testing.expect(std.mem.eql(u8, str[0..str.len], buffer[0..length]));
}
}
test "handleRead errors" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
{
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
const empty = arch.CpuState.empty();
// The data we pass to handleRead needs to be mapped within the VMM, so we need to know their address
// Allocating the data within a fixed buffer allocator is the best way to know the address of the data
var fixed_buffer_allocator = try testInitMem(3, allocator, true);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
defer scheduler.current_task.destroy(allocator);
// Invalid file handle
try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
try testing.expectError(error.OutOfBounds, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
// Unopened file
try testing.expectError(error.NotOpened, handleRead(&empty, 0, 0, 0, 0, 0));
try testing.expectError(error.NotOpened, handleRead(&empty, 1, 0, 0, 0, 0));
try testing.expectError(error.NotOpened, handleRead(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
// Reading from a dir
const name = try buffer_allocator.dupe(u8, "/dir");
const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
try testing.expectError(error.NotAFile, handleRead(&empty, node, 0, 0, 0, 0));
// User buffer is too big
const name2 = try buffer_allocator.dupe(u8, "/file.txt");
const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
scheduler.current_task.kernel = false;
try testing.expectError(Error.TooBig, handleRead(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
}
try testing.expect(!testing.allocator_instance.detectLeaks());
}
test "handleWrite" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
var fixed_buffer_allocator = try testInitMem(1, allocator, true);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
defer scheduler.current_task.destroy(allocator);
const empty = arch.CpuState.empty();
// Open test file
const name = try buffer_allocator.dupe(u8, "/abc.txt");
const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
// Write
const data = try buffer_allocator.dupe(u8, "test_data 123");
const res = try handleWrite(&empty, node, @ptrToInt(data.ptr), data.len, 0, 0);
try testing.expectEqual(res, data.len);
try testing.expectEqualSlices(u8, data, testfs.tree.children.items[0].data.?);
// Write to a file in a folder
const name2 = try buffer_allocator.dupe(u8, "/dir");
_ = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, undefined);
const name3 = try buffer_allocator.dupe(u8, "/dir/def.txt");
const node3 = try handleOpen(&empty, @ptrToInt(name3.ptr), name3.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, undefined);
const data2 = try buffer_allocator.dupe(u8, "some more test data!");
const res2 = try handleWrite(&empty, node3, @ptrToInt(data2.ptr), data2.len, 0, 0);
try testing.expectEqual(res2, data2.len);
try testing.expectEqualSlices(u8, data2, testfs.tree.children.items[1].children.items[0].data.?);
}
test "handleWrite errors" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
{
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
const empty = arch.CpuState.empty();
// The data we pass to handleWrite needs to be mapped within the VMM, so we need to know their address
// Allocating the data within a fixed buffer allocator is the best way to know the address of the data
var fixed_buffer_allocator = try testInitMem(3, allocator, true);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
defer scheduler.current_task.destroy(allocator);
// Invalid file handle
try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS, 0, 0, 0, 0));
try testing.expectError(error.OutOfBounds, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS + 1, 0, 0, 0, 0));
// Unopened file
try testing.expectError(error.NotOpened, handleWrite(&empty, 0, 0, 0, 0, 0));
try testing.expectError(error.NotOpened, handleWrite(&empty, 1, 0, 0, 0, 0));
try testing.expectError(error.NotOpened, handleWrite(&empty, task.VFS_HANDLES_PER_PROCESS - 1, 0, 0, 0, 0));
// Writing to a dir
const name = try buffer_allocator.dupe(u8, "/dir");
const node = try handleOpen(&empty, @ptrToInt(name.ptr), name.len, @enumToInt(vfs.OpenFlags.CREATE_DIR), 0, 0);
try testing.expectError(error.NotAFile, handleWrite(&empty, node, 0, 0, 0, 0));
// User buffer is too big
const name2 = try buffer_allocator.dupe(u8, "/file.txt");
const node2 = try handleOpen(&empty, @ptrToInt(name2.ptr), name2.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
scheduler.current_task.kernel = false;
try testing.expectError(Error.TooBig, handleWrite(&empty, node2, 0, USER_MAX_DATA_LEN + 1, 0, 0));
}
try testing.expect(!testing.allocator_instance.detectLeaks());
}
test "handleOpen errors" {
allocator = std.testing.allocator;
var testfs = try vfs.testInitFs(allocator);
{
defer allocator.destroy(testfs);
defer testfs.deinit();
testfs.instance = 1;
try vfs.setRoot(testfs.tree.val);
const empty = arch.CpuState.empty();
// The data we pass to handleOpen needs to be mapped within the VMM, so we need to know their address
// Allocating the data within a fixed buffer allocator is the best way to know the address of the data
var fixed_buffer_allocator = try testInitMem(3, allocator, false);
var buffer_allocator = fixed_buffer_allocator.allocator();
defer testDeinitMem(allocator, fixed_buffer_allocator);
scheduler.current_task = try task.Task.create(0, true, &vmm.kernel_vmm, allocator, true);
defer scheduler.current_task.destroy(allocator);
// Check opening with no free file handles left
const free_handles = scheduler.current_task.file_handles.num_free_entries;
scheduler.current_task.file_handles.num_free_entries = 0;
try testing.expectError(Error.NoMoreFSHandles, handleOpen(&empty, 0, 0, 0, 0, 0));
scheduler.current_task.file_handles.num_free_entries = free_handles;
// Using a path that is too long
scheduler.current_task.kernel = false;
try testing.expectError(Error.TooBig, handleOpen(&empty, 0, USER_MAX_DATA_LEN + 1, 0, 0, 0));
// Unallocated user address
const test_alloc = try buffer_allocator.alloc(u8, 1);
// The kernel VMM and task VMM need to have their buffers mapped, so we'll temporarily use the buffer allocator since it operates within a known address space
allocator = buffer_allocator;
try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
allocator = std.testing.allocator;
// Unallocated kernel address
scheduler.current_task.kernel = true;
try testing.expectError(error.NotAllocated, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 0, 0, 0));
// Invalid flag enum value
try testing.expectError(error.InvalidFlags, handleOpen(&empty, @ptrToInt(test_alloc.ptr), 1, 999, 0, 0));
}
try testing.expect(!testing.allocator_instance.detectLeaks());
}
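Taken together, the new handlers give kernel code a small file API behind the syscall dispatcher. A minimal kernel-side sketch using the names from syscalls.zig above, assuming the VFS root, kernel VMM and scheduler.current_task have been set up as in the tests, and that the path lives in VMM-mapped memory (illustrative only, not part of the diff):

fn exerciseFileSyscalls(path: []const u8) !void {
    const state = arch.CpuState.empty();
    var buffer: [128]u8 = undefined;

    // Open (or create) the file, read it back, then release the handle.
    const fd = try handle(.Open, &state, @ptrToInt(path.ptr), path.len, @enumToInt(vfs.OpenFlags.CREATE_FILE), 0, 0);
    const read_len = try handle(.Read, &state, fd, @ptrToInt(&buffer[0]), buffer.len, 0, 0);
    _ = try handle(.Close, &state, fd, 0, 0, 0, 0);
    _ = buffer[0..read_len];
}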


@ -1,6 +1,7 @@
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const expect = std.testing.expect;
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
@ -11,6 +12,7 @@ const pmm = @import("pmm.zig");
const mem = @import("mem.zig");
const elf = @import("elf.zig");
const bitmap = @import("bitmap.zig");
const vfs = @import("filesystem/vfs.zig");
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.task);
@ -18,6 +20,12 @@ const log = std.log.scoped(.task);
/// as we cannot deallocate this.
extern var KERNEL_STACK_START: *u32;
/// The number of vfs handles that a process can have
pub const VFS_HANDLES_PER_PROCESS = std.math.maxInt(Handle);
/// A vfs handle. 65k is probably a good limit for the number of files a task can have open at once so we use u16 as the type
pub const Handle = u16;
/// The function type for the entry point.
pub const EntryPoint = usize;
@ -25,18 +33,20 @@ pub const EntryPoint = usize;
const PidBitmap = bitmap.Bitmap(1024, usize);
/// The list of PIDs that have been allocated.
var all_pids: PidBitmap = init: {
var pids = PidBitmap.init(1024, null) catch unreachable;
// Reserve PID 0 for the init task
_ = pids.setFirstFree() orelse unreachable;
break :init pids;
};
var all_pids = PidBitmap.init(1024, null) catch unreachable;
const FileHandleBitmap = bitmap.Bitmap(1024, usize);
/// The default stack size of a task. Currently this is set to a page size.
pub const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(u32);
/// The task control block for storing all the information needed to save and restore a task.
pub const Task = struct {
pub const Error = error{
/// The supplied vfs handle hasn't been allocated
VFSHandleNotSet,
};
const Self = @This();
/// The unique task identifier
@ -57,6 +67,12 @@ pub const Task = struct {
/// The virtual memory manager belonging to the task
vmm: *vmm.VirtualMemoryManager(arch.VmmPayload),
/// The list of file handles for this process
file_handles: FileHandleBitmap,
/// The mapping between file handles and file nodes
file_handle_mapping: std.hash_map.AutoHashMap(Handle, *vfs.Node),
///
/// Create a task. This will allocate a PID and the stack. The stack will be set up as a
/// kernel task. As this is a new task, the stack will need to be initialised with the CPU
@ -75,15 +91,15 @@ pub const Task = struct {
/// OutOfMemory - If there is no more memory to allocate. Any memory or PID allocated will
/// be freed on return.
///
pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator) Allocator.Error!*Task {
pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator, alloc_kernel_stack: bool) Allocator.Error!*Task {
var task = try allocator.create(Task);
errdefer allocator.destroy(task);
const pid = allocatePid();
errdefer freePid(pid) catch |e| panic(@errorReturnTrace(), "Failed to free task PID in errdefer ({}): {}\n", .{ pid, e });
var k_stack = try allocator.alloc(usize, STACK_SIZE);
errdefer allocator.free(k_stack);
var k_stack = if (alloc_kernel_stack) try allocator.alloc(usize, STACK_SIZE) else &[_]usize{};
errdefer if (alloc_kernel_stack) allocator.free(k_stack);
var u_stack = if (kernel) &[_]usize{} else try allocator.alloc(usize, STACK_SIZE);
errdefer if (!kernel) allocator.free(u_stack);
@ -92,18 +108,20 @@ pub const Task = struct {
.pid = pid,
.kernel_stack = k_stack,
.user_stack = u_stack,
.stack_pointer = @ptrToInt(&k_stack[STACK_SIZE - 1]),
.stack_pointer = if (!alloc_kernel_stack) 0 else @ptrToInt(&k_stack[STACK_SIZE - 1]),
.kernel = kernel,
.vmm = task_vmm,
.file_handles = FileHandleBitmap.init(null, null) catch unreachable,
.file_handle_mapping = std.hash_map.AutoHashMap(Handle, *vfs.Node).init(allocator),
};
try arch.initTask(task, entry_point, allocator);
try arch.initTask(task, entry_point, allocator, alloc_kernel_stack);
return task;
}
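The new alloc_kernel_stack flag exists so that task 0 can keep the stack reserved in the linker script instead of allocating a fresh one. A minimal sketch of the two call shapes; kernel_vmm, allocator and the entry functions are assumed placeholders from the surrounding init code, not names from this change:

// Ordinary kernel task: allocate its kernel stack as before.
const worker = try Task.create(@ptrToInt(workerEntry), true, &kernel_vmm, allocator, true);
// Task 0 (init): reuse the stack laid out by the linker script, so skip the allocation.
const init_task = try Task.create(@ptrToInt(initEntry), true, &kernel_vmm, allocator, false);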
pub fn createFromElf(program_elf: elf.Elf, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator) (bitmap.BitmapError || vmm.VmmError || Allocator.Error)!*Task {
const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator);
const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator, true);
errdefer task.destroy(allocator);
// Iterate over sections
@ -145,14 +163,103 @@ pub const Task = struct {
freePid(self.pid) catch |e| panic(@errorReturnTrace(), "Failed to free task's PID ({}): {}\n", .{ self.pid, e });
// We need to check that the stack has been allocated, as task 0 (init) won't have a
// stack allocated since this is set up in the linker script
if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START) and self.kernel_stack.len > 0) {
allocator.free(self.kernel_stack);
}
if (!self.kernel) {
allocator.free(self.user_stack);
}
self.file_handle_mapping.deinit();
allocator.destroy(self);
}
///
/// Get the VFS node associated with a VFS handle.
///
/// Arguments:
/// IN self: *Self - The pointer to self.
/// IN handle: Handle - The handle to get the node for. Must have been returned from addVFSHandle.
///
/// Return: *vfs.Node
/// The node associated with the handle.
///
/// Error: bitmap.BitmapError
/// See Bitmap.
///
pub fn getVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!?*vfs.Node {
return self.file_handle_mapping.get(handle);
}
///
/// Check if the task has free handles to allocate.
///
/// Arguments:
/// IN self: Self - The self.
///
/// Return: bool
/// True if there are free handles, else false.
///
pub fn hasFreeVFSHandle(self: Self) bool {
return self.file_handles.num_free_entries > 0;
}
///
/// Add a handle associated with a node. The node can later be retrieved with getVFSHandle.
///
/// Arguments:
/// IN self: *Self - The pointer to self.
/// IN node: *vfs.Node - The node to associate with the returned handle.
///
/// Return: Handle
/// The handle now associated with the vfs node.
///
/// Error: std.mem.Allocator.Error
///
pub fn addVFSHandle(self: *Self, node: *vfs.Node) std.mem.Allocator.Error!?Handle {
if (self.file_handles.setFirstFree()) |handle| {
const real_handle = @intCast(Handle, handle);
try self.file_handle_mapping.put(real_handle, node);
return real_handle;
}
return null;
}
///
/// Check if the task has a certain handle registered.
///
/// Arguments:
/// IN self: Self - The self.
/// IN handle: Handle - The handle to check.
///
/// Return: bool
/// True if the handle has been registered to this task, else false.
///
/// Error: bitmap.BitmapError
/// See Bitmap.
///
pub fn hasVFSHandle(self: Self, handle: Handle) bitmap.BitmapError!bool {
return self.file_handles.isSet(handle);
}
///
/// Clear a registered handle and de-associate the node from it.
///
/// Arguments:
/// IN self: *Self - The pointer to self.
/// IN handle: Handle - The handle to clear. Must have been registered before.
///
/// Error: bitmap.BitmapError || Error
/// bitmap.BitmapError.* - See bitmap.BitmapError
/// Error.VFSHandleNotSet - The handle has not previously been registered
///
pub fn clearVFSHandle(self: *Self, handle: Handle) (bitmap.BitmapError || Error)!void {
if (try self.hasVFSHandle(handle)) {
try self.file_handles.clearEntry(handle);
_ = self.file_handle_mapping.remove(handle);
} else {
return Error.VFSHandleNotSet;
}
}
};
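Taken together, the handle methods above form a small open/lookup/close flow. A sketch of how a caller (for example the VFS syscall layer) might drive them; `task`, `node`, and the error name are assumptions, not code from this change:

// Assumed: `task: *Task` and `node: *vfs.Node` come from the caller.
if (!task.hasFreeVFSHandle()) return error.NoFreeHandle; // hypothetical error name
const handle = (try task.addVFSHandle(node)) orelse return error.NoFreeHandle;
// Translate the handle back to its node before doing any I/O on it.
if (try task.getVFSHandle(handle)) |open_node| {
    // ... read from / write to `open_node` ...
}
// Release the handle once the file is closed.
try task.clearVFSHandle(handle);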
///
@ -192,8 +299,8 @@ test "create out of memory for task" {
// Set the global allocator
var fa = FailingAllocator.init(testing_allocator, 0);
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator()));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator()));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true));
// Make sure any memory allocated is freed
try expectEqual(fa.allocated_bytes, fa.freed_bytes);
@ -208,8 +315,8 @@ test "create out of memory for stack" {
// Set the global allocator
var fa = FailingAllocator.init(testing_allocator, 1);
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator()));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator()));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true));
try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true));
// Make sure any memory allocated is freed
try expectEqual(fa.allocated_bytes, fa.freed_bytes);
@ -221,7 +328,7 @@ test "create out of memory for stack" {
}
test "create expected setup" {
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
defer task.destroy(std.testing.allocator);
// Will allocate the first PID 0
@ -229,7 +336,7 @@ test "create expected setup" {
try expectEqual(task.kernel_stack.len, STACK_SIZE);
try expectEqual(task.user_stack.len, 0);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true);
defer user_task.destroy(std.testing.allocator);
try expectEqual(user_task.pid, 1);
try expectEqual(user_task.user_stack.len, STACK_SIZE);
@ -241,8 +348,8 @@ test "destroy cleans up" {
// So if any alloc were not freed, this will fail the test
var allocator = std.testing.allocator;
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator);
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator, true);
task.destroy(allocator);
user_task.destroy(allocator);
@ -254,8 +361,8 @@ test "destroy cleans up" {
}
test "Multiple create" {
var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
try expectEqual(task1.pid, 0);
try expectEqual(task2.pid, 1);
@ -271,7 +378,7 @@ test "Multiple create" {
if (i > 0) try expectEqual(bmp, 0);
}
var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
try expectEqual(task3.pid, 0);
try expectEqual(all_pids.bitmaps[0], 3);
@ -282,7 +389,7 @@ test "Multiple create" {
task2.destroy(std.testing.allocator);
task3.destroy(std.testing.allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true);
try expectEqual(user_task.pid, 0);
try expectEqual(all_pids.bitmaps[0], 1);
@ -378,3 +485,83 @@ test "createFromElf clean-up" {
the_elf.section_headers[1].flags |= elf.SECTION_ALLOCATABLE;
try std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator));
}
test "create doesn't allocate kernel stack" {
var allocator = std.testing.allocator;
const task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, false);
defer task.destroy(allocator);
try std.testing.expectEqualSlices(usize, task.kernel_stack, &[_]usize{});
try std.testing.expectEqual(task.stack_pointer, 0);
}
test "addVFSHandle" {
var task = try Task.create(0, true, undefined, std.testing.allocator, false);
defer task.destroy(std.testing.allocator);
var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
var node2 = vfs.Node{ .File = .{ .fs = undefined } };
const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
try expectEqual(handle1, 0);
try expectEqual(&node1, task.file_handle_mapping.get(handle1).?);
try expectEqual(true, try task.file_handles.isSet(handle1));
const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
try expectEqual(handle2, 1);
try expectEqual(&node2, task.file_handle_mapping.get(handle2).?);
try expectEqual(true, try task.file_handles.isSet(handle2));
}
test "hasFreeVFSHandle" {
var task = try Task.create(0, true, undefined, std.testing.allocator, false);
defer task.destroy(std.testing.allocator);
var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
try expect(task.hasFreeVFSHandle());
_ = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
try expect(task.hasFreeVFSHandle());
var i: usize = 0;
const free_entries = task.file_handles.num_free_entries;
while (i < free_entries) : (i += 1) {
try expect(task.hasFreeVFSHandle());
_ = task.file_handles.setFirstFree();
}
try expect(!task.hasFreeVFSHandle());
}
test "getVFSHandle" {
var task = try Task.create(0, true, undefined, std.testing.allocator, false);
defer task.destroy(std.testing.allocator);
var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
var node2 = vfs.Node{ .File = .{ .fs = undefined } };
const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
try expectEqual(&node1, (try task.getVFSHandle(handle1)).?);
const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
try expectEqual(&node2, (try task.getVFSHandle(handle2)).?);
try expectEqual(&node1, (try task.getVFSHandle(handle1)).?);
try expectEqual(task.getVFSHandle(handle2 + 1), null);
}
test "clearVFSHandle" {
var task = try Task.create(0, true, undefined, std.testing.allocator, false);
defer task.destroy(std.testing.allocator);
var node1 = vfs.Node{ .Dir = .{ .fs = undefined, .mount = null } };
var node2 = vfs.Node{ .File = .{ .fs = undefined } };
const handle1 = (try task.addVFSHandle(&node1)) orelse return error.FailedToAddVFSHandle;
const handle2 = (try task.addVFSHandle(&node2)) orelse return error.FailedToAddVFSHandle;
try task.clearVFSHandle(handle1);
try expectEqual(false, try task.hasVFSHandle(handle1));
try task.clearVFSHandle(handle2);
try expectEqual(false, try task.hasVFSHandle(handle2));
try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2 + 1));
try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle2));
try expectError(Task.Error.VFSHandleNotSet, task.clearVFSHandle(handle1));
}

View file

@ -206,11 +206,12 @@ pub fn initMem(payload: BootPayload) Allocator.Error!mem.MemProfile {
};
}
pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator) Allocator.Error!void {
pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
// Suppress unused variable warnings
_ = t;
_ = entry_point;
_ = allocator;
_ = set_up_stack;
}
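In this stub architecture the new parameter is simply discarded; on a real port the flag would presumably gate the stack set-up. A purely speculative sketch of that shape, not the actual x86 implementation:

pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
    _ = allocator;
    if (!set_up_stack) {
        // Task 0 keeps the stack from the linker script; nothing to pre-populate,
        // and t.stack_pointer stays at 0.
        return;
    }
    // A real port would write the initial register frame here; this sketch only
    // seeds the entry point at the top of the freshly allocated kernel stack.
    t.kernel_stack[t.kernel_stack.len - 1] = entry_point;
    t.stack_pointer = @ptrToInt(&t.kernel_stack[t.kernel_stack.len - 1]);
}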
pub fn initKeyboard(allocator: Allocator) Allocator.Error!?*Keyboard {

View file

@ -193,7 +193,7 @@ pub const RuntimeStep = struct {
/// Error.TestFailed - The error if the test failed.
///
fn make(step: *Step) (Thread.SpawnError || ChildProcess.SpawnError || Allocator.Error || Error)!void {
const self = @fieldParentPtr(RuntimeStep, "step", step);
const self: *RuntimeStep = @fieldParentPtr("step", step);
// Create the qemu process
self.os_proc = try ChildProcess.init(self.argv, self.builder.allocator);
@ -254,7 +254,7 @@ pub const RuntimeStep = struct {
};
// put line in the queue
var node = self.builder.allocator.create(Node) catch unreachable;
const node = self.builder.allocator.create(Node) catch unreachable;
node.* = .{ .next = null, .data = line };
self.msg_queue.put(node);
}
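The make functions above use the newer @fieldParentPtr form, where the parent type is inferred from the annotated result type instead of being passed as the first argument. A tiny self-contained illustration; the names are invented for the example and do not come from the repo:

const std = @import("std");

const Widget = struct {
    step: u32 = 0,

    fn fromStep(step_ptr: *u32) *Widget {
        // The parent type (*Widget) is taken from the result type annotation.
        const parent: *Widget = @fieldParentPtr("step", step_ptr);
        return parent;
    }
};

test "fieldParentPtr recovers the container" {
    var w = Widget{};
    try std.testing.expectEqual(&w, Widget.fromStep(&w.step));
}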