Merge pull request #228 from ZystemOS/feature/user-mode

Add user mode
Sam Tebbs 2020-11-07 09:04:08 +00:00, committed by GitHub
commit d9e776e898
17 changed files with 612 additions and 126 deletions


@ -78,10 +78,22 @@ pub fn build(b: *Builder) !void {
var ramdisk_files_al = ArrayList([]const u8).init(b.allocator);
defer ramdisk_files_al.deinit();
// Add some test files for the ramdisk runtime tests
if (test_mode == .Initialisation) {
// Add some test files for the ramdisk runtime tests
try ramdisk_files_al.append("test/ramdisk_test1.txt");
try ramdisk_files_al.append("test/ramdisk_test2.txt");
} else if (test_mode == .Scheduler) {
// Add some test files for the user mode runtime tests
const user_program = b.addAssemble("user_program", "test/user_program.s");
user_program.setOutputDir(b.cache_root);
user_program.setTarget(target);
user_program.setBuildMode(build_mode);
user_program.strip = true;
const copy_user_program = b.addSystemCommand(&[_][]const u8{ "objcopy", "-O", "binary", "zig-cache/user_program.o", "zig-cache/user_program" });
copy_user_program.step.dependOn(&user_program.step);
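// objcopy -O binary strips the ELF container, leaving a flat binary whose first byte is the
// entry point; the scheduler runtime test loads this at virtual address 0 and jumps straight to it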
try ramdisk_files_al.append("zig-cache/user_program");
exec.step.dependOn(&copy_user_program.step);
}
const ramdisk_step = RamdiskStep.create(b, target, ramdisk_files_al.toOwnedSlice(), ramdisk_path);


@ -24,6 +24,7 @@ const Serial = @import("../../serial.zig").Serial;
const panic = @import("../../panic.zig").panic;
const TTY = @import("../../tty.zig").TTY;
const Keyboard = @import("../../keyboard.zig").Keyboard;
const Task = @import("../../task.zig").Task;
const MemProfile = mem.MemProfile;
/// The type of a device.
@ -53,8 +54,9 @@ extern var KERNEL_STACK_END: *u32;
/// The interrupt context that is given to an interrupt handler. It contains most of the registers
/// and the interrupt number and error code (if there is one).
pub const CpuState = packed struct {
// Page directory
cr3: usize,
// Extra segments
ss: u32,
gs: u32,
fs: u32,
es: u32,
@ -102,9 +104,6 @@ pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = vmm.Mapper(VmmPayload){ .mapFn =
/// The size of each allocatable block of memory, normally set to the page size.
pub const MEMORY_BLOCK_SIZE: usize = paging.PAGE_SIZE_4KB;
/// The default stack size of a task. Currently this is set to a page size.
pub const STACK_SIZE: u32 = MEMORY_BLOCK_SIZE / @sizeOf(u32);
///
/// Assembly that reads data from a given port and returns its value.
///
@ -499,50 +498,71 @@ pub fn initKeyboard(allocator: *Allocator) Allocator.Error!*Keyboard {
}
///
/// Initialise a 32bit kernel stack used for creating a task.
/// Initialise a stack used for creating a task.
/// Currently only supports fn () noreturn functions as the entry point.
///
/// Arguments:
/// IN task: *Task - The task to be initialised. The function will only modify whatever
/// is required by the architecture. In the case of x86, it will put
/// the initial CpuState on the kernel stack.
IN entry_point: usize - The address of the entry point of the task. The only supported
function type is fn () noreturn
IN allocator: *Allocator - The allocator used for allocating memory for the task.
///
/// Return: struct { stack: []u32, pointer: usize }
/// The stack and stack pointer with the stack initialised as a 32bit kernel stack.
///
/// Error: Allocator.Error
/// OutOfMemory - Unable to allocate space for the stack.
///
pub fn initTaskStack(entry_point: usize, allocator: *Allocator) Allocator.Error!struct { stack: []u32, pointer: usize } {
pub fn initTask(task: *Task, entry_point: usize, allocator: *Allocator) Allocator.Error!void {
const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11;
// Setting the bottom two bits of the code offset designates that this is a ring 3 task
const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11;
// Ring switches push and pop two extra values on interrupt: user_esp and user_ss
const kernel_stack_bottom = if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20;
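// For reference, the frame built below is cr3, the four data segment registers (gs, fs, es, ds),
// the eight popa registers, int_num, error_code, eip, cs and eflags: 1 + 4 + 8 + 2 + 3 = 18 words.
// A ring switch makes the CPU push user_esp and user_ss as well, hence the two extra slots (20)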
var stack = &task.kernel_stack;
// TODO Will need to add the exit point
// Set up everything as a kernel task
var stack = try allocator.alloc(u32, STACK_SIZE);
stack[STACK_SIZE - 18] = gdt.KERNEL_DATA_OFFSET; // ss
stack[STACK_SIZE - 17] = gdt.KERNEL_DATA_OFFSET; // gs
stack[STACK_SIZE - 16] = gdt.KERNEL_DATA_OFFSET; // fs
stack[STACK_SIZE - 15] = gdt.KERNEL_DATA_OFFSET; // es
stack[STACK_SIZE - 14] = gdt.KERNEL_DATA_OFFSET; // ds
stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
stack.*[kernel_stack_bottom + 1] = data_offset; // gs
stack.*[kernel_stack_bottom + 2] = data_offset; // fs
stack.*[kernel_stack_bottom + 3] = data_offset; // es
stack.*[kernel_stack_bottom + 4] = data_offset; // ds
stack[STACK_SIZE - 13] = 0; // edi
stack[STACK_SIZE - 12] = 0; // esi
stack.*[kernel_stack_bottom + 5] = 0; // edi
stack.*[kernel_stack_bottom + 6] = 0; // esi
// End of the stack
stack[STACK_SIZE - 11] = @ptrToInt(&stack[STACK_SIZE - 1]); // ebp
stack[STACK_SIZE - 10] = 0; // esp (temp): popa discards this slot rather than loading it
stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp
stack.*[kernel_stack_bottom + 8] = 0; // esp (temp): popa discards this slot rather than loading it
stack[STACK_SIZE - 9] = 0; // ebx
stack[STACK_SIZE - 8] = 0; // edx
stack[STACK_SIZE - 7] = 0; // ecx
stack[STACK_SIZE - 6] = 0; // eax
stack.*[kernel_stack_bottom + 9] = 0; // ebx
stack.*[kernel_stack_bottom + 10] = 0; // edx
stack.*[kernel_stack_bottom + 11] = 0; // ecx
stack.*[kernel_stack_bottom + 12] = 0; // eax
stack[STACK_SIZE - 5] = 0; // int_num
stack[STACK_SIZE - 4] = 0; // error_code
stack.*[kernel_stack_bottom + 13] = 0; // int_num
stack.*[kernel_stack_bottom + 14] = 0; // error_code
stack[STACK_SIZE - 3] = entry_point; // eip
stack[STACK_SIZE - 2] = gdt.KERNEL_CODE_OFFSET; // cs
stack[STACK_SIZE - 1] = 0x202; // eflags
stack.*[kernel_stack_bottom + 15] = entry_point; // eip
stack.*[kernel_stack_bottom + 16] = code_offset; // cs
stack.*[kernel_stack_bottom + 17] = 0x202; // eflags
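// 0x202 sets the reserved, always-one bit (bit 1) and IF (bit 9), so the task starts with
// interrupts enabled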
const ret = .{ .stack = stack, .pointer = @ptrToInt(&stack[STACK_SIZE - 18]) };
return ret;
if (!task.kernel) {
// Put the extra values on the kernel stack needed when changing privilege levels
stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp
stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss
if (!builtin.is_test) {
// Create a new page directory for the user task by mirroring the kernel directory
// We need kernel memory mapped so we don't get a page fault when entering kernel code from an interrupt
task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0];
task.vmm.payload.* = paging.kernel_directory.copy();
stack.*[kernel_stack_bottom] = vmm.kernel_vmm.virtToPhys(@ptrToInt(task.vmm.payload)) catch |e| {
panic(@errorReturnTrace(), "Failed to get the physical address of the user task's page directory: {}\n", .{e});
};
}
}
task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]);
}
pub fn getDevices(allocator: *Allocator) Allocator.Error![]Device {
@ -577,6 +597,19 @@ pub fn init(mem_profile: *const MemProfile) void {
tty.init();
}
///
/// Check that the saved state of the user task used for runtime testing holds the expected values. These should mirror those set in test/user_program.s
///
/// Arguments:
/// IN ctx: *const CpuState - The task's saved state
///
/// Return: bool
/// True if the expected values were found, else false
///
pub fn runtimeTestCheckUserTaskState(ctx: *const CpuState) bool {
return ctx.eax == 0xCAFE and ctx.ebx == 0xBEEF;
}
test "" {
std.testing.refAllDecls(@This());
}


@ -358,7 +358,7 @@ var gdt_ptr: GdtPtr = GdtPtr{
};
/// The main task state segment entry.
var main_tss_entry: Tss = init: {
pub var main_tss_entry: Tss = init: {
var tss_temp = std.mem.zeroes(Tss);
tss_temp.ss0 = KERNEL_DATA_OFFSET;
tss_temp.io_permissions_base_offset = @sizeOf(Tss);


@ -32,7 +32,8 @@ export fn commonStub() callconv(.Naked) void {
\\push %%es
\\push %%fs
\\push %%gs
\\push %%ss
\\mov %%cr3, %%eax
\\push %%eax
\\mov $0x10, %%ax
\\mov %%ax, %%ds
\\mov %%ax, %%es
@ -42,13 +43,30 @@ export fn commonStub() callconv(.Naked) void {
\\push %%eax
\\call handler
\\mov %%eax, %%esp
\\pop %%ss
);
// Pop off the new cr3, then check whether it's the same as the previous cr3.
// If so, skip reloading cr3 to avoid an unnecessary TLB flush
asm volatile (
\\pop %%eax
\\mov %%cr3, %%ebx
\\cmp %%eax, %%ebx
\\je same_cr3
\\mov %%eax, %%cr3
\\same_cr3:
\\pop %%gs
\\pop %%fs
\\pop %%es
\\pop %%ds
\\popa
\\add $0x8, %%esp
);
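// Writing cr3 invalidates all non-global TLB entries, so skipping the reload when the directory
// is unchanged (e.g. when switching between two kernel tasks sharing the kernel page directory)
// avoids that flush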
// The Tss.esp0 value is the stack pointer used when an interrupt occurs, so it should be the current task's stack pointer
// Skip the rest of the CpuState, set Tss.esp0, then un-skip the last few fields of the CpuState so that iret can still use them
asm volatile (
\\add $0x1C, %%esp
\\.extern main_tss_entry
\\mov %%esp, (main_tss_entry + 4)
\\sub $0x14, %%esp
\\iret
);
}


@ -42,6 +42,19 @@ pub const Directory = packed struct {
}
}
}
///
/// Copy the page directory. Changes to one copy will not affect the other
///
/// Arguments:
/// IN self: *const Directory - The directory to copy
///
/// Return: Directory
/// The copy
///
pub fn copy(self: *const Directory) Directory {
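// This is a value copy: the entries and tables arrays are duplicated, though the Table
// structs that the copied table pointers refer to are still shared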
return self.*;
}
};
/// An array of table entries. Forms the second level of paging and covers a 4MB memory space.
@ -598,6 +611,26 @@ test "map and unmap" {
}
}
test "copy" {
// Create a dummy page dir
var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY, .allocator = std.testing.allocator };
dir.entries[0] = 123;
dir.entries[56] = 794;
var table0 = Table{ .entries = [_]TableEntry{654} ** ENTRIES_PER_TABLE };
var table56 = Table{ .entries = [_]TableEntry{987} ** ENTRIES_PER_TABLE };
dir.tables[0] = &table0;
dir.tables[56] = &table56;
var dir2 = dir.copy();
const dir_slice = @ptrCast([*]const u8, &dir)[0..@sizeOf(Directory)];
const dir2_slice = @ptrCast([*]const u8, &dir2)[0..@sizeOf(Directory)];
testing.expectEqualSlices(u8, dir_slice, dir2_slice);
// Changes to one should not affect the other
dir2.tables[1] = &table0;
dir.tables[0] = &table56;
testing.expect(!std.mem.eql(u8, dir_slice, dir2_slice));
}
// The labels to jump to after attempting to cause a page fault. This is needed as we don't want to cause an
// infinite loop by jumping to the same instruction that caused the fault.
extern var rt_fault_callback: *u32;


@ -239,6 +239,29 @@ pub fn Bitmap(comptime BitmapType: type) type {
return self;
}
///
/// Clone this bitmap.
///
/// Arguments:
/// IN self: *Self - The bitmap to clone.
///
/// Return: Self
/// The cloned bitmap
///
/// Error: std.mem.Allocator.Error
/// OutOfMemory: There isn't enough memory available to allocate the required number of BitmapType entries.
///
pub fn clone(self: *const Self) std.mem.Allocator.Error!Self {
var copy = try init(self.num_entries, self.allocator);
var i: usize = 0;
while (i < copy.num_entries) : (i += 1) {
if (self.isSet(i) catch unreachable) {
copy.setEntry(i) catch unreachable;
}
}
return copy;
}
///
/// Free the memory occupied by this bitmap's internal state. It will become unusable afterwards.
///


@ -753,7 +753,6 @@ test "traversePath" {
testing.expectEqual(child4_linked, child3);
var child5 = try traversePath("/child4", false, .CREATE_SYMLINK, .{ .symlink_target = "/child2" });
var child5_linked = try traversePath("/child4/child3.txt", true, .NO_CREATION, .{});
std.debug.warn("child5_linked {}, child4_linked {}\n", .{ child5_linked, child4_linked });
testing.expectEqual(child5_linked, child4_linked);
child4_linked.File.close();
child5_linked.File.close();


@ -83,6 +83,12 @@ export fn kmain(boot_payload: arch.BootPayload) void {
arch.init(&mem_profile);
logger.info("Arch init done\n", .{});
// The VMM runtime tests can't happen until the architecture has initialised itself
switch (build_options.test_mode) {
.Initialisation => vmm.runtimeTests(arch.VmmPayload, kernel_vmm, &mem_profile),
else => {},
}
// Give the kernel heap 10% of the available memory. This can be fine-tuned as time goes on.
var heap_size = mem_profile.mem_kb / 10 * 1024;
// The heap size must be a power of two, so find the largest power of two smaller than or equal to the heap_size
@ -101,10 +107,6 @@ export fn kmain(boot_payload: arch.BootPayload) void {
keyboard.addKeyboard(kb) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to add architecture keyboard: {}\n", .{e});
}
scheduler.init(&kernel_heap.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise scheduler: {}\n", .{e});
};
// Get the ramdisk module
const rd_module = for (mem_profile.modules) |module| {
if (std.mem.eql(u8, module.name, "initrd.ramdisk")) {
@ -120,7 +122,6 @@ export fn kmain(boot_payload: arch.BootPayload) void {
var ramdisk_filesystem = initrd.InitrdFS.init(&initrd_stream, &kernel_heap.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise ramdisk: {}\n", .{e});
};
defer ramdisk_filesystem.deinit();
// Can now free the module as new memory is allocated for the ramdisk filesystem
kernel_vmm.free(module.region.start) catch |e| {
@ -129,10 +130,12 @@ export fn kmain(boot_payload: arch.BootPayload) void {
// Need to init the vfs after the ramdisk as we need the root node from the ramdisk filesystem
vfs.setRoot(ramdisk_filesystem.root_node);
// Load all files here
}
scheduler.init(&kernel_heap.allocator, &mem_profile) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise scheduler: {}\n", .{e});
};
// Initialisation is finished, now do other stuff
logger.info("Init\n", .{});
@ -142,10 +145,10 @@ export fn kmain(boot_payload: arch.BootPayload) void {
logger.info("Creating init2\n", .{});
// Create a init2 task
var idle_task = task.Task.create(initStage2, &kernel_heap.allocator) catch |e| {
var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, &kernel_heap.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to create init stage 2 task: {}\n", .{e});
};
scheduler.scheduleTask(idle_task, &kernel_heap.allocator) catch |e| {
scheduler.scheduleTask(stage2_task, &kernel_heap.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to schedule init stage 2 task: {}\n", .{e});
};


@ -10,18 +10,20 @@ const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals;
const panic = if (is_test) @import(mock_path ++ "panic_mock.zig").panic else @import("panic.zig").panic;
const task = if (is_test) @import(mock_path ++ "task_mock.zig") else @import("task.zig");
const vmm = if (is_test) @import(mock_path ++ "vmm_mock.zig") else @import("vmm.zig");
const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
const fs = @import("filesystem/vfs.zig");
const Task = task.Task;
const EntryPoint = task.EntryPoint;
const Allocator = std.mem.Allocator;
const TailQueue = std.TailQueue;
/// The function type for the entry point.
const EntryPointFn = fn () void;
/// The default stack size of a task. Currently this is set to a page size.
const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(usize);
/// Pointer to the start of the main kernel stack
extern var KERNEL_STACK_START: []u32;
extern var KERNEL_STACK_END: []u32;
/// The current task running
var current_task: *Task = undefined;
@ -58,6 +60,14 @@ pub fn taskSwitching(enabled: bool) void {
/// The new stack pointer, pointing into the stack of the next task.
///
pub fn pickNextTask(ctx: *arch.CpuState) usize {
switch (build_options.test_mode) {
.Scheduler => if (!current_task.kernel) {
if (!arch.runtimeTestCheckUserTaskState(ctx)) {
panic(null, "User task state check failed\n", .{});
}
},
else => {},
}
// Save the stack pointer from old task
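// The CpuState was pushed onto the interrupted task's own kernel stack, so a pointer to it
// doubles as that task's saved stack pointer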
current_task.stack_pointer = @ptrToInt(ctx);
@ -92,7 +102,7 @@ pub fn pickNextTask(ctx: *arch.CpuState) usize {
/// Create a new task and add it to the scheduling queue. No locking.
///
/// Arguments:
/// IN entry_point: EntryPointFn - The entry point into the task. This must be a function.
/// IN entry_point: EntryPoint - The entry point into the task. This must be a function.
///
/// Error: Allocator.Error
/// OutOfMemory - If there isn't enough memory for a task/stack. Any memory allocated will
@ -112,11 +122,12 @@ pub fn scheduleTask(new_task: *Task, allocator: *Allocator) Allocator.Error!void
///
/// Arguments:
/// IN allocator: *Allocator - The allocator to use when needing to allocate memory.
/// IN mem_profile: *const mem.MemProfile - The system's memory profile used for runtime testing.
///
/// Error: Allocator.Error
/// OutOfMemory - There is no more memory. Any memory allocated will be freed on return.
///
pub fn init(allocator: *Allocator) Allocator.Error!void {
pub fn init(allocator: *Allocator, mem_profile: *const mem.MemProfile) Allocator.Error!void {
// TODO: Maybe move the task init here?
log.info("Init\n", .{});
defer log.info("Done\n", .{});
@ -129,17 +140,20 @@ pub fn init(allocator: *Allocator) Allocator.Error!void {
errdefer allocator.destroy(current_task);
// PID 0
current_task.pid = 0;
current_task.stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
const kernel_stack_size = @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START);
current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
current_task.user_stack = &[_]usize{};
current_task.kernel = true;
// ESP will be saved on next schedule
// Run the runtime tests here
switch (build_options.test_mode) {
.Scheduler => runtimeTests(allocator),
.Scheduler => runtimeTests(allocator, mem_profile),
else => {},
}
// Create the idle task when there are no more tasks left
var idle_task = try Task.create(idle, allocator);
var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator);
errdefer idle_task.destroy(allocator);
try scheduleTask(idle_task, allocator);
@ -154,20 +168,20 @@ fn test_fn2() void {}
var test_pid_counter: u7 = 1;
fn task_create(entry_point: EntryPointFn, allocator: *Allocator) Allocator.Error!*Task {
fn createTestTask(entry_point: EntryPoint, allocator: *Allocator, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(u8)) Allocator.Error!*Task {
var t = try allocator.create(Task);
errdefer allocator.destroy(t);
t.pid = test_pid_counter;
// Just alloc something
t.stack = try allocator.alloc(u32, 1);
t.kernel_stack = try allocator.alloc(u32, 1);
t.stack_pointer = 0;
test_pid_counter += 1;
return t;
}
fn task_destroy(self: *Task, allocator: *Allocator) void {
if (@ptrToInt(self.stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
allocator.free(self.stack);
fn destroyTestTask(self: *Task, allocator: *Allocator) void {
if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
allocator.free(self.kernel_stack);
}
allocator.destroy(self);
}
@ -176,9 +190,9 @@ test "pickNextTask" {
task.initTest();
defer task.freeTest();
task.addConsumeFunction("Task.create", task_create);
task.addConsumeFunction("Task.create", task_create);
task.addRepeatFunction("Task.destroy", task_destroy);
task.addConsumeFunction("Task.create", createTestTask);
task.addConsumeFunction("Task.create", createTestTask);
task.addRepeatFunction("Task.destroy", destroyTestTask);
var ctx: arch.CpuState = std.mem.zeroes(arch.CpuState);
@ -189,15 +203,15 @@ test "pickNextTask" {
current_task = try allocator.create(Task);
defer allocator.destroy(current_task);
current_task.pid = 0;
current_task.stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
current_task.stack_pointer = @ptrToInt(&KERNEL_STACK_START);
// Create two tasks and schedule them
var test_fn1_task = try Task.create(test_fn1, allocator);
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
defer test_fn1_task.destroy(allocator);
try scheduleTask(test_fn1_task, allocator);
var test_fn2_task = try Task.create(test_fn2, allocator);
var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator);
defer test_fn2_task.destroy(allocator);
try scheduleTask(test_fn2_task, allocator);
@ -239,8 +253,8 @@ test "createNewTask add new task" {
task.initTest();
defer task.freeTest();
task.addConsumeFunction("Task.create", task_create);
task.addConsumeFunction("Task.destroy", task_destroy);
task.addConsumeFunction("Task.create", createTestTask);
task.addConsumeFunction("Task.destroy", destroyTestTask);
// Set the global allocator
var allocator = std.testing.allocator;
@ -248,7 +262,7 @@ test "createNewTask add new task" {
// Init the task list
tasks = TailQueue(*Task){};
var test_fn1_task = try Task.create(test_fn1, allocator);
var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
defer test_fn1_task.destroy(allocator);
try scheduleTask(test_fn1_task, allocator);
@ -262,15 +276,15 @@ test "init" {
task.initTest();
defer task.freeTest();
task.addConsumeFunction("Task.create", task_create);
task.addRepeatFunction("Task.destroy", task_destroy);
task.addConsumeFunction("Task.create", createTestTask);
task.addRepeatFunction("Task.destroy", destroyTestTask);
var allocator = std.testing.allocator;
try init(allocator);
try init(allocator, undefined);
expectEqual(current_task.pid, 0);
expectEqual(current_task.stack, @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096]);
expectEqual(current_task.kernel_stack, @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0 .. @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START)]);
expectEqual(tasks.len, 1);
@ -308,7 +322,7 @@ fn rt_variable_preserved(allocator: *Allocator) void {
defer allocator.destroy(is_set);
is_set.* = true;
var test_task = Task.create(task_function, allocator) catch unreachable;
var test_task = Task.create(@ptrToInt(task_function), true, undefined, allocator) catch unreachable;
scheduleTask(test_task, allocator) catch unreachable;
// TODO: Need to add the ability to remove tasks
@ -348,14 +362,57 @@ fn rt_variable_preserved(allocator: *Allocator) void {
log.info("SUCCESS: Scheduler variables preserved\n", .{});
}
///
/// Test the initialisation and running of a task running in user mode
///
/// Arguments:
/// IN allocator: *std.mem.Allocator - The allocator to use when initialising the task
/// IN mem_profile: *const mem.MemProfile - The system's memory profile. Determines the end address of the user task's VMM.
///
fn rt_user_task(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
// 1. Create user VMM
var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| {
panic(@errorReturnTrace(), "Failed to allocate user task VMM: {}\n", .{e});
};
task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch unreachable;
// 2. Create user task. The code will be loaded at address 0
var user_task = task.Task.create(0, false, task_vmm, allocator) catch |e| {
panic(@errorReturnTrace(), "Failed to create user task: {}\n", .{e});
};
// 3. Read the user program file from the filesystem
const user_program_file = fs.openFile("/user_program", .NO_CREATION) catch |e| {
panic(@errorReturnTrace(), "Failed to open /user_program: {}\n", .{e});
};
defer user_program_file.close();
var code: [1024]u8 = undefined;
const code_len = user_program_file.read(code[0..1024]) catch |e| {
panic(@errorReturnTrace(), "Failed to read user program file: {}\n", .{e});
};
// 4. Allocate space in the vmm for the user_program
const code_start = task_vmm.alloc(std.mem.alignForward(code_len, vmm.BLOCK_SIZE) / vmm.BLOCK_SIZE, .{ .kernel = false, .writable = true, .cachable = true }) catch |e| {
panic(@errorReturnTrace(), "Failed to allocate VMM memory for user program code: {}\n", .{e});
} orelse panic(null, "User task VMM didn't allocate space for the user program\n", .{});
if (code_start != 0) panic(null, "User program start address was {} instead of 0\n", .{code_start});
// 5. Copy user_program code over
vmm.kernel_vmm.copyDataToVMM(task_vmm, code[0..code_len], code_start) catch |e| {
panic(@errorReturnTrace(), "Failed to copy user code: {}\n", .{e});
};
// 6. Schedule it
scheduleTask(user_task, allocator) catch |e| {
panic(@errorReturnTrace(), "Failed to schedule the user task: {}\n", .{e});
};
}
///
/// The scheduler runtime tests that will test the scheduling functionality.
///
/// Arguments:
/// IN allocator: *Allocator - The allocator to use when needing to allocate memory.
/// IN mem_profile: *const mem.MemProfile - The system's memory profile. Used to set up user task VMMs.
///
fn runtimeTests(allocator: *Allocator) void {
fn runtimeTests(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
arch.enableInterrupts();
rt_user_task(allocator, mem_profile);
rt_variable_preserved(allocator);
while (true) {}
}


@ -8,6 +8,7 @@ const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals;
const panic = if (is_test) @import(mock_path ++ "panic_mock.zig").panic else @import("panic.zig").panic;
const ComptimeBitmap = @import("bitmap.zig").ComptimeBitmap;
const vmm = @import("vmm.zig");
const Allocator = std.mem.Allocator;
/// The kernel's main stack start; used to check whether the task being destroyed is running on this stack
@ -15,7 +16,7 @@ const Allocator = std.mem.Allocator;
extern var KERNEL_STACK_START: *u32;
/// The function type for the entry point.
const EntryPointFn = fn () void;
pub const EntryPoint = usize;
/// The bitmap type for the PIDs
const PidBitmap = if (is_test) ComptimeBitmap(u128) else ComptimeBitmap(u1024);
@ -28,6 +29,9 @@ var all_pids: PidBitmap = brk: {
break :brk pids;
};
/// The default stack size of a task. Currently this is set to a page size.
pub const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(u32);
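// With 4KB memory blocks and 4-byte words this is 4096 / 4 = 1024 entries, i.e. each stack
// occupies exactly one page on x86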
/// The task control block for storing all the information needed to save and restore a task.
pub const Task = struct {
const Self = @This();
@ -35,19 +39,30 @@ pub const Task = struct {
/// The unique task identifier
pid: PidBitmap.IndexType,
/// Pointer to the stack for the task. This will be allocated on initialisation.
stack: []u32,
/// Pointer to the kernel stack for the task. This will be allocated on initialisation.
kernel_stack: []usize,
/// Pointer to the user stack for the task. This will be allocated on initialisation and will be empty if it's a kernel task
user_stack: []usize,
/// The current stack pointer into the stack.
stack_pointer: usize,
/// Whether the process is a kernel process or not
kernel: bool,
/// The virtual memory manager belonging to the task
vmm: *vmm.VirtualMemoryManager(arch.VmmPayload),
///
/// Create a task. This will allocate a PID and the stack(s). As this is a new task, the
/// kernel stack will be initialised with the CPU state as described in the arch.CpuState
/// struct so the task is ready to be scheduled.
///
/// Arguments:
/// IN entry_point: EntryPointFn - The entry point into the task. This must be a function.
/// IN entry_point: EntryPoint - The entry point into the task. This must be a function.
/// IN kernel: bool - Whether the task has kernel or user privileges.
/// IN task_vmm: *VirtualMemoryManager - The virtual memory manager associated with the task.
/// IN allocator: *Allocator - The allocator for allocating memory for a task.
///
/// Return: *Task
@ -57,16 +72,29 @@ pub const Task = struct {
/// OutOfMemory - If there is no more memory to allocate. Any memory or PID allocated will
/// be freed on return.
///
pub fn create(entry_point: EntryPointFn, allocator: *Allocator) Allocator.Error!*Task {
pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: *Allocator) Allocator.Error!*Task {
var task = try allocator.create(Task);
errdefer allocator.destroy(task);
task.pid = allocatePid();
errdefer freePid(task.pid);
const pid = allocatePid();
errdefer freePid(pid);
const task_stack = try arch.initTaskStack(@ptrToInt(entry_point), allocator);
task.stack = task_stack.stack;
task.stack_pointer = task_stack.pointer;
var k_stack = try allocator.alloc(usize, STACK_SIZE);
errdefer allocator.free(k_stack);
var u_stack = if (kernel) &[_]usize{} else try allocator.alloc(usize, STACK_SIZE);
errdefer if (!kernel) allocator.free(u_stack);
task.* = .{
.pid = pid,
.kernel_stack = k_stack,
.user_stack = u_stack,
.stack_pointer = @ptrToInt(&k_stack[STACK_SIZE - 1]),
.kernel = kernel,
.vmm = task_vmm,
};
try arch.initTask(task, entry_point, allocator);
return task;
}
@ -81,8 +109,11 @@ pub const Task = struct {
freePid(self.pid);
// We need to check that the stack has been allocated, as task 0 (init) won't have an
// allocated stack since its stack is defined in the linker script
if (@ptrToInt(self.stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
allocator.free(self.stack);
if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
allocator.free(self.kernel_stack);
}
if (!self.kernel) {
allocator.free(self.user_stack);
}
allocator.destroy(self);
}
@ -122,7 +153,8 @@ test "create out of memory for task" {
// Set the global allocator
var fa = FailingAllocator.init(testing_allocator, 0);
expectError(error.OutOfMemory, Task.create(test_fn1, &fa.allocator));
expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, &fa.allocator));
expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, &fa.allocator));
// Make sure any memory allocated is freed
expectEqual(fa.allocated_bytes, fa.freed_bytes);
@ -135,7 +167,8 @@ test "create out of memory for stack" {
// Set the global allocator
var fa = FailingAllocator.init(testing_allocator, 1);
expectError(error.OutOfMemory, Task.create(test_fn1, &fa.allocator));
expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, &fa.allocator));
expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, &fa.allocator));
// Make sure any memory allocated is freed
expectEqual(fa.allocated_bytes, fa.freed_bytes);
@ -145,32 +178,39 @@ test "create out of memory for stack" {
}
test "create expected setup" {
var task = try Task.create(test_fn1, std.testing.allocator);
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
defer task.destroy(std.testing.allocator);
// Will allocate the first PID 1, 0 will always be allocated
expectEqual(task.pid, 1);
expectEqual(task.kernel_stack.len, STACK_SIZE);
expectEqual(task.user_stack.len, 0);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
defer user_task.destroy(std.testing.allocator);
expectEqual(user_task.pid, 2);
expectEqual(user_task.user_stack.len, STACK_SIZE);
expectEqual(user_task.kernel_stack.len, STACK_SIZE);
}
test "destroy cleans up" {
// This uses the leak-detecting allocator in testing,
// so if any allocation is not freed this test will fail
var fa = FailingAllocator.init(testing_allocator, 2);
var allocator = std.testing.allocator;
var task = try Task.create(test_fn1, &fa.allocator);
var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator);
task.destroy(&fa.allocator);
// Make sure any memory allocated is freed
expectEqual(fa.allocated_bytes, fa.freed_bytes);
task.destroy(allocator);
user_task.destroy(allocator);
// All PIDs were freed
expectEqual(all_pids.bitmap, 1);
}
test "Multiple create" {
var task1 = try Task.create(test_fn1, std.testing.allocator);
var task2 = try Task.create(test_fn1, std.testing.allocator);
var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
expectEqual(task1.pid, 1);
expectEqual(task2.pid, 2);
@ -180,13 +220,21 @@ test "Multiple create" {
expectEqual(all_pids.bitmap, 5);
var task3 = try Task.create(test_fn1, std.testing.allocator);
var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
expectEqual(task3.pid, 1);
expectEqual(all_pids.bitmap, 7);
task2.destroy(std.testing.allocator);
task3.destroy(std.testing.allocator);
var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
expectEqual(user_task.pid, 1);
expectEqual(all_pids.bitmap, 3);
user_task.destroy(std.testing.allocator);
expectEqual(all_pids.bitmap, 1);
}
test "allocatePid and freePid" {


@ -107,6 +107,9 @@ pub const VmmError = error{
/// Physical addresses are invalid
InvalidPhysAddresses,
/// Not enough virtual space in the VMM
OutOfMemory,
};
/// The boot-time offset that the virtual addresses are from the physical addresses
@ -180,6 +183,39 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
};
}
///
/// Copy this VMM. Changes to one copy will not affect the other
///
/// Arguments:
/// IN self: *Self - The VMM to copy
///
/// Error: Allocator.Error
/// OutOfMemory - There wasn't enough memory for copying
///
/// Return: Self
/// The copy
///
pub fn copy(self: *const Self) Allocator.Error!Self {
var clone = Self{
.bmp = try self.bmp.clone(),
.start = self.start,
.end = self.end,
.allocator = self.allocator,
.allocations = std.hash_map.AutoHashMap(usize, Allocation).init(self.allocator),
.mapper = self.mapper,
.payload = self.payload,
};
var it = self.allocations.iterator();
while (it.next()) |entry| {
var list = std.ArrayList(usize).init(self.allocator);
for (entry.value.physical.items) |block| {
_ = try list.append(block);
}
_ = try clone.allocations.put(entry.key, Allocation{ .physical = list });
}
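// bmp and the allocations map are deep-copied above, while mapper and payload are copied by
// value; a caller that needs an independent page directory (as initTask does for user tasks)
// must replace payload itself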
return clone;
}
///
/// Free the internal state of the VMM. It is unusable afterwards
///
@ -384,6 +420,84 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
return null;
}
///
/// Copy data from an address in a virtual memory manager to an address in another virtual memory manager
///
/// Arguments:
/// IN self: *Self - The virtual memory manager that owns the data being copied. This must be the VMM currently in use
/// IN to: *const Self - The virtual memory manager that owns the address that the data is being copied to
/// IN data: []const u8 - The data being copied. Must be within memory mapped in `self`
/// IN dest: usize - The address within `to` to copy the data to. The region from `dest` to `dest` + `data.len` must be mapped in `to`
///
/// Error: VmmError || pmm.PmmError || Allocator.Error
/// VmmError.NotAllocated - Some or all of the destination isn't mapped
/// VmmError.OutOfMemory - There wasn't enough space in the VMM to use for temporary mapping
/// Bitmap(u32).Error.OutOfBounds - The address given is outside of the memory managed
/// Allocator.Error.OutOfMemory - There wasn't enough memory available to fulfill the request
///
pub fn copyDataToVMM(self: *Self, to: *const Self, data: []const u8, dest: usize) (bitmap.Bitmap(usize).BitmapError || VmmError || Allocator.Error)!void {
if (data.len == 0) {
return;
}
const start_addr = std.mem.alignBackward(dest, BLOCK_SIZE);
const end_addr = std.mem.alignForward(dest + data.len, BLOCK_SIZE);
if (end_addr >= to.end or start_addr < to.start)
return bitmap.Bitmap(usize).BitmapError.OutOfBounds;
// Find physical blocks for `dest`
var blocks = std.ArrayList(usize).init(self.allocator);
defer blocks.deinit();
var it = to.allocations.iterator();
while (it.next()) |allocation| {
const virtual = allocation.key;
const physical = allocation.value.physical.items;
if (start_addr >= virtual and virtual + physical.len * BLOCK_SIZE >= end_addr) {
const first_block_idx = (start_addr - virtual) / BLOCK_SIZE;
const last_block_idx = (end_addr - virtual) / BLOCK_SIZE;
try blocks.appendSlice(physical[first_block_idx..last_block_idx]);
}
}
// Make sure the address is actually mapped in the destination VMM
if (blocks.items.len == 0) {
return VmmError.NotAllocated;
}
// Map them into `self` at some virtual address so they can be accessed from this VMM
if (self.bmp.setContiguous(blocks.items.len)) |entry| {
const v_start = entry * BLOCK_SIZE + self.start;
defer {
// Unmap virtual blocks from `self` so they can be used in the future
var v = v_start;
while (v < v_start + blocks.items.len * BLOCK_SIZE) : (v += BLOCK_SIZE) {
// Cannot be out of bounds as it has been set above
self.bmp.clearEntry((v - self.start) / BLOCK_SIZE) catch unreachable;
}
}
for (blocks.items) |block, i| {
const v = v_start + i * BLOCK_SIZE;
const v_end = v + BLOCK_SIZE;
const p = block;
const p_end = p + BLOCK_SIZE;
self.mapper.mapFn(v, v_end, p, p_end, .{ .kernel = true, .writable = true, .cachable = true }, self.allocator, self.payload) catch |e| {
// If we fail to map one of the blocks then attempt to unmap all previously mapped blocks
if (i > 0) {
self.mapper.unmapFn(v_start, v_end, self.payload) catch |e2| {
// If we can't unmap then just panic
panic(@errorReturnTrace(), "Failed to unmap region 0x{X} -> 0x{X}: {}\n", .{ v_start, v_end, e2 });
};
}
panic(@errorReturnTrace(), "Failed to map vrutal region 0x{X} -> 0x{X} to 0x{X} -> 0x{X}: {}\n", .{ v, v_end, p, p_end, e });
};
}
// Copy to vaddr from above
const align_offset = dest - start_addr;
var data_copy = @intToPtr([*]u8, v_start + align_offset)[0..data.len];
std.mem.copy(u8, data_copy, data);
} else {
return VmmError.OutOfMemory;
}
}
///
/// Free a previous allocation
///
@ -463,10 +577,6 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator
};
}
switch (build_options.test_mode) {
.Initialisation => runtimeTests(arch.VmmPayload, kernel_vmm, mem_profile),
else => {},
}
return &kernel_vmm;
}
@ -475,7 +585,7 @@ test "virtToPhys" {
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const vstart = test_vaddr_start + BLOCK_SIZE;
const vstart = vmm.start + BLOCK_SIZE;
const vend = vstart + BLOCK_SIZE * 3;
const pstart = BLOCK_SIZE * 20;
const pend = BLOCK_SIZE * 23;
@ -498,7 +608,7 @@ test "physToVirt" {
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const vstart = test_vaddr_start + BLOCK_SIZE;
const vstart = vmm.start + BLOCK_SIZE;
const vend = vstart + BLOCK_SIZE * 3;
const pstart = BLOCK_SIZE * 20;
const pend = BLOCK_SIZE * 23;
@ -628,9 +738,79 @@ test "set" {
}
}
var test_allocations: ?bitmap.Bitmap(u64) = null;
test "copy" {
const num_entries = 512;
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const attrs = .{ .kernel = true, .cachable = true, .writable = true };
const alloc0 = (try vmm.alloc(24, attrs)).?;
var mirrored = try vmm.copy();
defer mirrored.deinit();
std.testing.expectEqual(vmm.bmp.num_free_entries, mirrored.bmp.num_free_entries);
std.testing.expectEqual(vmm.start, mirrored.start);
std.testing.expectEqual(vmm.end, mirrored.end);
std.testing.expectEqual(vmm.allocations.count(), mirrored.allocations.count());
var it = vmm.allocations.iterator();
while (it.next()) |next| {
for (mirrored.allocations.get(next.key).?.physical.items) |block, i| {
std.testing.expectEqual(block, vmm.allocations.get(next.key).?.physical.items[i]);
}
}
std.testing.expectEqual(vmm.mapper, mirrored.mapper);
std.testing.expectEqual(vmm.payload, mirrored.payload);
// Allocating in the new VMM shouldn't allocate in the mirrored one
const alloc1 = (try mirrored.alloc(3, attrs)).?;
std.testing.expectEqual(vmm.allocations.count() + 1, mirrored.allocations.count());
std.testing.expectEqual(vmm.bmp.num_free_entries - 3, mirrored.bmp.num_free_entries);
std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(alloc1));
// And vice-versa
const alloc2 = (try vmm.alloc(3, attrs)).?;
const alloc3 = (try vmm.alloc(1, attrs)).?;
const alloc4 = (try vmm.alloc(1, attrs)).?;
std.testing.expectEqual(vmm.allocations.count() - 2, mirrored.allocations.count());
std.testing.expectEqual(vmm.bmp.num_free_entries + 2, mirrored.bmp.num_free_entries);
std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc3));
std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc4));
}
test "copyDataToVMM" {
var vmm = try testInit(100);
defer testDeinit(&vmm);
const alloc1_blocks = 1;
const alloc = (try vmm.alloc(alloc1_blocks, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable;
var vmm2 = try VirtualMemoryManager(u8).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, 39);
defer vmm2.deinit();
var vmm_free_entries = vmm.bmp.num_free_entries;
var vmm2_free_entries = vmm2.bmp.num_free_entries;
const buff: []const u8 = &[_]u8{ 10, 11, 12, 13 };
try vmm2.copyDataToVMM(&vmm, buff, alloc);
// Make sure they are the same
const buff2 = @intToPtr([*]u8, alloc)[0..buff.len];
std.testing.expectEqualSlices(u8, buff, buff2);
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries);
// Test NotAllocated
std.testing.expectError(VmmError.NotAllocated, vmm2.copyDataToVMM(&vmm, buff, alloc + alloc1_blocks * BLOCK_SIZE));
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries);
// Test Bitmap.Error.OutOfBounds
std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm2.copyDataToVMM(&vmm, buff, vmm.end));
std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm.copyDataToVMM(&vmm2, buff, vmm2.end));
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries);
}
var test_allocations: ?*bitmap.Bitmap(u64) = null;
var test_mapper = Mapper(u8){ .mapFn = testMap, .unmapFn = testUnmap };
const test_vaddr_start: usize = 0xC0000000;
var test_vmm: VirtualMemoryManager(u8) = undefined;
///
/// Initialise a virtual memory manager used for testing
@ -646,7 +826,8 @@ const test_vaddr_start: usize = 0xC0000000;
///
fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
if (test_allocations == null) {
test_allocations = try bitmap.Bitmap(u64).init(num_entries, std.testing.allocator);
test_allocations = try std.testing.allocator.create(bitmap.Bitmap(u64));
test_allocations.?.* = try bitmap.Bitmap(u64).init(num_entries, std.testing.allocator);
} else |allocations| {
var entry: u32 = 0;
while (entry < allocations.num_entries) : (entry += 1) {
@ -666,16 +847,21 @@ fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
.modules = &[_]mem.Module{},
};
pmm.init(&mem_profile, std.testing.allocator);
return VirtualMemoryManager(u8).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, 39);
const test_vaddr_start = @ptrToInt(&(try std.testing.allocator.alloc(u8, num_entries * BLOCK_SIZE))[0]);
test_vmm = try VirtualMemoryManager(u8).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, 39);
return test_vmm;
}
fn testDeinit(vmm: *VirtualMemoryManager(u8)) void {
defer vmm.deinit();
defer {
test_allocations.?.deinit();
vmm.deinit();
const space = @intToPtr([*]u8, vmm.start)[0 .. vmm.end - vmm.start];
vmm.allocator.free(space);
if (test_allocations) |allocs| {
allocs.deinit();
std.testing.allocator.destroy(allocs);
test_allocations = null;
}
defer pmm.deinit();
pmm.deinit();
}
///
@ -693,8 +879,9 @@ fn testDeinit(vmm: *VirtualMemoryManager(u8)) void {
fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: *Allocator, payload: u8) (Allocator.Error || MapperError)!void {
std.testing.expectEqual(@as(u8, 39), payload);
var vaddr = vstart;
var allocations = test_allocations.?;
while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
(test_allocations.?).setEntry((vaddr - test_vaddr_start) / BLOCK_SIZE) catch unreachable;
allocations.setEntry((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable;
}
}
@ -709,9 +896,10 @@ fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attrib
fn testUnmap(vstart: usize, vend: usize, payload: u8) (Allocator.Error || MapperError)!void {
std.testing.expectEqual(@as(u8, 39), payload);
var vaddr = vstart;
var allocations = test_allocations.?;
while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
if ((test_allocations.?).isSet((vaddr - test_vaddr_start) / BLOCK_SIZE) catch unreachable) {
(test_allocations.?).clearEntry((vaddr - test_vaddr_start) / BLOCK_SIZE) catch unreachable;
if (allocations.isSet((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable) {
allocations.clearEntry((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable;
} else {
return MapperError.NotMapped;
}
@ -727,7 +915,19 @@ fn testUnmap(vstart: usize, vend: usize, payload: u8) (Allocator.Error || Mapper
/// IN vmm: *VirtualMemoryManager(Payload) - The virtual memory manager to run the tests against
/// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved
///
fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void {
pub fn runtimeTests(comptime Payload: type, vmm: *VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void {
rt_correctMapping(Payload, vmm, mem_profile);
rt_copyDataToVMM(vmm);
}
///
/// Test that the correct mappings have been made in the VMM
///
/// Arguments:
/// IN vmm: *VirtualMemoryManager(Payload) - The virtual memory manager to test
/// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved
///
fn rt_correctMapping(comptime Payload: type, vmm: *VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void {
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end), BLOCK_SIZE);
@ -760,6 +960,51 @@ fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_
panic(@errorReturnTrace(), "An address was not set in the VMM when it should have been: 0x{x}\n", .{vaddr});
}
}
log.info("Tested allocations\n", .{});
}
///
/// Test copying data to another VMM
///
/// Arguments:
/// IN vmm: *VirtualMemoryManager(arch.VmmPayload) - The VMM to copy from
///
fn rt_copyDataToVMM(vmm: *VirtualMemoryManager(arch.VmmPayload)) void {
const expected_free_entries = vmm.bmp.num_free_entries;
// Mirror the VMM
var vmm2 = vmm.copy() catch |e| {
panic(@errorReturnTrace(), "Failed to mirror VMM: {}\n", .{e});
};
// Allocate within secondary VMM
const addr = vmm2.alloc(1, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| {
panic(@errorReturnTrace(), "Failed to allocate within the secondary VMM in rt_copyDataToVMM: {}\n", .{e});
} orelse panic(@errorReturnTrace(), "Failed to get an allocation within the secondary VMM in rt_copyDataToVMM\n", .{});
const expected_free_entries2 = vmm2.bmp.num_free_entries;
const expected_free_pmm_entries = pmm.blocksFree();
// Copy an arbitrary buffer into the allocation
const buff = &[_]u8{ 4, 5, 9, 123, 90, 67 };
vmm.copyDataToVMM(&vmm2, buff, addr) catch |e| {
panic(@errorReturnTrace(), "Failed to copy data to secondary VMM in rt_copyDataToVMM: {}\n", .{e});
};
// Make sure the function cleaned up
if (vmm.bmp.num_free_entries != expected_free_entries) {
panic(@errorReturnTrace(), "Expected {} free entries in VMM, but there were {}\n", .{ expected_free_entries, vmm.bmp.num_free_entries });
}
if (vmm2.bmp.num_free_entries != expected_free_entries2) {
panic(@errorReturnTrace(), "Expected {} free entries in the secondary VMM, but there were {}\n", .{ expected_free_entries2, vmm2.bmp.num_free_entries });
}
if (pmm.blocksFree() != expected_free_pmm_entries) {
panic(@errorReturnTrace(), "Expected {} free entries in PMM, but there were {}\n", .{ expected_free_pmm_entries, pmm.blocksFree() });
}
// Make sure that the data at the allocated address is correct
// Since vmm2 is a mirror of vmm, this address should be mapped by the CPU's MMU
const buff2 = @intToPtr([*]u8, addr)[0..buff.len];
if (!std.mem.eql(u8, buff, buff2)) {
panic(@errorReturnTrace(), "buff2 is not the same as buff in rt_copyDataToVMM\n", .{});
}
// Free the secondary VMM
vmm2.free(addr) catch |e| {
panic(@errorReturnTrace(), "Failed to free the allocation in secondary VMM: {}\n", .{e});
};
}


@ -49,6 +49,7 @@ const types = .{
.{ "*const IdtPtr", "PTR_CONST_IDTPTR", "idt_mock", "", "IdtPtr" },
.{ "*Task", "PTR_TASK", "task_mock", "", "Task" },
.{ "*Allocator", "PTR_ALLOCATOR", "", "std.mem", "Allocator" },
.{ "*VirtualMemoryManager(u8)", "PTR_VMM", "vmm_mock", "", "VirtualMemoryManager" },
.{ "IdtError!void", "ERROR_IDTERROR_RET_VOID", "idt_mock", "", "IdtError" },
.{ "Allocator.Error!*Task", "ERROR_ALLOCATOR_RET_PTRTASK", "", "", "" },
@ -82,6 +83,7 @@ const types = .{
.{ "fn (*Task, usize) void", "FN_IPTRTASK_IUSIZE_OVOID", "", "", "" },
.{ "fn (*Task, *Allocator) void", "FN_IPTRTASK_IPTRALLOCATOR_OVOID", "", "", "" },
.{ "fn (fn () void, *Allocator) Allocator.Error!*Task", "FN_IFNOVOID_IPTRALLOCATOR_EALLOCATOR_OPTRTASK", "", "", "" },
.{ "fn (usize, *Allocator, bool, *VirtualMemoryManager(u8)) Allocator.Error!*Task", "FN_IUSIZE_IPTRALLOCATOR_IBOOL_IVMM_EALLOCATOR_OVOID", "", "", "" },
.{ "fn (StatusRegister, u8, bool) void", "FN_ISTATUSREGISTER_IU8_IBOOL_OVOID", "", "", "" },
};


@ -11,7 +11,7 @@ const Serial = @import("../../../src/kernel/serial.zig").Serial;
const TTY = @import("../../../src/kernel/tty.zig").TTY;
const Keyboard = @import("../../../src/kernel/keyboard.zig").Keyboard;
pub const task = @import("task_mock.zig");
pub const task = @import("../../../src/kernel/task.zig");
pub const Device = pci.PciDeviceInfo;
@ -141,10 +141,7 @@ pub fn initMem(payload: BootPayload) Allocator.Error!mem.MemProfile {
};
}
pub fn initTaskStack(entry_point: usize, allocator: *Allocator) Allocator.Error!struct { stack: []u32, pointer: usize } {
const ret = .{ .stack = &([_]u32{}), .pointer = 0 };
return ret;
}
pub fn initTask(t: *Task, entry_point: usize, allocator: *Allocator) Allocator.Error!void {}
pub fn initKeyboard(allocator: *Allocator) Allocator.Error!?*Keyboard {
return null;


@ -146,6 +146,7 @@ fn Mock() type {
1 => fn (fields[0].field_type) RetType,
2 => fn (fields[0].field_type, fields[1].field_type) RetType,
3 => fn (fields[0].field_type, fields[1].field_type, fields[2].field_type) RetType,
4 => fn (fields[0].field_type, fields[1].field_type, fields[2].field_type, fields[3].field_type) RetType,
else => @compileError("More than 3 parameters not supported"),
};
}
@ -167,6 +168,7 @@ fn Mock() type {
1 => function_type(params[0]),
2 => function_type(params[0], params[1]),
3 => function_type(params[0], params[1], params[2]),
4 => function_type(params[0], params[1], params[2], params[3]),
// Shouldn't get to this as `getFunctionType` will catch it first
else => @compileError("More than 4 parameters not supported"),
};


@ -1,4 +1,6 @@
const std = @import("std");
const vmm = @import("vmm_mock.zig");
const arch = @import("arch_mock.zig");
const Allocator = std.mem.Allocator;
const mock_framework = @import("mock_framework.zig");
@ -8,17 +10,20 @@ pub const addTestParams = mock_framework.addTestParams;
pub const addConsumeFunction = mock_framework.addConsumeFunction;
pub const addRepeatFunction = mock_framework.addRepeatFunction;
const EntryPointFn = fn () void;
pub const EntryPoint = usize;
pub const Task = struct {
const Self = @This();
pid: u32,
stack: []u32,
kernel_stack: []u32,
user_stack: []u32,
stack_pointer: usize,
kernel: bool,
vmm: vmm.VirtualMemoryManager(arch.VmmPayload),
pub fn create(entry_point: EntryPointFn, allocator: *Allocator) Allocator.Error!*Task {
return mock_framework.performAction("Task.create", Allocator.Error!*Task, .{ entry_point, allocator });
pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: *Allocator) Allocator.Error!*Task {
return mock_framework.performAction("Task.create", Allocator.Error!*Task, .{ entry_point, allocator, kernel, task_vmm });
}
pub fn destroy(self: *Self, allocator: *Allocator) void {


@ -3,6 +3,7 @@ const bitmap = @import("../../../src/kernel/bitmap.zig");
const vmm = @import("../../../src/kernel/vmm.zig");
const arch = @import("arch_mock.zig");
const std = @import("std");
const Allocator = std.mem.Allocator;
pub const VmmError = error{
/// A memory region expected to be allocated wasn't
@ -45,9 +46,11 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
pub fn free(self: *Self, vaddr: usize) (bitmap.Bitmap(u32).BitmapError || VmmError)!void {
return VmmError.NotAllocated;
}
pub fn copyDataToVMM(self: *Self, to: *const Self, data: []const u8, dest: usize) (bitmap.Bitmap(usize).BitmapError || VmmError || Allocator.Error)!void {}
};
}
pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator.Error!*VirtualMemoryManager(arch.VmmPayload) {
return std.mem.Allocator.Error.OutOfMemory;
}

test/user_program.s (new file, 6 lines)

@ -0,0 +1,6 @@
entry:
mov $0xCAFE, %eax # Sentinel values checked by arch.runtimeTestCheckUserTaskState
mov $0xBEEF, %ebx
loop:
jmp loop # Spin so the scheduler can interrupt the task and inspect its saved state