const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const assert = std.debug.assert;
const log = std.log.scoped(.scheduler);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals;
const panic = if (is_test) @import(mock_path ++ "panic_mock.zig").panic else @import("panic.zig").panic;
const task = if (is_test) @import(mock_path ++ "task_mock.zig") else @import("task.zig");
const vmm = if (is_test) @import(mock_path ++ "vmm_mock.zig") else @import("vmm.zig");
const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
const fs = @import("filesystem/vfs.zig");
const Task = task.Task;
const EntryPoint = task.EntryPoint;
const Allocator = std.mem.Allocator;
const TailQueue = std.TailQueue;

/// The default stack size of a task, in usize words. Currently this is one memory block (page).
const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(usize);

/// Pointer to the start of the main kernel stack
extern var KERNEL_STACK_START: []u32;

/// Pointer to the end of the main kernel stack
extern var KERNEL_STACK_END: []u32;

/// The currently running task
var current_task: *Task = undefined;

/// Queue of all runnable tasks
var tasks: TailQueue(*Task) = undefined;

/// Whether the scheduler is allowed to switch tasks.
var can_switch: bool = true;

///
/// The idle task. This halts the CPU in a loop while still allowing it to handle interrupts.
///
fn idle() noreturn {
    arch.spinWait();
}

///
/// Enable or disable pre-emptive task switching.
///
/// Arguments:
///     IN enabled: bool - Whether the scheduler is allowed to switch tasks.
///
pub fn taskSwitching(enabled: bool) void {
    can_switch = enabled;
}
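
// Illustrative sketch, not part of the original source: pre-emption can be disabled around a
// critical section and re-enabled afterwards. updateSharedQueue() is a hypothetical helper
// standing in for any non-atomic work.
//
//     taskSwitching(false);
//     defer taskSwitching(true);
//     updateSharedQueue();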

///
/// Round robin scheduling. This will first save the current task's stack pointer, then pick
/// the next task to run from the queue: the current task is added to the end of the queue
/// and the next task is popped from the front and set as the current task. The stack pointer
/// of the next task is returned so it can be loaded into the stack register, allowing the
/// next task's state to be popped off its stack. Interrupts are assumed disabled.
///
/// Arguments:
///     IN ctx: *arch.CpuState - Pointer to the exception context containing the contents
///                              of the registers at the time of an exception.
///
/// Return: usize
///     The stack pointer of the next task.
///
pub fn pickNextTask(ctx: *arch.CpuState) usize {
    switch (build_options.test_mode) {
        .Scheduler => if (!current_task.kernel) {
            if (!arch.runtimeTestCheckUserTaskState(ctx)) {
                panic(null, "User task state check failed\n", .{});
            }
        },
        else => {},
    }

    // Save the stack pointer from the old task
    current_task.stack_pointer = @ptrToInt(ctx);

    // If we can't switch, then continue with the current task
    if (!can_switch) {
        return current_task.stack_pointer;
    }

    // Pick the next task. If there isn't one, then just continue with the current task.
    if (tasks.pop()) |new_task_node| {
        // Get the next task
        const next_task = new_task_node.data;

        // Reuse the popped node for the current task so no new memory needs to be allocated,
        // which speeds things up
        new_task_node.data = current_task;
        new_task_node.prev = null;
        new_task_node.next = null;

        // Add the 'current_task' node to the end of the scheduling queue (the front of the
        // TailQueue, as tasks are popped from the back)
        tasks.prepend(new_task_node);

        current_task = next_task;
    }

    // Context switch in the interrupt stub handler, which will pop the next task's state off
    // the stack
    return current_task.stack_pointer;
}
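
// A worked example of the rotation above (illustrative only): with task A running and the
// queue holding [front: C, back: B], pickNextTask saves A's stack pointer from the context,
// pops B from the back, reuses B's node to prepend A (giving [front: A, back: C]) and returns
// B's saved stack pointer for the interrupt stub to switch to.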

///
/// Add a task to the scheduling queue. No locking.
///
/// Arguments:
///     IN new_task: *Task       - The task to be scheduled. This must already be initialised.
///     IN allocator: *Allocator - The allocator used to allocate the task's queue node.
///
/// Error: Allocator.Error
///     OutOfMemory - If there isn't enough memory for the queue node. Any memory allocated
///                   will be freed on return.
///
pub fn scheduleTask(new_task: *Task, allocator: *Allocator) Allocator.Error!void {
    var task_node = try allocator.create(TailQueue(*Task).Node);
    task_node.* = .{ .data = new_task };
    tasks.prepend(task_node);
}
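
// Typical usage, sketched from the idle task set-up in init() below, where entry is the
// task's entry function: create a task, make sure it is destroyed if scheduling fails, then
// hand it to the scheduler.
//
//     var new_task = try Task.create(@ptrToInt(entry), true, &vmm.kernel_vmm, allocator);
//     errdefer new_task.destroy(allocator);
//     try scheduleTask(new_task, allocator);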

///
/// Initialise the scheduler. This sets up the current task to be the code that is currently
/// running, so if there is a task switch before kmain has finished, kmain can continue when
/// switched back. The stack is set to KERNEL_STACK_START from the linker script. This also
/// creates the idle task for when there are no more tasks to run.
///
/// Arguments:
///     IN allocator: *Allocator              - The allocator to use when needing to allocate memory.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile used for runtime testing.
///
/// Error: Allocator.Error
///     OutOfMemory - There is no more memory. Any memory allocated will be freed on return.
///
pub fn init(allocator: *Allocator, mem_profile: *const mem.MemProfile) Allocator.Error!void {
    // TODO: Maybe move the task init here?
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    // Init the task list for round robin
    tasks = TailQueue(*Task){};

    // Set up the init task to continue execution
    current_task = try allocator.create(Task);
    errdefer allocator.destroy(current_task);
    // PID 0
    current_task.pid = 0;
    // The linker symbols give the stack size in bytes, so convert to the number of u32 words
    const kernel_stack_size = (@ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START)) / @sizeOf(u32);
    current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
    current_task.user_stack = &[_]usize{};
    current_task.kernel = true;
    // ESP will be saved on next schedule

    // Run the runtime tests here
    switch (build_options.test_mode) {
        .Scheduler => runtimeTests(allocator, mem_profile),
        else => {},
    }

    // Create the idle task for when there are no tasks left to run
    var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator);
    errdefer idle_task.destroy(allocator);

    try scheduleTask(idle_task, allocator);
}
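
// Expected boot-path call (a sketch; the kmain wiring and the kernel_heap/mem_profile names
// are assumed, not defined in this file):
//
//     try scheduler.init(&kernel_heap.allocator, &mem_profile);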

// For testing the errdefer
const FailingAllocator = std.testing.FailingAllocator;
const testing_allocator = &std.testing.base_allocator_instance.allocator;

fn test_fn1() void {}
fn test_fn2() void {}

var test_pid_counter: u7 = 1;

fn createTestTask(entry_point: EntryPoint, allocator: *Allocator, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(u8)) Allocator.Error!*Task {
    var t = try allocator.create(Task);
    errdefer allocator.destroy(t);
    t.pid = test_pid_counter;
    // Just alloc something
    t.kernel_stack = try allocator.alloc(u32, 1);
    t.stack_pointer = 0;
    test_pid_counter += 1;
    return t;
}

fn destroyTestTask(self: *Task, allocator: *Allocator) void {
    if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
        allocator.free(self.kernel_stack);
    }
    allocator.destroy(self);
}
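
// These helpers are registered with the task mocking framework in the tests below:
// task.addConsumeFunction substitutes a mock for a single call and task.addRepeatFunction
// for every call, so Task.create and Task.destroy can run without real paging or a real VMM.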

test "pickNextTask" {
    task.initTest();
    defer task.freeTest();

    task.addConsumeFunction("Task.create", createTestTask);
    task.addConsumeFunction("Task.create", createTestTask);
    task.addRepeatFunction("Task.destroy", destroyTestTask);

    var ctx: arch.CpuState = std.mem.zeroes(arch.CpuState);

    var allocator = std.testing.allocator;
    tasks = TailQueue(*Task){};

    // Set up a current task
    current_task = try allocator.create(Task);
    defer allocator.destroy(current_task);
    current_task.pid = 0;
    current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
    current_task.stack_pointer = @ptrToInt(&KERNEL_STACK_START);

    // Create two tasks and schedule them
    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
    defer test_fn1_task.destroy(allocator);
    try scheduleTask(test_fn1_task, allocator);

    var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator);
    defer test_fn2_task.destroy(allocator);
    try scheduleTask(test_fn2_task, allocator);

    // Get the stack pointers of the created tasks
    const fn1_stack_pointer = tasks.first.?.data.stack_pointer;
    const fn2_stack_pointer = tasks.first.?.next.?.data.stack_pointer;

    expectEqual(pickNextTask(&ctx), fn1_stack_pointer);
    // The stack pointer of the re-added task should point to the context
    expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));

    // Should be the PID of the next task
    expectEqual(current_task.pid, 1);

    expectEqual(pickNextTask(&ctx), fn2_stack_pointer);
    // The stack pointer of the re-added task should point to the context
    expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));

    // Should be the PID of the next task
    expectEqual(current_task.pid, 2);

    expectEqual(pickNextTask(&ctx), @ptrToInt(&ctx));
    // The stack pointer of the re-added task should point to the context
    expectEqual(tasks.first.?.data.stack_pointer, @ptrToInt(&ctx));

    // Should be back to the beginning
    expectEqual(current_task.pid, 0);

    // Reset the test pid
    test_pid_counter = 1;

    // Free the queue
    while (tasks.pop()) |elem| {
        allocator.destroy(elem);
    }
}

test "scheduleTask adds a new task" {
    task.initTest();
    defer task.freeTest();

    task.addConsumeFunction("Task.create", createTestTask);
    task.addConsumeFunction("Task.destroy", destroyTestTask);

    // Set the global allocator
    var allocator = std.testing.allocator;

    // Init the task list
    tasks = TailQueue(*Task){};

    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
    defer test_fn1_task.destroy(allocator);
    try scheduleTask(test_fn1_task, allocator);

    expectEqual(tasks.len, 1);

    // Free the memory
    allocator.destroy(tasks.first.?);
}

test "init" {
    task.initTest();
    defer task.freeTest();

    task.addConsumeFunction("Task.create", createTestTask);
    task.addRepeatFunction("Task.destroy", destroyTestTask);

    var allocator = std.testing.allocator;

    try init(allocator, undefined);

    expectEqual(current_task.pid, 0);
    expectEqual(current_task.kernel_stack, @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0 .. (@ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START)) / @sizeOf(u32)]);

    expectEqual(tasks.len, 1);

    // Free the tasks created
    current_task.destroy(allocator);
    while (tasks.pop()) |elem| {
        elem.data.destroy(allocator);
        allocator.destroy(elem);
    }
}

/// A volatile pointer used to control a loop outside the task. This is used to ensure a task
/// switch occurred.
var is_set: *volatile bool = undefined;

///
/// The test task function.
///
fn task_function() noreturn {
    log.info("Switched\n", .{});
    is_set.* = false;
    while (true) {}
}
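
// The handshake, as a sketch: rt_variable_preserved() below sets is_set and spins on it; once
// the scheduler switches into task_function, the flag is cleared, releasing the loop in the
// original task and proving that a switch occurred.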

///
/// Test that variables in registers and on the stack are preserved when a task switch occurs.
/// Also tests that a global volatile can be set in one task and reacted to in another.
///
/// Arguments:
///     IN allocator: *Allocator - The allocator to use when needing to allocate memory.
///
fn rt_variable_preserved(allocator: *Allocator) void {
    // Create the memory for the boolean
    is_set = allocator.create(bool) catch unreachable;
    defer allocator.destroy(is_set);
    is_set.* = true;

    var test_task = Task.create(@ptrToInt(task_function), true, undefined, allocator) catch unreachable;
    scheduleTask(test_task, allocator) catch unreachable;
    // TODO: Need to add the ability to remove tasks

    var w: u32 = 0;
    var x: u32 = 1;
    var y: u32 = 2;
    var z: u32 = 3;

    while (is_set.*) {
        if (w != 0) {
            panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w});
        }
        if (x != 1) {
            panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x});
        }
        if (y != 2) {
            panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y});
        }
        if (z != 3) {
            panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z});
        }
    }
    // Make sure these are the same values
    if (w != 0) {
        panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w});
    }
    if (x != 1) {
        panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x});
    }
    if (y != 2) {
        panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y});
    }
    if (z != 3) {
        panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z});
    }

    log.info("SUCCESS: Scheduler variables preserved\n", .{});
}

///
/// Test the initialisation and running of a task running in user mode.
///
/// Arguments:
///     IN allocator: *Allocator              - The allocator to use when initialising the task.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile. Determines the end
///                                             address of the user task's VMM.
///
fn rt_user_task(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
    // 1. Create user VMM
    var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| {
        panic(@errorReturnTrace(), "Failed to allocate user task VMM: {}\n", .{e});
    };
    task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch unreachable;
    // 2. Create user task. The code will be loaded at address 0
    var user_task = task.Task.create(0, false, task_vmm, allocator) catch |e| {
        panic(@errorReturnTrace(), "Failed to create user task: {}\n", .{e});
    };
    // 3. Read the user program file from the filesystem
    const user_program_file = fs.openFile("/user_program", .NO_CREATION) catch |e| {
        panic(@errorReturnTrace(), "Failed to open /user_program: {}\n", .{e});
    };
    defer user_program_file.close();
    var code: [1024]u8 = undefined;
    const code_len = user_program_file.read(code[0..1024]) catch |e| {
        panic(@errorReturnTrace(), "Failed to read user program file: {}\n", .{e});
    };
    // 4. Allocate space in the VMM for the user program
    const code_start = task_vmm.alloc(std.mem.alignForward(code_len, vmm.BLOCK_SIZE) / vmm.BLOCK_SIZE, .{ .kernel = false, .writable = true, .cachable = true }) catch |e| {
        panic(@errorReturnTrace(), "Failed to allocate VMM memory for user program code: {}\n", .{e});
    } orelse panic(null, "User task VMM didn't allocate space for the user program\n", .{});
    if (code_start != 0) panic(null, "User program start address was {} instead of 0\n", .{code_start});
    // 5. Copy the user program code over
    vmm.kernel_vmm.copyData(task_vmm, code[0..code_len], code_start, true) catch |e| {
        panic(@errorReturnTrace(), "Failed to copy user code: {}\n", .{e});
    };
    // 6. Schedule it
    scheduleTask(user_task, allocator) catch |e| {
        panic(@errorReturnTrace(), "Failed to schedule the user task: {}\n", .{e});
    };
}

///
/// The scheduler runtime tests that will test the scheduling functionality.
///
/// Arguments:
///     IN allocator: *Allocator              - The allocator to use when needing to allocate memory.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile. Used to set up user
///                                             task VMMs.
///
fn runtimeTests(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
    arch.enableInterrupts();
    rt_user_task(allocator, mem_profile);
    rt_variable_preserved(allocator);
    while (true) {}
}