Create task 0 like other tasks

Sam Tebbs 2022-06-05 23:26:29 +01:00
parent 474073a695
commit 6156f1b30a
5 changed files with 86 additions and 80 deletions

View file

@@ -531,8 +531,8 @@ pub fn initKeyboard(allocator: Allocator) Allocator.Error!*Keyboard {
 }
 ///
-/// Initialise a stack used for creating a task.
-/// Currently only support fn () noreturn functions for the entry point.
+/// Initialise a stack and vmm payload used for creating a task.
+/// Currently only supports fn () noreturn functions for the entry point.
 ///
 /// Arguments:
 /// IN task: *Task - The task to be initialised. The function will only modify whatever
@@ -541,62 +541,66 @@ pub fn initKeyboard(allocator: Allocator) Allocator.Error!*Keyboard {
 /// IN entry_point: usize - The pointer to the entry point of the function. Functions only
 /// supported is fn () noreturn
 /// IN allocator: Allocator - The allocator use for allocating a stack.
+/// IN set_up_stack: bool - Set up the kernel and user stacks (register values, PC etc.) for task entry
 ///
 /// Error: Allocator.Error
 /// OutOfMemory - Unable to allocate space for the stack.
 ///
-pub fn initTask(task: *Task, entry_point: usize, allocator: Allocator) Allocator.Error!void {
-    const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11;
-    // Setting the bottom two bits of the code offset designates that this is a ring 3 task
-    const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11;
-    // Ring switches push and pop two extra values on interrupt: user_esp and user_ss
-    const kernel_stack_bottom = if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20;
-    var stack = &task.kernel_stack;
+pub fn initTask(task: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
     // TODO Will need to add the exit point
     // Set up everything as a kernel task
     task.vmm.payload = &paging.kernel_directory;
-    stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
-    stack.*[kernel_stack_bottom + 1] = data_offset; // gs
-    stack.*[kernel_stack_bottom + 2] = data_offset; // fs
-    stack.*[kernel_stack_bottom + 3] = data_offset; // es
-    stack.*[kernel_stack_bottom + 4] = data_offset; // ds
-    stack.*[kernel_stack_bottom + 5] = 0; // edi
-    stack.*[kernel_stack_bottom + 6] = 0; // esi
-    // End of the stack
-    stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp
-    stack.*[kernel_stack_bottom + 8] = 0; // esp (temp) this won't be popped by popa bc intel is dump XD
+    var stack = &task.kernel_stack;
+    const kernel_stack_bottom = if (!set_up_stack) 0 else if (task.kernel) task.kernel_stack.len - 18 else task.kernel_stack.len - 20;
+    if (set_up_stack) {
+        const data_offset = if (task.kernel) gdt.KERNEL_DATA_OFFSET else gdt.USER_DATA_OFFSET | 0b11;
+        // Setting the bottom two bits of the code offset designates that this is a ring 3 task
+        const code_offset = if (task.kernel) gdt.KERNEL_CODE_OFFSET else gdt.USER_CODE_OFFSET | 0b11;
+        // Ring switches push and pop two extra values on interrupt: user_esp and user_ss
-    stack.*[kernel_stack_bottom + 9] = 0; // ebx
-    stack.*[kernel_stack_bottom + 10] = 0; // edx
-    stack.*[kernel_stack_bottom + 11] = 0; // ecx
-    stack.*[kernel_stack_bottom + 12] = 0; // eax
+        stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
+        stack.*[kernel_stack_bottom + 1] = data_offset; // gs
+        stack.*[kernel_stack_bottom + 2] = data_offset; // fs
+        stack.*[kernel_stack_bottom + 3] = data_offset; // es
+        stack.*[kernel_stack_bottom + 4] = data_offset; // ds
-    stack.*[kernel_stack_bottom + 13] = 0; // int_num
-    stack.*[kernel_stack_bottom + 14] = 0; // error_code
+        stack.*[kernel_stack_bottom + 5] = 0; // edi
+        stack.*[kernel_stack_bottom + 6] = 0; // esi
+        // End of the stack
+        stack.*[kernel_stack_bottom + 7] = @ptrToInt(&stack.*[stack.len - 1]); // ebp
+        stack.*[kernel_stack_bottom + 8] = 0; // esp (temp) this won't be popped by popa bc intel is dump XD
-    stack.*[kernel_stack_bottom + 15] = entry_point; // eip
-    stack.*[kernel_stack_bottom + 16] = code_offset; // cs
-    stack.*[kernel_stack_bottom + 17] = 0x202; // eflags
+        stack.*[kernel_stack_bottom + 9] = 0; // ebx
+        stack.*[kernel_stack_bottom + 10] = 0; // edx
+        stack.*[kernel_stack_bottom + 11] = 0; // ecx
+        stack.*[kernel_stack_bottom + 12] = 0; // eax
-    if (!task.kernel) {
-        // Put the extra values on the kernel stack needed when chaning privilege levels
-        stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp
-        stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss
+        stack.*[kernel_stack_bottom + 13] = 0; // int_num
+        stack.*[kernel_stack_bottom + 14] = 0; // error_code
-        if (!builtin.is_test) {
-            // Create a new page directory for the user task by mirroring the kernel directory
-            // We need kernel mem mapped so we don't get a page fault when entering kernel code from an interrupt
-            task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0];
-            task.vmm.payload.* = paging.kernel_directory.copy();
+        stack.*[kernel_stack_bottom + 15] = entry_point; // eip
+        stack.*[kernel_stack_bottom + 16] = code_offset; // cs
+        stack.*[kernel_stack_bottom + 17] = 0x202; // eflags
+        if (!task.kernel) {
+            // Put the extra values on the kernel stack needed when chaning privilege levels
+            stack.*[kernel_stack_bottom + 18] = @ptrToInt(&task.user_stack[task.user_stack.len - 1]); // user_esp
+            stack.*[kernel_stack_bottom + 19] = data_offset; // user_ss
+        }
+        task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]);
+    }
+    if (!task.kernel and !builtin.is_test) {
+        // Create a new page directory for the user task by mirroring the kernel directory
+        // We need kernel mem mapped so we don't get a page fault when entering kernel code from an interrupt
+        task.vmm.payload = &(try allocator.allocAdvanced(paging.Directory, paging.PAGE_SIZE_4KB, 1, .exact))[0];
+        task.vmm.payload.* = paging.kernel_directory.copy();
+        if (set_up_stack) {
             stack.*[kernel_stack_bottom] = vmm.kernel_vmm.virtToPhys(@ptrToInt(task.vmm.payload)) catch |e| {
                 panic(@errorReturnTrace(), "Failed to get the physical address of the user task's page directory: {}\n", .{e});
             };
         }
     }
-    task.stack_pointer = @ptrToInt(&stack.*[kernel_stack_bottom]);
 }
///
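For the offsets above: initTask pre-fills the frame that the context switch will later pop off the kernel stack, 18 usize slots for a kernel task (the physical address of the page directory, gs/fs/es/ds, the eight popa registers, int_num, error_code, eip, cs and eflags) and 20 for a user task, since returning to ring 3 also pops user_esp and user_ss. A minimal standalone sketch of that arithmetic follows; it is not code from this commit, the names are hypothetical, and it assumes the same pre-0.10 Zig builtins the repo uses.

const std = @import("std");

// Slots initTask pre-fills at the bottom of the kernel stack:
// page directory address, gs/fs/es/ds, the eight popa registers,
// int_num, error_code, eip, cs, eflags.
const KERNEL_FRAME_SLOTS: usize = 18;
// Returning to ring 3 additionally pops user_esp and user_ss.
const RING_SWITCH_EXTRA: usize = 2;

// Mirrors the `task.kernel_stack.len - 18` / `- 20` expressions above.
fn frameBottom(stack_len: usize, is_kernel: bool) usize {
    const slots = if (is_kernel) KERNEL_FRAME_SLOTS else KERNEL_FRAME_SLOTS + RING_SWITCH_EXTRA;
    return stack_len - slots;
}

test "frame size matches the offsets used in initTask" {
    const stack_len: usize = 1024;
    try std.testing.expectEqual(stack_len - 18, frameBottom(stack_len, true));
    try std.testing.expectEqual(stack_len - 20, frameBottom(stack_len, false));
}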

View file

@@ -144,7 +144,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
     kmain_log.info("Creating init2\n", .{});
     // Create a init2 task
-    var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, kernel_heap.allocator()) catch |e| {
+    var stage2_task = task.Task.create(@ptrToInt(initStage2), true, kernel_vmm, kernel_heap.allocator(), true) catch |e| {
         panic_root.panic(@errorReturnTrace(), "Failed to create init stage 2 task: {}\n", .{e});
     };
     scheduler.scheduleTask(stage2_task, kernel_heap.allocator()) catch |e| {
View file

@@ -137,15 +137,13 @@ pub fn init(allocator: Allocator, mem_profile: *const mem.MemProfile) Allocator.
     // Init the task list for round robin
     tasks = TailQueue(*Task){};
-    // Set up the init task to continue execution
-    current_task = try allocator.create(Task);
+    // Set up the init task to continue execution.
+    // The kernel stack will point to the stack section rather than the heap
+    current_task = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
     errdefer allocator.destroy(current_task);
-    // PID 0
-    current_task.pid = 0;
     const kernel_stack_size = @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START);
     current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
-    current_task.user_stack = &[_]usize{};
-    current_task.kernel = true;
     // ESP will be saved on next schedule
     // Run the runtime tests here
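What this hunk relies on: task 0 is already running on the stack the linker script reserves between KERNEL_STACK_START and KERNEL_STACK_END, so the scheduler only wraps that region in a slice instead of allocating a new one, and ESP is filled in on the first reschedule. A standalone sketch of that idea, where boot_stack and adoptBootStack are hypothetical stand-ins rather than the repo's symbols:

const std = @import("std");

// Stand-in for the stack region the linker script reserves; in the kernel this
// would be the memory between &KERNEL_STACK_START and &KERNEL_STACK_END.
var boot_stack: [1024]u32 = undefined;

const InitTask = struct {
    kernel_stack: []u32,
    stack_pointer: usize,
};

// Adopt an existing region as the task's kernel stack, the way scheduler.init
// now does for task 0 after Task.create(..., false).
fn adoptBootStack(region: []u32) InitTask {
    return InitTask{
        .kernel_stack = region,
        // Left at 0 here; the real ESP is saved on the next schedule.
        .stack_pointer = 0,
    };
}

test "task 0 reuses the existing stack region" {
    const init_task = adoptBootStack(boot_stack[0..]);
    try std.testing.expectEqual(@as(usize, 1024), init_task.kernel_stack.len);
    try std.testing.expectEqual(@as(usize, 0), init_task.stack_pointer);
}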
@@ -155,7 +153,7 @@ pub fn init(allocator: Allocator, mem_profile: *const mem.MemProfile) Allocator.
     }
     // Create the idle task when there are no more tasks left
-    var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator);
+    var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator, true);
     errdefer idle_task.destroy(allocator);
     try scheduleTask(idle_task, allocator);
@@ -195,21 +193,21 @@ test "pickNextTask" {
     tasks = TailQueue(*Task){};
     // Set up a current task
-    var first = try allocator.create(Task);
+    var first = try Task.create(0, true, &vmm.kernel_vmm, allocator, false);
     // We use an intermediary variable to avoid a double-free.
     // Deferring freeing current_task will free whatever current_task points to at the end
-    defer allocator.destroy(first);
+    defer first.destroy(allocator);
     current_task = first;
     current_task.pid = 0;
     current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..4096];
     current_task.stack_pointer = @ptrToInt(&KERNEL_STACK_START);
     // Create two tasks and schedule them
-    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
+    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
     defer test_fn1_task.destroy(allocator);
     try scheduleTask(test_fn1_task, allocator);
-    var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator);
+    var test_fn2_task = try Task.create(@ptrToInt(test_fn2), true, undefined, allocator, true);
     defer test_fn2_task.destroy(allocator);
     try scheduleTask(test_fn2_task, allocator);
@@ -254,7 +252,7 @@ test "createNewTask add new task" {
     // Init the task list
     tasks = TailQueue(*Task){};
-    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
+    var test_fn1_task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
     defer test_fn1_task.destroy(allocator);
     try scheduleTask(test_fn1_task, allocator);
@@ -309,7 +307,7 @@ fn rt_variable_preserved(allocator: Allocator) void {
     defer allocator.destroy(is_set);
     is_set.* = true;
-    var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
+    var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator, true) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
     scheduleTask(test_task, allocator) catch |e| panic(@errorReturnTrace(), "Failed to schedule a task in rt_variable_preserved: {}\n", .{e});
     // TODO: Need to add the ability to remove tasks

View file

@@ -25,12 +25,7 @@ pub const EntryPoint = usize;
 const PidBitmap = bitmap.Bitmap(1024, usize);
 /// The list of PIDs that have been allocated.
-var all_pids: PidBitmap = init: {
-    var pids = PidBitmap.init(1024, null) catch unreachable;
-    // Reserve PID 0 for the init task
-    _ = pids.setFirstFree() orelse unreachable;
-    break :init pids;
-};
+var all_pids = PidBitmap.init(1024, null) catch unreachable;
 /// The default stack size of a task. Currently this is set to a page size.
 pub const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(u32);
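With the comptime reservation gone, PID 0 is simply the first PID handed out, which is why the init task in scheduler.init can go through the normal Task.create path and still end up as PID 0. A toy model of that allocation order, where ToyPids is illustrative only and not the repo's Bitmap type:

const std = @import("std");

// Toy PID allocator: one bit per PID, lowest free PID wins.
const ToyPids = struct {
    used: u8 = 0, // 8 PIDs are plenty for the sketch

    fn setFirstFree(self: *ToyPids) ?u3 {
        var pid: u3 = 0;
        while (true) : (pid += 1) {
            const mask = @as(u8, 1) << pid;
            if ((self.used & mask) == 0) {
                self.used |= mask;
                return pid;
            }
            if (pid == 7) return null;
        }
    }
};

test "first allocation now hands out PID 0" {
    var pids = ToyPids{};
    // Before this commit PID 0 was claimed at comptime, so the first caller got 1.
    try std.testing.expectEqual(@as(?u3, 0), pids.setFirstFree());
    try std.testing.expectEqual(@as(?u3, 1), pids.setFirstFree());
}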
@@ -75,15 +70,15 @@ pub const Task = struct {
     /// OutOfMemory - If there is no more memory to allocate. Any memory or PID allocated will
     /// be freed on return.
     ///
-    pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator) Allocator.Error!*Task {
+    pub fn create(entry_point: EntryPoint, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator, alloc_kernel_stack: bool) Allocator.Error!*Task {
         var task = try allocator.create(Task);
         errdefer allocator.destroy(task);
         const pid = allocatePid();
         errdefer freePid(pid) catch |e| panic(@errorReturnTrace(), "Failed to free task PID in errdefer ({}): {}\n", .{ pid, e });
-        var k_stack = try allocator.alloc(usize, STACK_SIZE);
-        errdefer allocator.free(k_stack);
+        var k_stack = if (alloc_kernel_stack) try allocator.alloc(usize, STACK_SIZE) else &[_]usize{};
+        errdefer if (alloc_kernel_stack) allocator.free(k_stack);
         var u_stack = if (kernel) &[_]usize{} else try allocator.alloc(usize, STACK_SIZE);
         errdefer if (!kernel) allocator.free(u_stack);
@@ -92,18 +87,18 @@
             .pid = pid,
             .kernel_stack = k_stack,
             .user_stack = u_stack,
-            .stack_pointer = @ptrToInt(&k_stack[STACK_SIZE - 1]),
+            .stack_pointer = if (!alloc_kernel_stack) 0 else @ptrToInt(&k_stack[STACK_SIZE - 1]),
             .kernel = kernel,
             .vmm = task_vmm,
         };
-        try arch.initTask(task, entry_point, allocator);
+        try arch.initTask(task, entry_point, allocator, alloc_kernel_stack);
         return task;
     }
     pub fn createFromElf(program_elf: elf.Elf, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: Allocator) (bitmap.BitmapError || vmm.VmmError || Allocator.Error)!*Task {
-        const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator);
+        const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator, true);
         errdefer task.destroy(allocator);
         // Iterate over sections
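The create change above boils down to this: with alloc_kernel_stack false, the task is handed an empty kernel_stack slice and a zero stack_pointer, and destroy must not free that slice later. A small standalone sketch of that contract, where SketchTask and createSketch are hypothetical and not the kernel's Task API, using the repo's pre-0.10 Zig builtins:

const std = @import("std");

// With the flag cleared the task carries an empty stack slice and a zero
// stack pointer; the caller supplies the real stack memory afterwards.
const SketchTask = struct {
    kernel_stack: []usize = &[_]usize{},
    stack_pointer: usize = 0,
};

fn createSketch(allocator: std.mem.Allocator, alloc_kernel_stack: bool) !SketchTask {
    var task = SketchTask{};
    if (alloc_kernel_stack) {
        task.kernel_stack = try allocator.alloc(usize, 16);
        task.stack_pointer = @ptrToInt(&task.kernel_stack[task.kernel_stack.len - 1]);
    }
    return task;
}

test "alloc_kernel_stack controls stack ownership" {
    const allocator = std.testing.allocator;

    const with_stack = try createSketch(allocator, true);
    defer allocator.free(with_stack.kernel_stack);
    try std.testing.expect(with_stack.stack_pointer != 0);

    const without_stack = try createSketch(allocator, false);
    // Nothing to free: the caller (e.g. scheduler.init) supplies the memory later.
    try std.testing.expectEqual(@as(usize, 0), without_stack.kernel_stack.len);
    try std.testing.expectEqual(@as(usize, 0), without_stack.stack_pointer);
}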
@@ -145,7 +140,7 @@
         freePid(self.pid) catch |e| panic(@errorReturnTrace(), "Failed to free task's PID ({}): {}\n", .{ self.pid, e });
         // We need to check that the the stack has been allocated as task 0 (init) won't have a
         // stack allocated as this in the linker script
-        if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
+        if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START) and self.kernel_stack.len > 0) {
             allocator.free(self.kernel_stack);
         }
         if (!self.kernel) {
@@ -192,8 +187,8 @@ test "create out of memory for task" {
     // Set the global allocator
     var fa = FailingAllocator.init(testing_allocator, 0);
-    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator()));
-    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator()));
+    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true));
+    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true));
     // Make sure any memory allocated is freed
     try expectEqual(fa.allocated_bytes, fa.freed_bytes);
@@ -208,8 +203,8 @@ test "create out of memory for stack" {
     // Set the global allocator
     var fa = FailingAllocator.init(testing_allocator, 1);
-    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator()));
-    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator()));
+    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), true, undefined, fa.allocator(), true));
+    try expectError(error.OutOfMemory, Task.create(@ptrToInt(test_fn1), false, undefined, fa.allocator(), true));
     // Make sure any memory allocated is freed
     try expectEqual(fa.allocated_bytes, fa.freed_bytes);
@@ -221,7 +216,7 @@ test "create out of memory for stack" {
 }
 test "create expected setup" {
-    var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
+    var task = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
     defer task.destroy(std.testing.allocator);
     // Will allocate the first PID 0
@@ -229,7 +224,7 @@ test "create expected setup" {
     try expectEqual(task.kernel_stack.len, STACK_SIZE);
     try expectEqual(task.user_stack.len, 0);
-    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
+    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true);
     defer user_task.destroy(std.testing.allocator);
     try expectEqual(user_task.pid, 1);
     try expectEqual(user_task.user_stack.len, STACK_SIZE);
@@ -241,8 +236,8 @@ test "destroy cleans up" {
     // So if any alloc were not freed, this will fail the test
     var allocator = std.testing.allocator;
-    var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator);
-    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator);
+    var task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, true);
+    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, allocator, true);
     task.destroy(allocator);
     user_task.destroy(allocator);
@@ -254,8 +249,8 @@ test "destroy cleans up" {
 }
 test "Multiple create" {
-    var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
-    var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
+    var task1 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
+    var task2 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
     try expectEqual(task1.pid, 0);
     try expectEqual(task2.pid, 1);
@@ -271,7 +266,7 @@ test "Multiple create" {
         if (i > 0) try expectEqual(bmp, 0);
     }
-    var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator);
+    var task3 = try Task.create(@ptrToInt(test_fn1), true, undefined, std.testing.allocator, true);
     try expectEqual(task3.pid, 0);
     try expectEqual(all_pids.bitmaps[0], 3);
@@ -282,7 +277,7 @@ test "Multiple create" {
     task2.destroy(std.testing.allocator);
     task3.destroy(std.testing.allocator);
-    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator);
+    var user_task = try Task.create(@ptrToInt(test_fn1), false, undefined, std.testing.allocator, true);
     try expectEqual(user_task.pid, 0);
     try expectEqual(all_pids.bitmaps[0], 1);
@@ -378,3 +373,11 @@ test "createFromElf clean-up" {
     the_elf.section_headers[1].flags |= elf.SECTION_ALLOCATABLE;
     try std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator));
 }
+
+test "create doesn't allocate kernel stack" {
+    var allocator = std.testing.allocator;
+    const task = try Task.create(@ptrToInt(test_fn1), true, undefined, allocator, false);
+    defer task.destroy(allocator);
+    try std.testing.expectEqualSlices(usize, task.kernel_stack, &[_]usize{});
+    try std.testing.expectEqual(task.stack_pointer, 0);
+}

View file

@@ -213,11 +213,12 @@ pub fn initMem(payload: BootPayload) Allocator.Error!mem.MemProfile {
     };
 }
-pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator) Allocator.Error!void {
+pub fn initTask(t: *Task, entry_point: usize, allocator: Allocator, set_up_stack: bool) Allocator.Error!void {
     // Suppress unused variable warnings
     _ = t;
     _ = entry_point;
     _ = allocator;
+    _ = set_up_stack;
 }
 pub fn initKeyboard(allocator: Allocator) Allocator.Error!?*Keyboard {