const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const assert = std.debug.assert;
const log = std.log.scoped(.scheduler);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals;
const panic = @import("panic.zig").panic;
const task = @import("task.zig");
const vmm = @import("vmm.zig");
const mem = @import("mem.zig");
const fs = @import("filesystem/vfs.zig");
const elf = @import("elf.zig");
const pmm = @import("pmm.zig");
const Task = task.Task;
const EntryPoint = task.EntryPoint;
const Allocator = std.mem.Allocator;
const TailQueue = std.TailQueue;
/// The default stack size of a task. Currently this is one page's worth of usize entries.
const STACK_SIZE: u32 = arch.MEMORY_BLOCK_SIZE / @sizeOf(usize);

/// Pointer to the start of the main kernel stack
extern var KERNEL_STACK_START: []u32;

/// Pointer to the end of the main kernel stack
extern var KERNEL_STACK_END: []u32;

/// The currently running task
var current_task: *Task = undefined;

/// Array list of all runnable tasks
var tasks: TailQueue(*Task) = undefined;

/// Whether the scheduler is allowed to switch tasks.
var can_switch: bool = true;
///
/// The idle task. This just halts the CPU, but the CPU can still handle interrupts.
///
fn idle() noreturn {
    arch.spinWait();
}
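
///
/// Enable or disable task switching. While switching is disabled, pickNextTask will keep
/// returning the current task, which is useful around critical sections.
///
/// Arguments:
///     IN enabled: bool - Whether the scheduler is allowed to switch tasks.
///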
pub fn taskSwitching(enabled: bool) void {
    can_switch = enabled;
}
///
/// Round robin scheduling. This will first save the current task's stack pointer, then pick the
/// next task to be run from the queue. It will add the current task to the end of the queue, pop
/// the next task from the front and set it as the current task. It then returns the stack pointer
/// of the next task, which is loaded into the stack register so the next task's state can be
/// popped off its stack. Interrupts are assumed disabled.
///
/// Arguments:
///     IN ctx: *arch.CpuState - Pointer to the exception context containing the contents
///                              of the registers at the time of the exception.
///
/// Return: usize
///     The new stack pointer, pointing to the stack of the next task.
///
pub fn pickNextTask(ctx: *arch.CpuState) usize {
    switch (build_options.test_mode) {
        .Scheduler => if (!current_task.kernel) {
            if (!arch.runtimeTestCheckUserTaskState(ctx)) {
                panic(null, "User task state check failed\n", .{});
            }
        },
        else => {},
    }

    // Save the stack pointer from the old task
    current_task.stack_pointer = @ptrToInt(ctx);

    // If we can't switch, then continue with the current task
    if (!can_switch) {
        return current_task.stack_pointer;
    }

    // Pick the next task
    // If there isn't one, then just return the same task
    if (tasks.pop()) |new_task_node| {
        // Get the next task
        const next_task = new_task_node.data;

        // Reuse the popped node for the outgoing task so no allocation is needed;
        // this speeds things up
        new_task_node.data = current_task;
        new_task_node.prev = null;
        new_task_node.next = null;

        // Add the 'current_task' node to the end of the queue
        tasks.prepend(new_task_node);
        current_task = next_task;
    }

    // The context switch happens in the interrupt stub handler, which will pop the next task's
    // state off the stack
    return current_task.stack_pointer;
}
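
// A minimal sketch of how an architecture's timer interrupt stub might drive the scheduler
// (the handler name is hypothetical; the real stub lives in the arch code):
//
//     fn timerHandler(ctx: *arch.CpuState) usize {
//         // The returned stack pointer is loaded into the stack register so the stub
//         // pops the next task's saved state instead of the interrupted one's.
//         return pickNextTask(ctx);
//     }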
///
/// Add a task to the scheduling queue. No locking.
///
/// Arguments:
///     IN new_task: *Task       - The task to add to the queue.
///     IN allocator: *Allocator - The allocator to use to allocate the queue node.
///
/// Error: Allocator.Error
///     OutOfMemory - If there isn't enough memory for the queue node. Any memory allocated will
///                   be freed on return.
///
pub fn scheduleTask(new_task: *Task, allocator: *Allocator) Allocator.Error!void {
    var task_node = try allocator.create(TailQueue(*Task).Node);
    task_node.* = .{ .data = new_task };
    tasks.prepend(task_node);
}
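
// A minimal usage sketch, assuming a kernel-mode entry function `myEntry` (hypothetical):
//
//     var my_task = try Task.create(@ptrToInt(myEntry), true, &vmm.kernel_vmm, allocator);
//     errdefer my_task.destroy(allocator);
//     try scheduleTask(my_task, allocator);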
///
/// Initialise the scheduler. This will set up the current task to be the code that is currently
/// running, so if there is a task switch before kmain can finish, it can continue when switched
/// back. This will set the stack to KERNEL_STACK_START from the linker script. This will also
/// create the idle task for when there are no more tasks to run.
///
/// Arguments:
///     IN allocator: *Allocator              - The allocator to use when needing to allocate memory.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile used for runtime testing.
///
/// Error: Allocator.Error
///     OutOfMemory - There is no more memory. Any memory allocated will be freed on return.
///
pub fn init(allocator: *Allocator, mem_profile: *const mem.MemProfile) Allocator.Error!void {
    // TODO: Maybe move the task init here?
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    // Init the task list for round robin
    tasks = TailQueue(*Task){};

    // Set up the init task to continue execution
    current_task = try allocator.create(Task);
    errdefer allocator.destroy(current_task);

    // PID 0
    current_task.pid = 0;
    const kernel_stack_size = @ptrToInt(&KERNEL_STACK_END) - @ptrToInt(&KERNEL_STACK_START);
    current_task.kernel_stack = @intToPtr([*]u32, @ptrToInt(&KERNEL_STACK_START))[0..kernel_stack_size];
    current_task.user_stack = &[_]usize{};
    current_task.kernel = true;
    // ESP will be saved on the next schedule

    // Run the runtime tests here
    switch (build_options.test_mode) {
        .Scheduler => runtimeTests(allocator, mem_profile),
        else => {},
    }

    // Create the idle task to run when there are no tasks left
    var idle_task = try Task.create(@ptrToInt(idle), true, &vmm.kernel_vmm, allocator);
    errdefer idle_task.destroy(allocator);

    try scheduleTask(idle_task, allocator);
}
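
// A minimal sketch of bringing the scheduler up from kmain (the allocator and profile
// names are assumptions, not the actual kmain code):
//
//     init(&kernel_allocator, &mem_profile) catch |e| {
//         panic(@errorReturnTrace(), "Failed to initialise the scheduler: {}\n", .{e});
//     };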
// For testing the errdefer
const FailingAllocator = std.testing.FailingAllocator;
const testing_allocator = &std.testing.base_allocator_instance.allocator;

fn test_fn1() void {}
fn test_fn2() void {}

var test_pid_counter: u7 = 1;

fn createTestTask(entry_point: EntryPoint, allocator: *Allocator, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(u8)) Allocator.Error!*Task {
    var t = try allocator.create(Task);
    errdefer allocator.destroy(t);
    t.pid = test_pid_counter;
    // Just alloc something
    t.kernel_stack = try allocator.alloc(u32, 1);
    t.stack_pointer = 0;
    test_pid_counter += 1;
    return t;
}

fn destroyTestTask(self: *Task, allocator: *Allocator) void {
    if (@ptrToInt(self.kernel_stack.ptr) != @ptrToInt(&KERNEL_STACK_START)) {
        allocator.free(self.kernel_stack);
    }
    allocator.destroy(self);
}
test " pickNextTask " {
var ctx : arch . CpuState = std . mem . zeroes ( arch . CpuState ) ;
var allocator = std . testing . allocator ;
2020-08-23 14:32:32 +01:00
tasks = TailQueue ( * Task ) { } ;
2020-07-18 22:46:24 +01:00
// Set up a current task
current_task = try allocator . create ( Task ) ;
defer allocator . destroy ( current_task ) ;
current_task . pid = 0 ;
2020-07-24 00:18:56 +01:00
current_task . kernel_stack = @intToPtr ( [ * ] u32 , @ptrToInt ( & KERNEL_STACK_START ) ) [ 0 . . 4096 ] ;
2020-07-18 22:46:24 +01:00
current_task . stack_pointer = @ptrToInt ( & KERNEL_STACK_START ) ;
// Create two tasks and schedule them
2020-07-24 00:18:56 +01:00
var test_fn1_task = try Task . create ( @ptrToInt ( test_fn1 ) , true , undefined , allocator ) ;
2020-07-18 22:46:24 +01:00
defer test_fn1_task . destroy ( allocator ) ;
try scheduleTask ( test_fn1_task , allocator ) ;
2020-07-24 00:18:56 +01:00
var test_fn2_task = try Task . create ( @ptrToInt ( test_fn2 ) , true , undefined , allocator ) ;
2020-07-18 22:46:24 +01:00
defer test_fn2_task . destroy ( allocator ) ;
try scheduleTask ( test_fn2_task , allocator ) ;
// Get the stack pointers of the created tasks
2020-12-07 23:08:23 +00:00
const fn1_stack_pointer = test_fn1_task . stack_pointer ;
const fn2_stack_pointer = test_fn2_task . stack_pointer ;
2020-07-18 22:46:24 +01:00
expectEqual ( pickNextTask ( & ctx ) , fn1_stack_pointer ) ;
// The stack pointer of the re-added task should point to the context
expectEqual ( tasks . first . ? . data . stack_pointer , @ptrToInt ( & ctx ) ) ;
// Should be the PID of the next task
expectEqual ( current_task . pid , 1 ) ;
expectEqual ( pickNextTask ( & ctx ) , fn2_stack_pointer ) ;
// The stack pointer of the re-added task should point to the context
expectEqual ( tasks . first . ? . data . stack_pointer , @ptrToInt ( & ctx ) ) ;
// Should be the PID of the next task
expectEqual ( current_task . pid , 2 ) ;
expectEqual ( pickNextTask ( & ctx ) , @ptrToInt ( & ctx ) ) ;
// The stack pointer of the re-added task should point to the context
expectEqual ( tasks . first . ? . data . stack_pointer , @ptrToInt ( & ctx ) ) ;
// Should be back tot he beginning
expectEqual ( current_task . pid , 0 ) ;
// Reset the test pid
test_pid_counter = 1 ;
// Free the queue
while ( tasks . pop ( ) ) | elem | {
2020-08-23 14:32:32 +01:00
allocator . destroy ( elem ) ;
2020-07-18 22:46:24 +01:00
}
}
test " createNewTask add new task " {
// Set the global allocator
var allocator = std . testing . allocator ;
// Init the task list
2020-08-23 14:32:32 +01:00
tasks = TailQueue ( * Task ) { } ;
2020-07-18 22:46:24 +01:00
2020-07-24 00:18:56 +01:00
var test_fn1_task = try Task . create ( @ptrToInt ( test_fn1 ) , true , undefined , allocator ) ;
2020-07-18 22:46:24 +01:00
defer test_fn1_task . destroy ( allocator ) ;
try scheduleTask ( test_fn1_task , allocator ) ;
expectEqual ( tasks . len , 1 ) ;
// Free the memory
2020-08-23 14:32:32 +01:00
allocator . destroy ( tasks . first . ? ) ;
2020-07-18 22:46:24 +01:00
}
test " init " {
var allocator = std . testing . allocator ;
2020-07-24 00:18:56 +01:00
try init ( allocator , undefined ) ;
2020-07-18 22:46:24 +01:00
expectEqual ( current_task . pid , 0 ) ;
2020-12-07 23:08:23 +00:00
expectEqual ( @ptrToInt ( current_task . kernel_stack . ptr ) , @ptrToInt ( & KERNEL_STACK_START ) ) ;
expectEqual ( current_task . kernel_stack . len , @ptrToInt ( & KERNEL_STACK_END ) - @ptrToInt ( & KERNEL_STACK_START ) ) ;
2020-07-18 22:46:24 +01:00
expectEqual ( tasks . len , 1 ) ;
// Free the tasks created
current_task . destroy ( allocator ) ;
while ( tasks . pop ( ) ) | elem | {
elem . data . destroy ( allocator ) ;
2020-08-23 14:32:32 +01:00
allocator . destroy ( elem ) ;
2020-07-18 22:46:24 +01:00
}
}
/// A volatile pointer used to control a loop outside the task. This is to ensure a task switch
/// occurred.
var is_set: *volatile bool = undefined;

///
/// The test task function.
///
fn task_function() noreturn {
    log.info("Switched\n", .{});
    is_set.* = false;
    while (true) {}
}
///
/// This tests that variables in registers and on the stack are preserved when a task switch
/// occurs. Also tests that a global volatile can be set in one task and reacted to in another.
///
/// Arguments:
///     IN allocator: *Allocator - The allocator to use when needing to allocate memory.
///
fn rt_variable_preserved(allocator: *Allocator) void {
    // Create the memory for the boolean
    is_set = allocator.create(bool) catch unreachable;
    defer allocator.destroy(is_set);
    is_set.* = true;

    var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
    scheduleTask(test_task, allocator) catch |e| panic(@errorReturnTrace(), "Failed to schedule a task in rt_variable_preserved: {}\n", .{e});
    // TODO: Need to add the ability to remove tasks

    var w: u32 = 0;
    var x: u32 = 1;
    var y: u32 = 2;
    var z: u32 = 3;

    while (is_set.*) {
        if (w != 0) {
            panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w});
        }
        if (x != 1) {
            panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x});
        }
        if (y != 2) {
            panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y});
        }
        if (z != 3) {
            panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z});
        }
    }

    // Make sure these are the same values
    if (w != 0) {
        panic(@errorReturnTrace(), "FAILED: w not 0, but: {}\n", .{w});
    }
    if (x != 1) {
        panic(@errorReturnTrace(), "FAILED: x not 1, but: {}\n", .{x});
    }
    if (y != 2) {
        panic(@errorReturnTrace(), "FAILED: y not 2, but: {}\n", .{y});
    }
    if (z != 3) {
        panic(@errorReturnTrace(), "FAILED: z not 3, but: {}\n", .{z});
    }

    log.info("SUCCESS: Scheduler variables preserved\n", .{});
}
///
/// Test the initialisation and running of a task running in user mode.
///
/// Arguments:
///     IN allocator: *std.mem.Allocator      - The allocator to use when initialising the task.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile. Determines the end address of the user task's VMM.
///
fn rt_user_task(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
    for (&[_][]const u8{ "/user_program_data.elf", "/user_program.elf" }) |user_program| {
        // 1. Create user VMM
        var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| {
            panic(@errorReturnTrace(), "Failed to allocate VMM for {s}: {}\n", .{ user_program, e });
        };
        task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch |e| panic(@errorReturnTrace(), "Failed to create the vmm for {s}: {}\n", .{ user_program, e });

        const user_program_file = fs.openFile(user_program, .NO_CREATION) catch |e| {
            panic(@errorReturnTrace(), "Failed to open {s}: {}\n", .{ user_program, e });
        };
        defer user_program_file.close();

        var code: [1024 * 9]u8 = undefined;
        const code_len = user_program_file.read(code[0..code.len]) catch |e| {
            panic(@errorReturnTrace(), "Failed to read {s}: {}\n", .{ user_program, e });
        };

        const program_elf = elf.Elf.init(code[0..code_len], builtin.arch, allocator) catch |e| panic(@errorReturnTrace(), "Failed to load {s}: {}\n", .{ user_program, e });
        defer program_elf.deinit();

        const current_physical_blocks = pmm.blocksFree();

        var user_task = task.Task.createFromElf(program_elf, false, task_vmm, allocator) catch |e| {
            panic(@errorReturnTrace(), "Failed to create task for {s}: {}\n", .{ user_program, e });
        };
        scheduleTask(user_task, allocator) catch |e| {
            panic(@errorReturnTrace(), "Failed to schedule the task for {s}: {}\n", .{ user_program, e });
        };

        var num_allocatable_sections: usize = 0;
        var size_allocatable_sections: usize = 0;
        for (program_elf.section_headers) |section| {
            if (section.flags & elf.SECTION_ALLOCATABLE != 0) {
                num_allocatable_sections += 1;
                size_allocatable_sections += std.mem.alignForward(section.size, vmm.BLOCK_SIZE);
            }
        }

        // Only the allocatable ELF sections are expected to have been allocated in the VMM
        if (task_vmm.allocations.count() != num_allocatable_sections) {
            panic(@errorReturnTrace(), "VMM allocated the wrong number of virtual regions for {s}. Expected {} but found {}\n", .{ user_program, num_allocatable_sections, task_vmm.allocations.count() });
        }

        const allocated_size = (task_vmm.bmp.num_entries - task_vmm.bmp.num_free_entries) * vmm.BLOCK_SIZE;
        if (size_allocatable_sections != allocated_size) {
            panic(@errorReturnTrace(), "VMM allocated the wrong amount of memory for {s}. Expected {} but found {}\n", .{ user_program, size_allocatable_sections, allocated_size });
        }
    }
}
///
/// The scheduler runtime tests that will test the scheduling functionality.
///
/// Arguments:
///     IN allocator: *Allocator              - The allocator to use when needing to allocate memory.
///     IN mem_profile: *const mem.MemProfile - The system's memory profile. Used to set up user task VMMs.
///
fn runtimeTests(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
    arch.enableInterrupts();
    rt_user_task(allocator, mem_profile);
    rt_variable_preserved(allocator);
    while (true) {}
}