Initial scheduler

Fix TSS

Also change to .{} syntax where appropriate.
Added the SS segment
Fixed spelling

Refactoring GDT


Multitasking working for now


WIP scheduler

Refactored Bitmap a bit

WIP still


Task switching working

Handlers return the stack pointer that will be used to restore the task's stack. Normal handlers return the same stack pointer they were called with, whereas task-switching handlers return the stack pointer of the next task, whose state is then restored by the interrupt stub.
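
In rough Zig terms the convention looks like this (a sketch only; CpuState and pickNextTask are the names used in the diff below, the import paths are illustrative):

    const arch = @import("arch.zig");
    const scheduler = @import("scheduler.zig");

    // A normal handler: return the same stack pointer it was called with, so the
    // interrupt stub restores the interrupted task unchanged.
    fn ordinaryHandler(ctx: *arch.CpuState) usize {
        // ... handle the interrupt ...
        return @ptrToInt(ctx);
    }

    // A task-switching handler: return the saved stack pointer of the next task,
    // so the interrupt stub pops that task's registers instead.
    fn switchingHandler(ctx: *arch.CpuState) usize {
        return scheduler.pickNextTask(ctx);
    }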

Initial scheduler done


Created a stage 2 init task


Change u32 to usize


Move Task to arch specific


WIP


WIP2


Removed esp from task, replaced with stack_pointer


Removed the debug logs


Fixed init task stack


Change pickNextTask to use pointer manipulation

This needs fewer allocations, so switching is faster.
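
Roughly the idea (a sketch assuming an intrusive linked list, not the repository's exact structures):

    const Task = struct {
        stack_pointer: usize,
        next: ?*Task,
    };

    var task_list_head: *Task = undefined; // set up when the scheduler is initialised
    var current_task: *Task = undefined;

    fn pickNextTaskSketch(old_stack_pointer: usize) usize {
        // Save the outgoing task's stack pointer, then follow a pointer to the next
        // task (wrapping to the head of the list); nothing is allocated on the way.
        current_task.stack_pointer = old_stack_pointer;
        current_task = current_task.next orelse task_list_head;
        return current_task.stack_pointer;
    }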

Temporarily enable interrupts for some runtime tests

PIT and RTC need interrupts enabled to run their runtime tests

Renamed schedule => pickNextTask; the PID bitmap is now comptime rather than set up during task init

And some other stuff: No pub for the task anymore
Use the leak detector allocator
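
The comptime PID bitmap mentioned above, roughly sketched (a plain bool array stands in here for the repository's Bitmap type):

    fn PidBitmap(comptime num_pids: usize) type {
        return struct {
            used: [num_pids]bool = [_]bool{false} ** num_pids,

            // Claim the first free PID, or return null if they are all taken.
            fn allocate(self: *@This()) ?usize {
                for (self.used) |in_use, pid| {
                    if (!in_use) {
                        self.used[pid] = true;
                        return pid;
                    }
                }
                return null;
            }

            fn free(self: *@This(), pid: usize) void {
                self.used[pid] = false;
            }
        };
    }

    var pids = PidBitmap(1024){};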

Fmt


Fix unit tests

And some other stuff :P

PR review

Moved Task out of arch, with the stack init kept in the arch file
Mocking clean up
Removed commented code
Renamed createTask to scheduleTask where the user will have to provide a task to schedule
Removed redundant pub in log runtime test
Removed global allocator for scheduler
Cleaner assembly in paging

Fmt


Added new Scheduler test mode


Added new test mode to CI


Removed one of the prints


Added doc comment, task test for i386


Removed test


WIP


Runtime tests work

A global is set in one task and reacted to in another. Also tests that local variables are preserved after a task switch.
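
A sketch of the shape of that test (illustrative names, not the repository's exact test code; a real version would read the flag through a volatile pointer):

    var task_a_ran: bool = false;

    fn taskA() noreturn {
        // Flip a global that the other task is polling.
        task_a_ran = true;
        while (true) {}
    }

    fn taskB() noreturn {
        const local: u32 = 0xDEADBEEF;
        // Spin until the other task has been scheduled and has set the global.
        while (!task_a_ran) {}
        // The local must still hold its value after the task switches in between.
        if (local != 0xDEADBEEF) {
            @panic("local not preserved across a task switch");
        }
        while (true) {}
    }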

Removed new lines


Increased line length


Move the allocation of the bool above the task creation
DrDeano 2020-07-18 22:46:24 +01:00
parent 20826548e8
commit d600be874c
30 changed files with 1127 additions and 395 deletions

View file

@ -1,47 +1,54 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
const builtin = @import("builtin");
const cmos = @import("cmos.zig");
const gdt = @import("gdt.zig");
const idt = @import("idt.zig");
const pic = @import("pic.zig");
const irq = @import("irq.zig");
const isr = @import("isr.zig");
const paging = @import("paging.zig");
const pic = @import("pic.zig");
const pit = @import("pit.zig");
const rtc = @import("rtc.zig");
const serial = @import("serial.zig");
const paging = @import("paging.zig");
const syscalls = @import("syscalls.zig");
const mem = @import("../../mem.zig");
const multiboot = @import("multiboot.zig");
const pmm = @import("pmm.zig");
const vmm = @import("../../vmm.zig");
const log = @import("../../log.zig");
const tty = @import("tty.zig");
const vga = @import("vga.zig");
const mem = @import("../../mem.zig");
const multiboot = @import("multiboot.zig");
const vmm = @import("../../vmm.zig");
const log = @import("../../log.zig");
const Serial = @import("../../serial.zig").Serial;
const panic = @import("../../panic.zig").panic;
const TTY = @import("../../tty.zig").TTY;
const MemProfile = mem.MemProfile;
/// The virtual end of the kernel code
/// The virtual end of the kernel code.
extern var KERNEL_VADDR_END: *u32;
/// The virtual start of the kernel code
/// The virtual start of the kernel code.
extern var KERNEL_VADDR_START: *u32;
/// The physical end of the kernel code
/// The physical end of the kernel code.
extern var KERNEL_PHYSADDR_END: *u32;
/// The physical start of the kernel code
/// The physical start of the kernel code.
extern var KERNEL_PHYSADDR_START: *u32;
/// The boot-time offset that the virtual addresses are from the physical addresses
/// The boot-time offset that the virtual addresses are from the physical addresses.
extern var KERNEL_ADDR_OFFSET: *u32;
/// The virtual address of the top limit of the stack.
extern var KERNEL_STACK_START: *u32;
/// The virtual address of the base of the stack.
extern var KERNEL_STACK_END: *u32;
/// The interrupt context that is given to a interrupt handler. It contains most of the registers
/// and the interrupt number and error code (if there is one).
pub const InterruptContext = struct {
pub const CpuState = packed struct {
// Extra segments
ss: u32,
gs: u32,
fs: u32,
es: u32,
@ -68,7 +75,7 @@ pub const InterruptContext = struct {
cs: u32,
eflags: u32,
user_esp: u32,
ss: u32,
user_ss: u32,
};
/// x86's boot payload is the multiboot info passed by grub
@ -89,6 +96,9 @@ pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = vmm.Mapper(VmmPayload){ .mapFn =
/// The size of each allocatable block of memory, normally set to the page size.
pub const MEMORY_BLOCK_SIZE: usize = paging.PAGE_SIZE_4KB;
/// The default stack size of a task. Currently this is set to a page size.
pub const STACK_SIZE: u32 = MEMORY_BLOCK_SIZE / @sizeOf(u32);
///
/// Assembly to write to a given port with a byte of data.
///
@ -239,10 +249,9 @@ pub fn halt() void {
/// Wait the kernel but still can handle interrupts.
///
pub fn spinWait() noreturn {
enableInterrupts();
while (true) {
enableInterrupts();
halt();
disableInterrupts();
}
}
@ -312,13 +321,21 @@ pub fn initTTY(boot_payload: BootPayload) TTY {
/// Return: mem.MemProfile
/// The constructed memory profile
///
/// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - There wasn't enough memory in the allocated created to populate the memory profile, consider increasing mem.FIXED_ALLOC_SIZE
/// Error: Allocator.Error
/// Allocator.Error.OutOfMemory - There wasn't enough memory in the allocator created to populate the memory profile; consider increasing mem.FIXED_ALLOC_SIZE
///
pub fn initMem(mb_info: BootPayload) std.mem.Allocator.Error!MemProfile {
pub fn initMem(mb_info: BootPayload) Allocator.Error!MemProfile {
log.logInfo("Init mem\n", .{});
defer log.logInfo("Done mem\n", .{});
log.logDebug("KERNEL_ADDR_OFFSET: 0x{X}\n", .{@ptrToInt(&KERNEL_ADDR_OFFSET)});
log.logDebug("KERNEL_STACK_START: 0x{X}\n", .{@ptrToInt(&KERNEL_STACK_START)});
log.logDebug("KERNEL_STACK_END: 0x{X}\n", .{@ptrToInt(&KERNEL_STACK_END)});
log.logDebug("KERNEL_VADDR_START: 0x{X}\n", .{@ptrToInt(&KERNEL_VADDR_START)});
log.logDebug("KERNEL_VADDR_END: 0x{X}\n", .{@ptrToInt(&KERNEL_VADDR_END)});
log.logDebug("KERNEL_PHYSADDR_START: 0x{X}\n", .{@ptrToInt(&KERNEL_PHYSADDR_START)});
log.logDebug("KERNEL_PHYSADDR_END: 0x{X}\n", .{@ptrToInt(&KERNEL_PHYSADDR_END)});
const mods_count = mb_info.mods_count;
mem.ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);
const mmap_addr = mb_info.mmap_addr;
@ -338,6 +355,7 @@ pub fn initMem(mb_info: BootPayload) std.mem.Allocator.Error!MemProfile {
try reserved_physical_mem.append(.{ .start = @intCast(usize, entry.addr), .end = end });
}
}
// Map the multiboot info struct itself
const mb_region = mem.Range{
.start = @ptrToInt(mb_info),
@ -384,19 +402,63 @@ pub fn initMem(mb_info: BootPayload) std.mem.Allocator.Error!MemProfile {
};
}
///
/// Initialise a 32bit kernel stack used for creating a task.
/// Currently, only fn () noreturn functions are supported for the entry point.
///
/// Arguments:
/// IN entry_point: usize - The pointer to the entry point of the function. The only
///                         function type supported is fn () noreturn.
/// IN allocator: *Allocator - The allocator used for allocating the stack.
///
/// Return: struct { stack: []u32, pointer: usize }
/// The stack and stack pointer with the stack initialised as a 32bit kernel stack.
///
/// Error: Allocator.Error
/// OutOfMemory - Unable to allocate space for the stack.
///
pub fn initTaskStack(entry_point: usize, allocator: *Allocator) Allocator.Error!struct { stack: []u32, pointer: usize } {
// TODO Will need to add the exit point
// Set up everything as a kernel task
var stack = try allocator.alloc(u32, STACK_SIZE);
stack[STACK_SIZE - 18] = gdt.KERNEL_DATA_OFFSET; // ss
stack[STACK_SIZE - 17] = gdt.KERNEL_DATA_OFFSET; // gs
stack[STACK_SIZE - 16] = gdt.KERNEL_DATA_OFFSET; // fs
stack[STACK_SIZE - 15] = gdt.KERNEL_DATA_OFFSET; // es
stack[STACK_SIZE - 14] = gdt.KERNEL_DATA_OFFSET; // ds
stack[STACK_SIZE - 13] = 0; // edi
stack[STACK_SIZE - 12] = 0; // esi
// End of the stack
stack[STACK_SIZE - 11] = @ptrToInt(&stack[STACK_SIZE - 1]); // ebp
stack[STACK_SIZE - 10] = 0; // esp (temp) this won't be popped by popa bc intel is dumb XD
stack[STACK_SIZE - 9] = 0; // ebx
stack[STACK_SIZE - 8] = 0; // edx
stack[STACK_SIZE - 7] = 0; // ecx
stack[STACK_SIZE - 6] = 0; // eax
stack[STACK_SIZE - 5] = 0; // int_num
stack[STACK_SIZE - 4] = 0; // error_code
stack[STACK_SIZE - 3] = entry_point; // eip
stack[STACK_SIZE - 2] = gdt.KERNEL_CODE_OFFSET; // cs
stack[STACK_SIZE - 1] = 0x202; // eflags
const ret = .{ .stack = stack, .pointer = @ptrToInt(&stack[STACK_SIZE - 18]) };
return ret;
}
///
/// Initialise the architecture
///
/// Arguments:
/// IN boot_payload: BootPayload - The multiboot information from the GRUB bootloader.
/// IN mem_profile: *const MemProfile - The memory profile of the computer. Used to set up
/// paging.
/// IN allocator: *Allocator - The allocator use to handle memory.
/// IN comptime options: type - The build options that is passed to the kernel to be
/// used for run time testing.
///
pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile, allocator: *Allocator) void {
disableInterrupts();
pub fn init(boot_payload: BootPayload, mem_profile: *const MemProfile, allocator: *Allocator) void {
gdt.init();
idt.init();
@ -404,15 +466,13 @@ pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile
isr.init();
irq.init();
paging.init(mb_info, mem_profile, allocator);
paging.init(boot_payload, mem_profile, allocator);
pit.init();
rtc.init();
syscalls.init();
enableInterrupts();
// Initialise the VGA and TTY here since their tests belong the architecture and so should be a part of the
// arch init test messages
vga.init();
@ -420,17 +480,5 @@ pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile
}
test "" {
_ = @import("gdt.zig");
_ = @import("idt.zig");
_ = @import("pic.zig");
_ = @import("isr.zig");
_ = @import("irq.zig");
_ = @import("pit.zig");
_ = @import("cmos.zig");
_ = @import("rtc.zig");
_ = @import("syscalls.zig");
_ = @import("paging.zig");
_ = @import("serial.zig");
_ = @import("tty.zig");
_ = @import("vga.zig");
std.meta.refAllDecls(@This());
}

View file

@ -1,5 +1,5 @@
const constants = @import("constants");
const multiboot_info = @import("multiboot.zig").multiboot_info_t;
const arch = @import("arch.zig");
/// The multiboot header
const MultiBoot = packed struct {
@ -66,7 +66,7 @@ export var boot_page_directory: [1024]u32 align(4096) linksection(".rodata.boot"
export var kernel_stack: [16 * 1024]u8 align(16) linksection(".bss.stack") = undefined;
extern var KERNEL_ADDR_OFFSET: *u32;
extern fn kmain(mb_info: *multiboot_info) void;
extern fn kmain(mb_info: arch.BootPayload) void;
export fn _start() align(16) linksection(".text.boot") callconv(.Naked) noreturn {
// Set the page directory to the boot directory
@ -109,6 +109,6 @@ export fn start_higher_half() callconv(.Naked) noreturn {
\\mov %%ebx, %[res]
: [res] "=r" (-> usize)
) + @ptrToInt(&KERNEL_ADDR_OFFSET);
kmain(@intToPtr(*multiboot_info, mb_info_addr));
kmain(@intToPtr(arch.BootPayload, mb_info_addr));
while (true) {}
}

View file

@ -84,27 +84,31 @@ const GdtEntry = packed struct {
};
/// The TSS entry structure
const TtsEntry = packed struct {
const Tss = packed struct {
/// Pointer to the previous TSS entry
prev_tss: u32,
prev_tss: u16,
reserved1: u16,
/// Ring 0 32 bit stack pointer.
esp0: u32,
/// Ring 0 32 bit stack pointer.
ss0: u32,
ss0: u16,
reserved2: u16,
/// Ring 1 32 bit stack pointer.
esp1: u32,
/// Ring 1 32 bit stack pointer.
ss1: u32,
ss1: u16,
reserved3: u16,
/// Ring 2 32 bit stack pointer.
esp2: u32,
/// Ring 2 32 bit stack pointer.
ss2: u32,
ss2: u16,
reserved4: u16,
/// The CR3 control register 3.
cr3: u32,
@ -140,25 +144,32 @@ const TtsEntry = packed struct {
edi: u32,
/// The extra segment.
es: u32,
es: u16,
reserved5: u16,
/// The code segment.
cs: u32,
cs: u16,
reserved6: u16,
/// The stack segment.
ss: u32,
ss: u16,
reserved7: u16,
/// The data segment.
ds: u32,
ds: u16,
reserved8: u16,
/// A extra segment FS.
fs: u32,
fs: u16,
reserved9: u16,
/// A extra segment GS.
gs: u32,
gs: u16,
reserved10: u16,
/// The local descriptor table register.
ldtr: u32,
ldtr: u16,
reserved11: u16,
/// ?
trap: u16,
@ -177,8 +188,8 @@ pub const GdtPtr = packed struct {
base: u32,
};
/// The total number of entries in the GTD: null, kernel code, kernel data, user code, user data
/// and TSS
/// The total number of entries in the GDT including: null, kernel code, kernel data, user code,
/// user data and the TSS.
const NUMBER_OF_ENTRIES: u16 = 0x06;
/// The size of the GTD in bytes (minus 1).
@ -315,24 +326,28 @@ pub const USER_DATA_OFFSET: u16 = 0x20;
pub const TSS_OFFSET: u16 = 0x28;
/// The GDT entry table of NUMBER_OF_ENTRIES entries.
var gdt_entries: [NUMBER_OF_ENTRIES]GdtEntry = [_]GdtEntry{
var gdt_entries: [NUMBER_OF_ENTRIES]GdtEntry = init: {
var gdt_entries_temp: [NUMBER_OF_ENTRIES]GdtEntry = undefined;
// Null descriptor
makeEntry(0, 0, NULL_SEGMENT, NULL_FLAGS),
gdt_entries_temp[0] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
// Kernel Code
makeEntry(0, 0xFFFFF, KERNEL_SEGMENT_CODE, PAGING_32_BIT),
// Kernel code descriptor
gdt_entries_temp[1] = makeGdtEntry(0, 0xFFFFF, KERNEL_SEGMENT_CODE, PAGING_32_BIT);
// Kernel Data
makeEntry(0, 0xFFFFF, KERNEL_SEGMENT_DATA, PAGING_32_BIT),
// Kernel data descriptor
gdt_entries_temp[2] = makeGdtEntry(0, 0xFFFFF, KERNEL_SEGMENT_DATA, PAGING_32_BIT);
// User Code
makeEntry(0, 0xFFFFF, USER_SEGMENT_CODE, PAGING_32_BIT),
// User code descriptor
gdt_entries_temp[3] = makeGdtEntry(0, 0xFFFFF, USER_SEGMENT_CODE, PAGING_32_BIT);
// User Data
makeEntry(0, 0xFFFFF, USER_SEGMENT_DATA, PAGING_32_BIT),
// User data descriptor
gdt_entries_temp[4] = makeGdtEntry(0, 0xFFFFF, USER_SEGMENT_DATA, PAGING_32_BIT);
// Fill in TSS at runtime
makeEntry(0, 0, NULL_SEGMENT, NULL_FLAGS),
// TSS descriptor, one each for each processor
// Will initialise the TSS at runtime
gdt_entries_temp[5] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
break :init gdt_entries_temp;
};
/// The GDT pointer that the CPU is loaded with that contains the base address of the GDT and the
@ -342,35 +357,12 @@ var gdt_ptr: GdtPtr = GdtPtr{
.base = undefined,
};
/// The task state segment entry.
var tss: TtsEntry = TtsEntry{
.prev_tss = 0,
.esp0 = 0,
.ss0 = KERNEL_DATA_OFFSET,
.esp1 = 0,
.ss1 = 0,
.esp2 = 0,
.ss2 = 0,
.cr3 = 0,
.eip = 0,
.eflags = 0,
.eax = 0,
.ecx = 0,
.edx = 0,
.ebx = 0,
.esp = 0,
.ebp = 0,
.esi = 0,
.edi = 0,
.es = 0,
.cs = 0,
.ss = 0,
.ds = 0,
.fs = 0,
.gs = 0,
.ldtr = 0,
.trap = 0,
.io_permissions_base_offset = @sizeOf(TtsEntry),
/// The main task state segment entry.
var main_tss_entry: Tss = init: {
var tss_temp = std.mem.zeroes(Tss);
tss_temp.ss0 = KERNEL_DATA_OFFSET;
tss_temp.io_permissions_base_offset = @sizeOf(Tss);
break :init tss_temp;
};
///
@ -386,11 +378,11 @@ var tss: TtsEntry = TtsEntry{
/// A new GDT entry with the give access and flag bits set with the base at 0x00000000 and
/// limit at 0xFFFFF.
///
fn makeEntry(base: u32, limit: u20, access: AccessBits, flags: FlagBits) GdtEntry {
return GdtEntry{
fn makeGdtEntry(base: u32, limit: u20, access: AccessBits, flags: FlagBits) GdtEntry {
return .{
.limit_low = @truncate(u16, limit),
.base_low = @truncate(u24, base),
.access = AccessBits{
.access = .{
.accessed = access.accessed,
.read_write = access.read_write,
.direction_conforming = access.direction_conforming,
@ -400,7 +392,7 @@ fn makeEntry(base: u32, limit: u20, access: AccessBits, flags: FlagBits) GdtEntr
.present = access.present,
},
.limit_high = @truncate(u4, limit >> 16),
.flags = FlagBits{
.flags = .{
.reserved_zero = flags.reserved_zero,
.is_64_bit = flags.is_64_bit,
.is_32_bit = flags.is_32_bit,
@ -410,16 +402,6 @@ fn makeEntry(base: u32, limit: u20, access: AccessBits, flags: FlagBits) GdtEntr
};
}
///
/// Set the stack pointer in the TSS entry.
///
/// Arguments:
/// IN esp0: u32 - The stack pointer.
///
pub fn setTssStack(esp0: u32) void {
tss.esp0 = esp0;
}
///
/// Initialise the Global Descriptor table.
///
@ -427,7 +409,7 @@ pub fn init() void {
log.logInfo("Init gdt\n", .{});
defer log.logInfo("Done gdt\n", .{});
// Initiate TSS
gdt_entries[TSS_INDEX] = makeEntry(@ptrToInt(&tss), @sizeOf(TtsEntry) - 1, TSS_SEGMENT, NULL_FLAGS);
gdt_entries[TSS_INDEX] = makeGdtEntry(@ptrToInt(&main_tss_entry), @sizeOf(Tss) - 1, TSS_SEGMENT, NULL_FLAGS);
// Set the base address where all the GDT entries are.
gdt_ptr.base = @ptrToInt(&gdt_entries[0]);
@ -453,7 +435,7 @@ test "GDT entries" {
expectEqual(@as(u32, 1), @sizeOf(AccessBits));
expectEqual(@as(u32, 1), @sizeOf(FlagBits));
expectEqual(@as(u32, 8), @sizeOf(GdtEntry));
expectEqual(@as(u32, 104), @sizeOf(TtsEntry));
expectEqual(@as(u32, 104), @sizeOf(Tss));
expectEqual(@as(u32, 6), @sizeOf(GdtPtr));
const null_entry = gdt_entries[NULL_INDEX];
@ -476,45 +458,45 @@ test "GDT entries" {
expectEqual(TABLE_SIZE, gdt_ptr.limit);
expectEqual(@as(u32, 0), tss.prev_tss);
expectEqual(@as(u32, 0), tss.esp0);
expectEqual(@as(u32, KERNEL_DATA_OFFSET), tss.ss0);
expectEqual(@as(u32, 0), tss.esp1);
expectEqual(@as(u32, 0), tss.ss1);
expectEqual(@as(u32, 0), tss.esp2);
expectEqual(@as(u32, 0), tss.ss2);
expectEqual(@as(u32, 0), tss.cr3);
expectEqual(@as(u32, 0), tss.eip);
expectEqual(@as(u32, 0), tss.eflags);
expectEqual(@as(u32, 0), tss.eax);
expectEqual(@as(u32, 0), tss.ecx);
expectEqual(@as(u32, 0), tss.edx);
expectEqual(@as(u32, 0), tss.ebx);
expectEqual(@as(u32, 0), tss.esp);
expectEqual(@as(u32, 0), tss.ebp);
expectEqual(@as(u32, 0), tss.esi);
expectEqual(@as(u32, 0), tss.edi);
expectEqual(@as(u32, 0), tss.es);
expectEqual(@as(u32, 0), tss.cs);
expectEqual(@as(u32, 0), tss.ss);
expectEqual(@as(u32, 0), tss.ds);
expectEqual(@as(u32, 0), tss.fs);
expectEqual(@as(u32, 0), tss.gs);
expectEqual(@as(u32, 0), tss.ldtr);
expectEqual(@as(u16, 0), tss.trap);
expectEqual(@as(u32, 0), main_tss_entry.prev_tss);
expectEqual(@as(u32, 0), main_tss_entry.esp0);
expectEqual(@as(u32, KERNEL_DATA_OFFSET), main_tss_entry.ss0);
expectEqual(@as(u32, 0), main_tss_entry.esp1);
expectEqual(@as(u32, 0), main_tss_entry.ss1);
expectEqual(@as(u32, 0), main_tss_entry.esp2);
expectEqual(@as(u32, 0), main_tss_entry.ss2);
expectEqual(@as(u32, 0), main_tss_entry.cr3);
expectEqual(@as(u32, 0), main_tss_entry.eip);
expectEqual(@as(u32, 0), main_tss_entry.eflags);
expectEqual(@as(u32, 0), main_tss_entry.eax);
expectEqual(@as(u32, 0), main_tss_entry.ecx);
expectEqual(@as(u32, 0), main_tss_entry.edx);
expectEqual(@as(u32, 0), main_tss_entry.ebx);
expectEqual(@as(u32, 0), main_tss_entry.esp);
expectEqual(@as(u32, 0), main_tss_entry.ebp);
expectEqual(@as(u32, 0), main_tss_entry.esi);
expectEqual(@as(u32, 0), main_tss_entry.edi);
expectEqual(@as(u32, 0), main_tss_entry.es);
expectEqual(@as(u32, 0), main_tss_entry.cs);
expectEqual(@as(u32, 0), main_tss_entry.ss);
expectEqual(@as(u32, 0), main_tss_entry.ds);
expectEqual(@as(u32, 0), main_tss_entry.fs);
expectEqual(@as(u32, 0), main_tss_entry.gs);
expectEqual(@as(u32, 0), main_tss_entry.ldtr);
expectEqual(@as(u16, 0), main_tss_entry.trap);
// Size of TtsEntry will fit in a u16 as 104 < 65535 (2^16)
expectEqual(@as(u16, @sizeOf(TtsEntry)), tss.io_permissions_base_offset);
// Size of Tss will fit in a u16 as 104 < 65535 (2^16)
expectEqual(@as(u16, @sizeOf(Tss)), main_tss_entry.io_permissions_base_offset);
}
test "makeEntry NULL" {
const actual = makeEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
test "makeGdtEntry NULL" {
const actual = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
const expected: u64 = 0;
expectEqual(expected, @bitCast(u64, actual));
}
test "makeEntry alternating bit pattern" {
test "makeGdtEntry alternating bit pattern" {
const alt_access = AccessBits{
.accessed = 1,
.read_write = 0,
@ -536,106 +518,12 @@ test "makeEntry alternating bit pattern" {
expectEqual(@as(u4, 0b0101), @bitCast(u4, alt_flag));
const actual = makeEntry(0b01010101010101010101010101010101, 0b01010101010101010101, alt_access, alt_flag);
const actual = makeGdtEntry(0b01010101010101010101010101010101, 0b01010101010101010101, alt_access, alt_flag);
const expected: u64 = 0b0101010101010101010101010101010101010101010101010101010101010101;
expectEqual(expected, @bitCast(u64, actual));
}
test "setTssStack" {
// Pre-testing
expectEqual(@as(u32, 0), tss.prev_tss);
expectEqual(@as(u32, 0), tss.esp0);
expectEqual(@as(u32, KERNEL_DATA_OFFSET), tss.ss0);
expectEqual(@as(u32, 0), tss.esp1);
expectEqual(@as(u32, 0), tss.ss1);
expectEqual(@as(u32, 0), tss.esp2);
expectEqual(@as(u32, 0), tss.ss2);
expectEqual(@as(u32, 0), tss.cr3);
expectEqual(@as(u32, 0), tss.eip);
expectEqual(@as(u32, 0), tss.eflags);
expectEqual(@as(u32, 0), tss.eax);
expectEqual(@as(u32, 0), tss.ecx);
expectEqual(@as(u32, 0), tss.edx);
expectEqual(@as(u32, 0), tss.ebx);
expectEqual(@as(u32, 0), tss.esp);
expectEqual(@as(u32, 0), tss.ebp);
expectEqual(@as(u32, 0), tss.esi);
expectEqual(@as(u32, 0), tss.edi);
expectEqual(@as(u32, 0), tss.es);
expectEqual(@as(u32, 0), tss.cs);
expectEqual(@as(u32, 0), tss.ss);
expectEqual(@as(u32, 0), tss.ds);
expectEqual(@as(u32, 0), tss.fs);
expectEqual(@as(u32, 0), tss.gs);
expectEqual(@as(u32, 0), tss.ldtr);
expectEqual(@as(u16, 0), tss.trap);
expectEqual(@as(u16, @sizeOf(TtsEntry)), tss.io_permissions_base_offset);
// Call function
setTssStack(100);
// Post-testing
expectEqual(@as(u32, 0), tss.prev_tss);
expectEqual(@as(u32, 100), tss.esp0);
expectEqual(@as(u32, KERNEL_DATA_OFFSET), tss.ss0);
expectEqual(@as(u32, 0), tss.esp1);
expectEqual(@as(u32, 0), tss.ss1);
expectEqual(@as(u32, 0), tss.esp2);
expectEqual(@as(u32, 0), tss.ss2);
expectEqual(@as(u32, 0), tss.cr3);
expectEqual(@as(u32, 0), tss.eip);
expectEqual(@as(u32, 0), tss.eflags);
expectEqual(@as(u32, 0), tss.eax);
expectEqual(@as(u32, 0), tss.ecx);
expectEqual(@as(u32, 0), tss.edx);
expectEqual(@as(u32, 0), tss.ebx);
expectEqual(@as(u32, 0), tss.esp);
expectEqual(@as(u32, 0), tss.ebp);
expectEqual(@as(u32, 0), tss.esi);
expectEqual(@as(u32, 0), tss.edi);
expectEqual(@as(u32, 0), tss.es);
expectEqual(@as(u32, 0), tss.cs);
expectEqual(@as(u32, 0), tss.ss);
expectEqual(@as(u32, 0), tss.ds);
expectEqual(@as(u32, 0), tss.fs);
expectEqual(@as(u32, 0), tss.gs);
expectEqual(@as(u32, 0), tss.ldtr);
expectEqual(@as(u16, 0), tss.trap);
expectEqual(@as(u16, @sizeOf(TtsEntry)), tss.io_permissions_base_offset);
// Clean up
setTssStack(0);
expectEqual(@as(u32, 0), tss.prev_tss);
expectEqual(@as(u32, 0), tss.esp0);
expectEqual(@as(u32, KERNEL_DATA_OFFSET), tss.ss0);
expectEqual(@as(u32, 0), tss.esp1);
expectEqual(@as(u32, 0), tss.ss1);
expectEqual(@as(u32, 0), tss.esp2);
expectEqual(@as(u32, 0), tss.ss2);
expectEqual(@as(u32, 0), tss.cr3);
expectEqual(@as(u32, 0), tss.eip);
expectEqual(@as(u32, 0), tss.eflags);
expectEqual(@as(u32, 0), tss.eax);
expectEqual(@as(u32, 0), tss.ecx);
expectEqual(@as(u32, 0), tss.edx);
expectEqual(@as(u32, 0), tss.ebx);
expectEqual(@as(u32, 0), tss.esp);
expectEqual(@as(u32, 0), tss.ebp);
expectEqual(@as(u32, 0), tss.esi);
expectEqual(@as(u32, 0), tss.edi);
expectEqual(@as(u32, 0), tss.es);
expectEqual(@as(u32, 0), tss.cs);
expectEqual(@as(u32, 0), tss.ss);
expectEqual(@as(u32, 0), tss.ds);
expectEqual(@as(u32, 0), tss.fs);
expectEqual(@as(u32, 0), tss.gs);
expectEqual(@as(u32, 0), tss.ldtr);
expectEqual(@as(u16, 0), tss.trap);
expectEqual(@as(u16, @sizeOf(TtsEntry)), tss.io_permissions_base_offset);
}
test "init" {
// Set up
arch.initTest();
@ -650,8 +538,8 @@ test "init" {
// Post testing
const tss_entry = gdt_entries[TSS_INDEX];
const tss_limit = @sizeOf(TtsEntry) - 1;
const tss_addr = @ptrToInt(&tss);
const tss_limit = @sizeOf(Tss) - 1;
const tss_addr = @ptrToInt(&main_tss_entry);
var expected: u64 = 0;
expected |= @as(u64, @truncate(u16, tss_limit));
@ -665,7 +553,7 @@ test "init" {
// Reset
gdt_ptr.base = 0;
gdt_entries[TSS_INDEX] = makeEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
gdt_entries[TSS_INDEX] = makeGdtEntry(0, 0, NULL_SEGMENT, NULL_FLAGS);
}
///
@ -686,6 +574,6 @@ fn rt_loadedGDTSuccess() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_loadedGDTSuccess();
}

View file

@ -340,6 +340,6 @@ fn rt_loadedIDTSuccess() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_loadedIDTSuccess();
}

View file

@ -3,22 +3,22 @@ const syscalls = @import("syscalls.zig");
const irq = @import("irq.zig");
const idt = @import("idt.zig");
extern fn irqHandler(ctx: *arch.InterruptContext) void;
extern fn isrHandler(ctx: *arch.InterruptContext) void;
extern fn irqHandler(ctx: *arch.CpuState) usize;
extern fn isrHandler(ctx: *arch.CpuState) usize;
///
/// The main handler for all exceptions and interrupts. This will then go and call the correct
/// handler for an ISR or IRQ.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - Pointer to the exception context containing the contents
/// of the registers at the time of a exception.
/// IN ctx: *arch.CpuState - Pointer to the exception context containing the contents
/// of the registers at the time of a exception.
///
export fn handler(ctx: *arch.InterruptContext) void {
export fn handler(ctx: *arch.CpuState) usize {
if (ctx.int_num < irq.IRQ_OFFSET or ctx.int_num == syscalls.INTERRUPT) {
isrHandler(ctx);
return isrHandler(ctx);
} else {
irqHandler(ctx);
return irqHandler(ctx);
}
}
@ -32,6 +32,7 @@ export fn commonStub() callconv(.Naked) void {
\\push %%es
\\push %%fs
\\push %%gs
\\push %%ss
\\mov $0x10, %%ax
\\mov %%ax, %%ds
\\mov %%ax, %%es
@ -40,7 +41,8 @@ export fn commonStub() callconv(.Naked) void {
\\mov %%esp, %%eax
\\push %%eax
\\call handler
\\pop %%eax
\\mov %%eax, %%esp
\\pop %%ss
\\pop %%gs
\\pop %%fs
\\pop %%es

View file

@ -26,7 +26,7 @@ pub const IrqError = error{
const NUMBER_OF_ENTRIES: u16 = 16;
/// The type of a IRQ handler. A function that takes a interrupt context and returns void.
const IrqHandler = fn (*arch.InterruptContext) void;
const IrqHandler = fn (*arch.CpuState) usize;
// The offset from the interrupt number where the IRQs are.
pub const IRQ_OFFSET: u16 = 32;
@ -38,15 +38,17 @@ var irq_handlers: [NUMBER_OF_ENTRIES]?IrqHandler = [_]?IrqHandler{null} ** NUMBE
/// The IRQ handler that each of the IRQs will call when a interrupt happens.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - Pointer to the interrupt context containing the contents
/// IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents
/// of the register at the time of the interrupt.
///
export fn irqHandler(ctx: *arch.InterruptContext) void {
export fn irqHandler(ctx: *arch.CpuState) usize {
// Get the IRQ index, by getting the interrupt number and subtracting the offset.
if (ctx.int_num < IRQ_OFFSET) {
panic(@errorReturnTrace(), "Not an IRQ number: {}\n", .{ctx.int_num});
}
var ret_esp = @ptrToInt(ctx);
const irq_offset = ctx.int_num - IRQ_OFFSET;
if (isValidIrq(irq_offset)) {
// IRQ index is valid so can truncate
@ -54,7 +56,7 @@ export fn irqHandler(ctx: *arch.InterruptContext) void {
if (irq_handlers[irq_num]) |handler| {
// Make sure it isn't a spurious irq
if (!pic.spuriousIrq(irq_num)) {
handler(ctx);
ret_esp = handler(ctx);
// Send the end of interrupt command
pic.sendEndOfInterrupt(irq_num);
}
@ -64,6 +66,7 @@ export fn irqHandler(ctx: *arch.InterruptContext) void {
} else {
panic(@errorReturnTrace(), "Invalid IRQ index: {}", .{irq_offset});
}
return ret_esp;
}
///
@ -143,8 +146,12 @@ pub fn init() void {
}
fn testFunction0() callconv(.Naked) void {}
fn testFunction1(ctx: *arch.InterruptContext) void {}
fn testFunction2(ctx: *arch.InterruptContext) void {}
fn testFunction1(ctx: *arch.CpuState) u32 {
return 0;
}
fn testFunction2(ctx: *arch.CpuState) u32 {
return 0;
}
test "openIrq" {
idt.initTest();
@ -264,7 +271,7 @@ fn rt_openedIdtEntries() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_unregisteredHandlers();
rt_openedIdtEntries();
}

View file

@ -23,7 +23,7 @@ pub const IsrError = error{
};
/// The type of a ISR handler. A function that takes a interrupt context and returns void.
const IsrHandler = fn (*arch.InterruptContext) void;
const IsrHandler = fn (*arch.CpuState) usize;
/// The number of ISR entries.
const NUMBER_OF_ENTRIES: u8 = 32;
@ -137,32 +137,36 @@ var syscall_handler: ?IsrHandler = null;
/// The exception handler that each of the exceptions will call when a exception happens.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - Pointer to the exception context containing the contents
/// IN ctx: *arch.CpuState - Pointer to the exception context containing the contents
/// of the register at the time of the exception.
///
export fn isrHandler(ctx: *arch.InterruptContext) void {
export fn isrHandler(ctx: *arch.CpuState) usize {
// Get the interrupt number
const isr_num = ctx.int_num;
var ret_esp = @ptrToInt(ctx);
if (isValidIsr(isr_num)) {
if (isr_num == syscalls.INTERRUPT) {
// A syscall, so use the syscall handler
if (syscall_handler) |handler| {
handler(ctx);
ret_esp = handler(ctx);
} else {
panic(@errorReturnTrace(), "Syscall handler not registered\n", .{});
}
} else {
if (isr_handlers[isr_num]) |handler| {
// Regular ISR exception, if there is one registered.
handler(ctx);
ret_esp = handler(ctx);
} else {
panic(@errorReturnTrace(), "ISR not registered to: {}-{}\n", .{ isr_num, exception_msg[isr_num] });
log.logInfo("State: {X}\n", .{ctx});
panic(@errorReturnTrace(), "ISR {} ({}) triggered with error code 0x{X} but not registered\n", .{ exception_msg[isr_num], isr_num, ctx.error_code });
}
}
} else {
panic(@errorReturnTrace(), "Invalid ISR index: {}\n", .{isr_num});
}
return ret_esp;
}
///
@ -251,10 +255,18 @@ pub fn init() void {
}
fn testFunction0() callconv(.Naked) void {}
fn testFunction1(ctx: *arch.InterruptContext) void {}
fn testFunction2(ctx: *arch.InterruptContext) void {}
fn testFunction3(ctx: *arch.InterruptContext) void {}
fn testFunction4(ctx: *arch.InterruptContext) void {}
fn testFunction1(ctx: *arch.CpuState) u32 {
return 0;
}
fn testFunction2(ctx: *arch.CpuState) u32 {
return 0;
}
fn testFunction3(ctx: *arch.CpuState) u32 {
return 0;
}
fn testFunction4(ctx: *arch.CpuState) u32 {
return 0;
}
test "openIsr" {
idt.initTest();
@ -397,7 +409,7 @@ fn rt_openedIdtEntries() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_unregisteredHandlers();
rt_openedIdtEntries();
}

View file

@ -35,6 +35,7 @@ SECTIONS {
}
.bss.stack ALIGN(4K) : AT (ADDR(.bss.stack) - KERNEL_ADDR_OFFSET) {
KERNEL_STACK_START = .;
KEEP(*(.bss.stack))
KERNEL_STACK_END = .;
}

View file

@ -1,9 +1,13 @@
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expect = std.testing.expect;
const testing = std.testing;
const expectEqual = testing.expectEqual;
const expect = testing.expect;
const builtin = @import("builtin");
const is_test = builtin.is_test;
const panic = @import("../../panic.zig").panic;
const arch = @import("arch.zig");
const build_options = @import("build_options");
const mock_path = build_options.arch_mock_path;
const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("arch.zig");
const isr = @import("isr.zig");
const MemProfile = @import("../../mem.zig").MemProfile;
const tty = @import("../../tty.zig");
@ -11,8 +15,6 @@ const log = @import("../../log.zig");
const mem = @import("../../mem.zig");
const vmm = @import("../../vmm.zig");
const multiboot = @import("multiboot.zig");
const build_options = @import("build_options");
const testing = std.testing;
/// An array of directory entries and page tables. Forms the first level of paging and covers the entire 4GB memory space.
pub const Directory = packed struct {
@ -361,27 +363,23 @@ pub fn unmap(virtual_start: usize, virtual_end: usize, dir: *Directory) (std.mem
/// Called when a page fault occurs. This will log the CPU state and control registers.
///
/// Arguments:
/// IN state: *arch.InterruptContext - The CPU's state when the fault occurred.
/// IN state: *arch.CpuState - The CPU's state when the fault occurred.
///
fn pageFault(state: *arch.InterruptContext) void {
fn pageFault(state: *arch.CpuState) u32 {
log.logInfo("State: {X}\n", .{state});
var cr0: u32 = 0;
var cr2: u32 = 0;
var cr3: u32 = 0;
var cr4: u32 = 0;
asm volatile ("mov %%cr0, %[cr0]"
: [cr0] "=r" (cr0)
var cr0 = asm volatile ("mov %%cr0, %[cr0]"
: [cr0] "=r" (-> u32)
);
asm volatile ("mov %%cr2, %[cr2]"
: [cr2] "=r" (cr2)
var cr2 = asm volatile ("mov %%cr2, %[cr2]"
: [cr2] "=r" (-> u32)
);
asm volatile ("mov %%cr3, %[cr3]"
: [cr3] "=r" (cr3)
var cr3 = asm volatile ("mov %%cr3, %[cr3]"
: [cr3] "=r" (-> u32)
);
asm volatile ("mov %%cr4, %[cr4]"
: [cr4] "=r" (cr4)
var cr4 = asm volatile ("mov %%cr4, %[cr4]"
: [cr4] "=r" (-> u32)
);
log.logInfo("CR0: {X}, CR2: {X}, CR3: {X}, CR4: {X}\n\n", .{ cr0, cr2, cr3, cr4 });
log.logInfo("CR0: 0x{X}, CR2: 0x{X}, CR3: 0x{X}, CR4: 0x{X}\n", .{ cr0, cr2, cr3, cr4 });
@panic("Page fault");
}
@ -551,10 +549,12 @@ extern var rt_fault_callback2: *u32;
var faulted = false;
var use_callback2 = false;
fn rt_pageFault(ctx: *arch.InterruptContext) void {
fn rt_pageFault(ctx: *arch.CpuState) u32 {
faulted = true;
// Return to the fault callback
ctx.eip = @ptrToInt(&if (use_callback2) rt_fault_callback2 else rt_fault_callback);
return @ptrToInt(ctx);
}
fn rt_accessUnmappedMem(v_end: u32) void {
@ -592,7 +592,7 @@ fn rt_accessMappedMem(v_end: u32) void {
log.logInfo("Paging: Tested accessing mapped memory\n", .{});
}
fn runtimeTests(v_end: u32) void {
pub fn runtimeTests(v_end: u32) void {
rt_accessUnmappedMem(v_end);
rt_accessMappedMem(v_end);
}

View file

@ -830,6 +830,6 @@ fn rt_picAllMasked() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_picAllMasked();
}

View file

@ -231,11 +231,12 @@ inline fn sendDataToCounter(counter: CounterSelect, data: u8) void {
/// The interrupt handler for the PIT. This will increment a counter for now.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - Pointer to the interrupt context containing the contents
/// IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents
/// of the register at the time of the interrupt.
///
fn pitHandler(ctx: *arch.InterruptContext) void {
fn pitHandler(ctx: *arch.CpuState) usize {
ticks +%= 1;
return @ptrToInt(ctx);
}
///
@ -324,25 +325,17 @@ pub fn waitTicks(ticks_to_wait: u32) void {
const wait_ticks2 = ticks_to_wait - wait_ticks1;
while (ticks > wait_ticks1) {
arch.enableInterrupts();
arch.halt();
arch.disableInterrupts();
}
while (ticks < wait_ticks2) {
arch.enableInterrupts();
arch.halt();
arch.disableInterrupts();
}
arch.enableInterrupts();
} else {
const wait_ticks = ticks + ticks_to_wait;
while (ticks < wait_ticks) {
arch.enableInterrupts();
arch.halt();
arch.disableInterrupts();
}
arch.enableInterrupts();
}
}
@ -635,7 +628,11 @@ fn rt_initCounter_0() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
// Interrupts aren't enabled yet, so enable them temporarily for the runtime tests
arch.enableInterrupts();
defer arch.disableInterrupts();
rt_initCounter_0();
rt_waitTicks();
rt_waitTicks2();

View file

@ -6,13 +6,14 @@ const expectEqual = std.testing.expectEqual;
const expectError = std.testing.expectError;
const build_options = @import("build_options");
const mock_path = build_options.arch_mock_path;
const arch = @import("arch.zig");
const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("arch.zig");
const log = @import("../../log.zig");
const pic = @import("pic.zig");
const pit = @import("pit.zig");
const irq = @import("irq.zig");
const cmos = if (is_test) @import(mock_path ++ "cmos_mock.zig") else @import("cmos.zig");
const panic = if (is_test) @import(mock_path ++ "panic_mock.zig").panic else @import("../../panic.zig").panic;
const scheduler = @import("../../scheduler.zig");
/// The Century register is unreliable. We need a APIC interface to infer if we have a century
/// register. So this is a current TODO.
@ -44,6 +45,8 @@ const RtcError = error{
/// The number of ticks that has passed when RTC was initially set up.
var ticks: u32 = 0;
var schedule: bool = true;
///
/// Checks if the CMOS chip isn't updating the RTC registers. Call this before reading any RTC
/// registers so don't get inconsistent values.
@ -206,14 +209,26 @@ fn readRtc() DateTime {
/// The interrupt handler for the RTC.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - Pointer to the interrupt context containing the contents
/// IN ctx: *arch.CpuState - Pointer to the interrupt context containing the contents
/// of the register at the time of the interrupt.
///
fn rtcHandler(ctx: *arch.InterruptContext) void {
fn rtcHandler(ctx: *arch.CpuState) usize {
ticks +%= 1;
var ret_esp: usize = undefined;
// Call the scheduler
if (schedule) {
ret_esp = scheduler.pickNextTask(ctx);
} else {
ret_esp = @ptrToInt(ctx);
}
// Need to read status register C
// Might need to disable the NMI bit, set to true
const reg_c = cmos.readStatusRegister(cmos.StatusRegister.C, false);
return ret_esp;
}
///
@ -264,9 +279,6 @@ pub fn init() void {
},
};
// Need to disable interrupts went setting up the RTC
arch.disableInterrupts();
// Set the interrupt rate to 512Hz
setRate(7) catch |err| switch (err) {
error.RateError => {
@ -277,9 +289,6 @@ pub fn init() void {
// Enable RTC interrupts
enableInterrupts();
// Can now enable interrupts
arch.enableInterrupts();
// Read status register C to clear any interrupts that may have happened during set up
const reg_c = cmos.readStatusRegister(cmos.StatusRegister.C, false);
@ -739,7 +748,15 @@ fn rt_interrupts() void {
///
/// Run all the runtime tests.
///
fn runtimeTests() void {
pub fn runtimeTests() void {
rt_init();
// Disable the scheduler temporarily
schedule = false;
// Interrupts aren't enabled yet, so enable them temporarily for the runtime tests
arch.enableInterrupts();
rt_interrupts();
arch.disableInterrupts();
// Can enable it back
schedule = true;
}

View file

@ -1,9 +1,13 @@
const arch = @import("arch.zig");
const testing = @import("std").testing;
const assert = @import("std").debug.assert;
const std = @import("std");
const builtin = @import("builtin");
const is_test = builtin.is_test;
const build_options = @import("build_options");
const mock_path = build_options.arch_mock_path;
const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("arch.zig");
const testing = std.testing;
const expect = std.testing.expect;
const isr = @import("isr.zig");
const log = @import("../../log.zig");
const build_options = @import("build_options");
const panic = @import("../../panic.zig").panic;
/// The isr number associated with syscalls
@ -13,7 +17,7 @@ pub const INTERRUPT: u16 = 0x80;
pub const NUM_HANDLERS: u16 = 256;
/// A syscall handler
pub const SyscallHandler = fn (ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32;
pub const SyscallHandler = fn (ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32;
/// Errors that syscall utility functions can throw
pub const SyscallError = error{
@ -44,10 +48,10 @@ pub fn isValidSyscall(syscall: u32) bool {
/// warning is logged.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - The cpu context when the syscall was triggered. The
/// IN ctx: *arch.CpuState - The cpu context when the syscall was triggered. The
/// syscall number is stored in eax.
///
fn handle(ctx: *arch.InterruptContext) void {
fn handle(ctx: *arch.CpuState) u32 {
// The syscall number is put in eax
const syscall = ctx.eax;
if (isValidSyscall(syscall)) {
@ -59,6 +63,7 @@ fn handle(ctx: *arch.InterruptContext) void {
} else {
log.logWarning("Syscall {} is invalid\n", .{syscall});
}
return @ptrToInt(ctx);
}
///
@ -217,13 +222,13 @@ inline fn syscall5(syscall: u32, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg
/// 3 => esi and 4 => edi.
///
/// Arguments:
/// IN ctx: *arch.InterruptContext - The interrupt context from which to get the argument
/// IN ctx: *arch.CpuState - The interrupt context from which to get the argument
/// IN arg_idx: comptime u32 - The argument index to get. Between 0 and 4.
///
/// Return: u32
/// The syscall argument from the given index.
///
inline fn syscallArg(ctx: *arch.InterruptContext, comptime arg_idx: u32) u32 {
inline fn syscallArg(ctx: *arch.CpuState, comptime arg_idx: u32) u32 {
return switch (arg_idx) {
0 => ctx.ebx,
1 => ctx.ecx,
@ -252,32 +257,32 @@ pub fn init() void {
/// Tests
var test_int: u32 = 0;
fn testHandler0(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler0(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += 1;
return 0;
}
fn testHandler1(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler1(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += arg1;
return 1;
}
fn testHandler2(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler2(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += arg1 + arg2;
return 2;
}
fn testHandler3(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler3(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += arg1 + arg2 + arg3;
return 3;
}
fn testHandler4(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler4(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += arg1 + arg2 + arg3 + arg4;
return 4;
}
fn testHandler5(ctx: *arch.InterruptContext, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
fn testHandler5(ctx: *arch.CpuState, arg1: u32, arg2: u32, arg3: u32, arg4: u32, arg5: u32) u32 {
test_int += arg1 + arg2 + arg3 + arg4 + arg5;
return 5;
}
@ -287,7 +292,7 @@ test "registerSyscall returns SyscallExists" {
registerSyscall(123, testHandler0) catch |err| {
return;
};
assert(false);
expect(false);
}
fn runtimeTests() void {