Merge pull request #149 from SamTebbs33/feature/arch-boot-payload

Abstract away boot modules and memory maps
This commit is contained in:
Sam Tebbs 2020-05-30 23:40:20 +01:00 committed by GitHub
commit 122adaba95
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
12 changed files with 313 additions and 258 deletions

View file

@ -11,11 +11,28 @@ const rtc = @import("rtc.zig");
const paging = @import("paging.zig"); const paging = @import("paging.zig");
const syscalls = @import("syscalls.zig"); const syscalls = @import("syscalls.zig");
const mem = @import("../../mem.zig"); const mem = @import("../../mem.zig");
const multiboot = @import("../../multiboot.zig"); const multiboot = @import("multiboot.zig");
const pmm = @import("pmm.zig"); const pmm = @import("pmm.zig");
const vmm = @import("../../vmm.zig"); const vmm = @import("../../vmm.zig");
const log = @import("../../log.zig");
const tty = @import("../../tty.zig");
const MemProfile = mem.MemProfile; const MemProfile = mem.MemProfile;
/// The virtual end of the kernel code
extern var KERNEL_VADDR_END: *u32;
/// The virtual start of the kernel code
extern var KERNEL_VADDR_START: *u32;
/// The physical end of the kernel code
extern var KERNEL_PHYSADDR_END: *u32;
/// The physical start of the kernel code
extern var KERNEL_PHYSADDR_START: *u32;
/// The boot-time offset that the virtual addresses are from the physical addresses
extern var KERNEL_ADDR_OFFSET: *u32;
/// The interrupt context that is given to a interrupt handler. It contains most of the registers /// The interrupt context that is given to a interrupt handler. It contains most of the registers
/// and the interrupt number and error code (if there is one). /// and the interrupt number and error code (if there is one).
pub const InterruptContext = struct { pub const InterruptContext = struct {
@ -49,6 +66,9 @@ pub const InterruptContext = struct {
ss: u32, ss: u32,
}; };
/// x86's boot payload is the multiboot info passed by grub
pub const BootPayload = *multiboot.multiboot_info_t;
/// The type of the payload passed to a virtual memory mapper. /// The type of the payload passed to a virtual memory mapper.
/// For x86 it's the page directory that should be mapped. /// For x86 it's the page directory that should be mapped.
pub const VmmPayload = *paging.Directory; pub const VmmPayload = *paging.Directory;
@ -231,6 +251,87 @@ pub fn haltNoInterrupts() noreturn {
} }
} }
///
/// Initialise the system's memory. Populates a memory profile with boot modules from grub, the amount of available memory, the reserved regions of virtual and physical memory as well as the start and end of the kernel code
///
/// Arguments:
///     IN mb_info: *multiboot.multiboot_info_t - The multiboot info passed by grub
///
/// Return: mem.MemProfile
///     The constructed memory profile
///
/// Error: std.mem.Allocator.Error
///     std.mem.Allocator.Error.OutOfMemory - There wasn't enough memory in the allocator created to populate the memory profile, consider increasing mem.FIXED_ALLOC_SIZE
///
pub fn initMem(mb_info: BootPayload) std.mem.Allocator.Error!MemProfile {
    log.logInfo("Init mem\n", .{});
    defer log.logInfo("Done mem\n", .{});

    const mods_count = mb_info.mods_count;
    // Record the boot-time virtual-to-physical offset from the linker symbol so that
    // mem.virtToPhys/physToVirt translate correctly from here on
    mem.ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);
    const mmap_addr = mb_info.mmap_addr;
    const num_mmap_entries = mb_info.mmap_length / @sizeOf(multiboot.multiboot_memory_map_t);

    // Back the boot-time fixed allocator with the region immediately after the kernel image
    const vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END);
    var allocator = std.heap.FixedBufferAllocator.init(vaddr_end[0..mem.FIXED_ALLOC_SIZE]);
    var reserved_physical_mem = std.ArrayList(mem.Range).init(&allocator.allocator);
    var reserved_virtual_mem = std.ArrayList(mem.Map).init(&allocator.allocator);
    // NOTE(review): mmap_addr is dereferenced directly while mods_addr below goes through
    // physToVirt — assumes the memory map is accessible at this address; confirm against the
    // bootloader's mapping
    const mem_map = @intToPtr([*]multiboot.multiboot_memory_map_t, mmap_addr)[0..num_mmap_entries];

    // Reserve the unavailable sections from the multiboot memory map
    for (mem_map) |entry| {
        if (entry.@"type" != multiboot.MULTIBOOT_MEMORY_AVAILABLE) {
            // If addr + len is greater than maxInt(usize) just ignore whatever comes after maxInt(usize) since it can't be addressed anyway
            const end: usize = if (entry.addr > std.math.maxInt(usize) - entry.len) std.math.maxInt(usize) else @intCast(usize, entry.addr + entry.len);
            try reserved_physical_mem.append(.{ .start = @intCast(usize, entry.addr), .end = end });
        }
    }

    // Map the multiboot info struct itself
    const mb_region = mem.Range{
        .start = @ptrToInt(mb_info),
        .end = @ptrToInt(mb_info) + @sizeOf(multiboot.multiboot_info_t),
    };
    const mb_physical = mem.Range{ .start = mem.virtToPhys(mb_region.start), .end = mem.virtToPhys(mb_region.end) };
    try reserved_virtual_mem.append(.{ .virtual = mb_region, .physical = mb_physical });

    // Map the tty buffer
    const tty_addr = mem.virtToPhys(tty.getVideoBufferAddress());
    const tty_region = mem.Range{
        .start = tty_addr,
        // 32KiB video buffer — presumably the VGA text buffer size; confirm against tty.zig
        .end = tty_addr + 32 * 1024,
    };
    try reserved_virtual_mem.append(.{
        .physical = tty_region,
        .virtual = .{
            .start = mem.physToVirt(tty_region.start),
            .end = mem.physToVirt(tty_region.end),
        },
    });

    // Map the boot modules
    const boot_modules = @intToPtr([*]multiboot.multiboot_mod_list, mem.physToVirt(mb_info.mods_addr))[0..mods_count];
    var modules = std.ArrayList(mem.Module).init(&allocator.allocator);
    for (boot_modules) |module| {
        // Each module is reserved both physically (for the PMM) and virtually (for the VMM)
        const virtual = mem.Range{ .start = mem.physToVirt(module.mod_start), .end = mem.physToVirt(module.mod_end) };
        const physical = mem.Range{ .start = module.mod_start, .end = module.mod_end };
        // cmdline is a physical pointer to a null-terminated string naming the module
        try modules.append(.{ .region = virtual, .name = std.mem.span(mem.physToVirt(@intToPtr([*:0]u8, module.cmdline))) });
        try reserved_virtual_mem.append(.{ .physical = physical, .virtual = virtual });
    }

    return MemProfile{
        .vaddr_end = vaddr_end,
        .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
        .physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
        .physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
        // Total memory available including the initial 1MiB that grub doesn't include
        .mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
        .modules = modules.items,
        .physical_reserved = reserved_physical_mem.items,
        .virtual_reserved = reserved_virtual_mem.items,
        // The FixedBufferAllocator is returned by value; callers use the copy in the profile
        .fixed_allocator = allocator,
    };
}
/// ///
/// Initialise the architecture /// Initialise the architecture
/// ///

View file

@ -102,10 +102,9 @@ export fn start_higher_half() callconv(.Naked) noreturn {
\\xor %%ebp, %%ebp \\xor %%ebp, %%ebp
); );
// Push the bootloader magic number and multiboot header address with virtual offset // Push the multiboot header address with virtual offset
asm volatile ( asm volatile (
\\.extern KERNEL_ADDR_OFFSET \\.extern KERNEL_ADDR_OFFSET
\\push %%eax
\\add $KERNEL_ADDR_OFFSET, %%ebx \\add $KERNEL_ADDR_OFFSET, %%ebx
\\push %%ebx \\push %%ebx
); );

View file

@ -10,7 +10,7 @@ const tty = @import("../../tty.zig");
const log = @import("../../log.zig"); const log = @import("../../log.zig");
const mem = @import("../../mem.zig"); const mem = @import("../../mem.zig");
const vmm = @import("../../vmm.zig"); const vmm = @import("../../vmm.zig");
const multiboot = @import("../../multiboot.zig"); const multiboot = @import("multiboot.zig");
const options = @import("build_options"); const options = @import("build_options");
const testing = std.testing; const testing = std.testing;
@ -404,7 +404,7 @@ pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile
: :
: [addr] "{eax}" (dir_physaddr) : [addr] "{eax}" (dir_physaddr)
); );
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB); const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, PAGE_SIZE_4KB);
if (options.rt_test) runtimeTests(v_end); if (options.rt_test) runtimeTests(v_end);
} }

View file

@ -4,7 +4,6 @@ const is_test = builtin.is_test;
const build_options = @import("build_options"); const build_options = @import("build_options");
const mock_path = build_options.mock_path; const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals; const arch = @import("arch.zig").internals;
const multiboot = @import("multiboot.zig");
const tty = @import("tty.zig"); const tty = @import("tty.zig");
const vga = @import("vga.zig"); const vga = @import("vga.zig");
const log = @import("log.zig"); const log = @import("log.zig");
@ -38,28 +37,25 @@ pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn
panic_root.panic(error_return_trace, "{}", .{msg}); panic_root.panic(error_return_trace, "{}", .{msg});
} }
export fn kmain(mb_info: *multiboot.multiboot_info_t, mb_magic: u32) void { export fn kmain(boot_payload: arch.BootPayload) void {
if (mb_magic == multiboot.MULTIBOOT_BOOTLOADER_MAGIC) {
// Booted with compatible bootloader
serial.init(serial.DEFAULT_BAUDRATE, serial.Port.COM1) catch |e| { serial.init(serial.DEFAULT_BAUDRATE, serial.Port.COM1) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise serial: {}", .{e}); panic_root.panic(@errorReturnTrace(), "Failed to initialise serial: {}", .{e});
}; };
if (build_options.rt_test) log.runtimeTests(); if (build_options.rt_test) log.runtimeTests();
const mem_profile = mem.init(mb_info); const mem_profile = arch.initMem(boot_payload) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise memory profile: {}", .{e});
var buffer = mem_profile.vaddr_end[0..mem_profile.fixed_alloc_size]; var fixed_allocator = mem_profile.fixed_allocator;
var fixed_allocator = std.heap.FixedBufferAllocator.init(buffer);
panic_root.init(&mem_profile, &fixed_allocator.allocator) catch |e| { panic_root.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise panic: {}", .{e}); panic_root.panic(@errorReturnTrace(), "Failed to initialise panic: {}", .{e});
}; };
pmm.init(&mem_profile, &fixed_allocator.allocator); pmm.init(&mem_profile, &fixed_allocator.allocator);
kernel_vmm = vmm.init(&mem_profile, mb_info, &fixed_allocator.allocator) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e}); kernel_vmm = vmm.init(&mem_profile, &fixed_allocator.allocator) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});
log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{}); log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{});
arch.init(mb_info, &mem_profile, &fixed_allocator.allocator); arch.init(boot_payload, &mem_profile, &fixed_allocator.allocator);
log.logInfo("Arch init done\n", .{}); log.logInfo("Arch init done\n", .{});
vga.init(); vga.init();
@ -79,5 +75,4 @@ export fn kmain(mb_info: *multiboot.multiboot_info_t, mb_magic: u32) void {
// The panic runtime tests must run last as they never return // The panic runtime tests must run last as they never return
if (options.rt_test) panic_root.runtimeTests(); if (options.rt_test) panic_root.runtimeTests();
}
} }

View file

@ -1,8 +1,29 @@
const multiboot = @import("multiboot.zig");
const std = @import("std"); const std = @import("std");
const expectEqual = std.testing.expectEqual; const expectEqual = std.testing.expectEqual;
const log = @import("log.zig"); const log = @import("log.zig");
/// A module loaded into memory at boot time (e.g. a bootloader-provided module)
pub const Module = struct {
    /// The region of memory occupied by the module
    region: Range,
    /// The module's name
    name: []const u8,
};

/// A reservation of a virtual memory range, optionally backed by a physical range
pub const Map = struct {
    /// The virtual range to reserve
    virtual: Range,
    /// The physical range to map to, if any
    physical: ?Range,
};

/// A range of memory
pub const Range = struct {
    /// The start of the range, inclusive
    start: usize,
    /// The end of the range, exclusive
    end: usize,
};
pub const MemProfile = struct { pub const MemProfile = struct {
/// The virtual end address of the kernel code. /// The virtual end address of the kernel code.
vaddr_end: [*]u8, vaddr_end: [*]u8,
@ -19,39 +40,26 @@ pub const MemProfile = struct {
/// The amount of memory in the system, in kilobytes. /// The amount of memory in the system, in kilobytes.
mem_kb: usize, mem_kb: usize,
/// The size of the fixed buffer allocator used as the first memory allocator. /// The modules loaded into memory at boot.
fixed_alloc_size: usize, modules: []Module,
/// The boot modules provided by the bootloader. /// The virtual regions of reserved memory. Should not include what is tracked by the vaddr_* fields but should include the regions occupied by the modules. These are reserved and mapped by the VMM
boot_modules: []multiboot.multiboot_module_t, virtual_reserved: []Map,
/// The memory map provided by the bootloader. Desribes which areas of memory are available and /// The phsyical regions of reserved memory. Should not include what is tracked by the physaddr_* fields but should include the regions occupied by the modules. These are reserved by the PMM
/// which are reserved. physical_reserved: []Range,
mem_map: []multiboot.multiboot_memory_map_t,
/// The allocator to use before a heap can be set up.
fixed_allocator: std.heap.FixedBufferAllocator,
}; };
/// The virtual end of the kernel code
extern var KERNEL_VADDR_END: *u32;
/// The virtual start of the kernel code
extern var KERNEL_VADDR_START: *u32;
/// The physical end of the kernel code
extern var KERNEL_PHYSADDR_END: *u32;
/// The physical start of the kernel code
extern var KERNEL_PHYSADDR_START: *u32;
/// The boot-time offset that the virtual addresses are from the physical addresses
extern var KERNEL_ADDR_OFFSET: *u32;
/// The size of the fixed allocator used before the heap is set up. Set to 1MiB. /// The size of the fixed allocator used before the heap is set up. Set to 1MiB.
const FIXED_ALLOC_SIZE: usize = 1024 * 1024; pub const FIXED_ALLOC_SIZE: usize = 1024 * 1024;
/// The kernel's virtual address offset. It's assigned in the init function and this file's tests. /// The kernel's virtual address offset. It's assigned in the init function and this file's tests.
/// We can't just use KERNEL_ADDR_OFFSET since using externs in the virtToPhys test is broken in /// We can't just use KERNEL_ADDR_OFFSET since using externs in the virtToPhys test is broken in
/// release-safe. This is a workaround until that is fixed. /// release-safe. This is a workaround until that is fixed.
var ADDR_OFFSET: usize = undefined; pub var ADDR_OFFSET: usize = undefined;
/// ///
/// Convert a virtual address to its physical counterpart by subtracting the kernel virtual offset from the virtual address. /// Convert a virtual address to its physical counterpart by subtracting the kernel virtual offset from the virtual address.
@ -89,36 +97,6 @@ pub fn physToVirt(phys: var) @TypeOf(phys) {
}; };
} }
///
/// Initialise the system's memory profile based on linker symbols and the multiboot info struct.
///
/// Arguments:
///     IN mb_info: *multiboot.multiboot_info_t - The multiboot info passed by the bootloader.
///
/// Return: MemProfile
///     The memory profile constructed from the exported linker symbols and the relevant multiboot info.
///
pub fn init(mb_info: *multiboot.multiboot_info_t) MemProfile {
    log.logInfo("Init mem\n", .{});
    defer log.logInfo("Done mem\n", .{});

    const mods_count = mb_info.mods_count;
    // Record the boot-time virtual-to-physical offset so virtToPhys/physToVirt work
    ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);
    const mmap_addr = mb_info.mmap_addr;
    const num_mmap_entries = mb_info.mmap_length / @sizeOf(multiboot.multiboot_memory_map_t);

    return .{
        .vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
        .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
        .physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
        .physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
        // Total memory available including the initial 1MiB that grub doesn't include
        .mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
        .fixed_alloc_size = FIXED_ALLOC_SIZE,
        // Module list and memory map live at physical addresses from the bootloader
        .boot_modules = @intToPtr([*]multiboot.multiboot_mod_list, physToVirt(@intCast(usize, mb_info.mods_addr)))[0..mods_count],
        .mem_map = @intToPtr([*]multiboot.multiboot_memory_map_t, mmap_addr)[0..num_mmap_entries],
    };
}
test "physToVirt" { test "physToVirt" {
ADDR_OFFSET = 0xC0000000; ADDR_OFFSET = 0xC0000000;
const offset: usize = ADDR_OFFSET; const offset: usize = ADDR_OFFSET;

View file

@ -281,15 +281,14 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) !
defer log.logInfo("Done panic\n", .{}); defer log.logInfo("Done panic\n", .{});
// Exit if we haven't loaded all debug modules // Exit if we haven't loaded all debug modules
if (mem_profile.boot_modules.len < 1) if (mem_profile.modules.len < 1)
return; return;
var kmap_start: usize = 0; var kmap_start: usize = 0;
var kmap_end: usize = 0; var kmap_end: usize = 0;
for (mem_profile.boot_modules) |module| { for (mem_profile.modules) |module| {
const mod_start = mem.physToVirt(@intCast(usize, module.mod_start)); const mod_start = module.region.start;
const mod_end = mem.physToVirt(@intCast(usize, module.mod_end) - 1); const mod_end = module.region.end - 1;
const mod_str_ptr = mem.physToVirt(@intToPtr([*:0]u8, module.cmdline)); if (std.mem.eql(u8, module.name, "kernel.map")) {
if (std.mem.eql(u8, std.mem.span(mod_str_ptr), "kernel.map")) {
kmap_start = mod_start; kmap_start = mod_start;
kmap_end = mod_end; kmap_end = mod_end;
break; break;
@ -302,7 +301,6 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) !
var syms = SymbolMap.init(allocator); var syms = SymbolMap.init(allocator);
errdefer syms.deinit(); errdefer syms.deinit();
var file_index = kmap_start;
var kmap_ptr = @intToPtr([*]u8, kmap_start); var kmap_ptr = @intToPtr([*]u8, kmap_start);
while (@ptrToInt(kmap_ptr) < kmap_end - 1) { while (@ptrToInt(kmap_ptr) < kmap_end - 1) {
const entry = try parseMapEntry(&kmap_ptr, @intToPtr(*const u8, kmap_end)); const entry = try parseMapEntry(&kmap_ptr, @intToPtr(*const u8, kmap_end));

View file

@ -7,7 +7,6 @@ const MemProfile = (if (is_test) @import(mock_path ++ "mem_mock.zig") else @impo
const testing = std.testing; const testing = std.testing;
const panic = @import("panic.zig").panic; const panic = @import("panic.zig").panic;
const log = if (is_test) @import(mock_path ++ "log_mock.zig") else @import("log.zig"); const log = if (is_test) @import(mock_path ++ "log_mock.zig") else @import("log.zig");
const MEMORY_AVAILABLE = @import("multiboot.zig").MULTIBOOT_MEMORY_AVAILABLE;
const Bitmap = @import("bitmap.zig").Bitmap; const Bitmap = @import("bitmap.zig").Bitmap;
const PmmBitmap = Bitmap(u32); const PmmBitmap = Bitmap(u32);
@ -105,13 +104,13 @@ pub fn init(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
bitmap = PmmBitmap.init(mem.mem_kb * 1024 / BLOCK_SIZE, allocator) catch @panic("Bitmap allocation failed"); bitmap = PmmBitmap.init(mem.mem_kb * 1024 / BLOCK_SIZE, allocator) catch @panic("Bitmap allocation failed");
// Occupy the regions of memory that the memory map describes as reserved // Occupy the regions of memory that the memory map describes as reserved
for (mem.mem_map) |entry| { for (mem.physical_reserved) |entry| {
if (entry.@"type" != MEMORY_AVAILABLE) { var addr = std.mem.alignBackward(entry.start, BLOCK_SIZE);
var addr = std.mem.alignBackward(@intCast(usize, entry.addr), BLOCK_SIZE); var end = entry.end - 1;
var end = @intCast(usize, entry.addr + (entry.len - 1));
// If the end address can be aligned without overflowing then align it // If the end address can be aligned without overflowing then align it
if (end <= std.math.maxInt(usize) - BLOCK_SIZE) if (end <= std.math.maxInt(usize) - BLOCK_SIZE) {
end = std.mem.alignForward(end, BLOCK_SIZE); end = std.mem.alignForward(end, BLOCK_SIZE);
}
while (addr < end) : (addr += BLOCK_SIZE) { while (addr < end) : (addr += BLOCK_SIZE) {
setAddr(addr) catch |e| switch (e) { setAddr(addr) catch |e| switch (e) {
// We can ignore out of bounds errors as the memory won't be available anyway // We can ignore out of bounds errors as the memory won't be available anyway
@ -120,7 +119,6 @@ pub fn init(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
}; };
} }
} }
}
if (build_options.rt_test) { if (build_options.rt_test) {
runtimeTests(mem, allocator); runtimeTests(mem, allocator);
@ -144,14 +142,12 @@ fn runtimeTests(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
panic(null, "PMM allocated the same address twice: 0x{x}", .{alloced}); panic(null, "PMM allocated the same address twice: 0x{x}", .{alloced});
} }
prev_alloc = alloced; prev_alloc = alloced;
for (mem.mem_map) |entry| { for (mem.physical_reserved) |entry| {
if (entry.@"type" != MEMORY_AVAILABLE) { var addr = std.mem.alignBackward(entry.start, BLOCK_SIZE);
var addr = std.mem.alignBackward(@intCast(usize, entry.addr), BLOCK_SIZE);
if (addr == alloced) { if (addr == alloced) {
panic(null, "PMM allocated an address that should be reserved by the memory map: 0x{x}", .{addr}); panic(null, "PMM allocated an address that should be reserved by the memory map: 0x{x}", .{addr});
} }
} }
}
alloc_list.append(alloced) catch |e| panic(@errorReturnTrace(), "Failed to add PMM allocation to list: {}", .{e}); alloc_list.append(alloced) catch |e| panic(@errorReturnTrace(), "Failed to add PMM allocation to list: {}", .{e});
} }
// Clean up // Clean up

View file

@ -7,7 +7,6 @@ const bitmap = @import("bitmap.zig");
const pmm = @import("pmm.zig"); const pmm = @import("pmm.zig");
const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig"); const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
const tty = @import("tty.zig"); const tty = @import("tty.zig");
const multiboot = @import("multiboot.zig");
const log = @import("log.zig"); const log = @import("log.zig");
const panic = @import("panic.zig").panic; const panic = @import("panic.zig").panic;
const arch = @import("arch.zig").internals; const arch = @import("arch.zig").internals;
@ -195,10 +194,8 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
/// ///
/// Arguments: /// Arguments:
/// INOUT self: *Self - The manager to modify /// INOUT self: *Self - The manager to modify
/// IN virtual_start: usize - The start of the virtual region /// IN virtual: mem.Range - The virtual region to set
/// IN virtual_end: usize - The end of the virtual region /// IN physical: ?mem.Range - The physical region to map to or null if only the virtual region is to be set
/// IN physical_start: usize - The start of the physical region
/// IN physical_end: usize - The end of the physical region
/// IN attrs: Attributes - The attributes to apply to the memory regions /// IN attrs: Attributes - The attributes to apply to the memory regions
/// ///
/// Error: VmmError || Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError /// Error: VmmError || Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError
@ -211,37 +208,47 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
/// std.mem.Allocator.Error.OutOfMemory - Allocating the required memory failed /// std.mem.Allocator.Error.OutOfMemory - Allocating the required memory failed
/// MapperError.* - The causes depend on the mapper used /// MapperError.* - The causes depend on the mapper used
/// ///
pub fn set(self: *Self, virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes) (VmmError || bitmap.Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError)!void { pub fn set(self: *Self, virtual: mem.Range, physical: ?mem.Range, attrs: Attributes) (VmmError || bitmap.Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError)!void {
var virt = virtual_start; var virt = virtual.start;
while (virt < virtual_end) : (virt += BLOCK_SIZE) { while (virt < virtual.end) : (virt += BLOCK_SIZE) {
if (try self.isSet(virt)) if (try self.isSet(virt))
return VmmError.AlreadyAllocated; return VmmError.AlreadyAllocated;
} }
var phys = physical_start; if (virtual.start > virtual.end) {
while (phys < physical_end) : (phys += BLOCK_SIZE) { return VmmError.InvalidVirtAddresses;
if (try pmm.isSet(phys)) }
if (physical) |p| {
if (virtual.end - virtual.start != p.end - p.start) {
return VmmError.PhysicalVirtualMismatch;
}
if (p.start > p.end) {
return VmmError.InvalidPhysAddresses;
}
var phys = p.start;
while (phys < p.end) : (phys += BLOCK_SIZE) {
if (try pmm.isSet(phys)) {
return VmmError.PhysicalAlreadyAllocated; return VmmError.PhysicalAlreadyAllocated;
} }
if (virtual_end - virtual_start != physical_end - physical_start) }
return VmmError.PhysicalVirtualMismatch; }
if (physical_start > physical_end)
return VmmError.InvalidPhysAddresses;
if (virtual_start > virtual_end)
return VmmError.InvalidVirtAddresses;
virt = virtual_start; var phys_list = std.ArrayList(usize).init(self.allocator);
while (virt < virtual_end) : (virt += BLOCK_SIZE) {
virt = virtual.start;
while (virt < virtual.end) : (virt += BLOCK_SIZE) {
try self.bmp.setEntry(virt / BLOCK_SIZE); try self.bmp.setEntry(virt / BLOCK_SIZE);
} }
try self.mapper.mapFn(virtual_start, virtual_end, physical_start, physical_end, attrs, self.allocator, self.payload); if (physical) |p| {
try self.mapper.mapFn(virtual.start, virtual.end, p.start, p.end, attrs, self.allocator, self.payload);
var phys_list = std.ArrayList(usize).init(self.allocator); var phys = p.start;
phys = physical_start; while (phys < p.end) : (phys += BLOCK_SIZE) {
while (phys < physical_end) : (phys += BLOCK_SIZE) {
try pmm.setAddr(phys); try pmm.setAddr(phys);
try phys_list.append(phys); try phys_list.append(phys);
} }
}
_ = try self.allocations.put(virt, Allocation{ .physical = phys_list }); _ = try self.allocations.put(virt, Allocation{ .physical = phys_list });
} }
@ -325,23 +332,19 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
} }
/// ///
/// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code, TTY, multiboot info and boot modules /// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code and reserved virtual memory
/// ///
/// Arguments: /// Arguments:
/// IN mem_profile: *const mem.MemProfile - The system's memory profile. This is used to find the kernel code region and boot modules /// IN mem_profile: *const mem.MemProfile - The system's memory profile. This is used to find the kernel code region and boot modules
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info
/// INOUT allocator: *std.mem.Allocator - The allocator to use when needing to allocate memory /// INOUT allocator: *std.mem.Allocator - The allocator to use when needing to allocate memory
/// IN comptime Payload: type - The type of the data to pass as a payload to the virtual memory manager
/// IN mapper: Mapper - The memory mapper to call when allocating and free virtual memory
/// IN payload: Paylaod - The payload data to pass to the virtual memory manager
/// ///
/// Return: VirtualMemoryManager /// Return: VirtualMemoryManager
/// The virtual memory manager created with all stated regions allocated /// The virtual memory manager created with all reserved virtual regions allocated
/// ///
/// Error: std.mem.Allocator.Error /// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - The allocator cannot allocate the memory required /// std.mem.Allocator.Error.OutOfMemory - The allocator cannot allocate the memory required
/// ///
pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) { pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
log.logInfo("Init vmm\n", .{}); log.logInfo("Init vmm\n", .{});
defer log.logInfo("Done vmm\n", .{}); defer log.logInfo("Done vmm\n", .{});
@ -350,37 +353,21 @@ pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_in
// Map in kernel // Map in kernel
// Calculate start and end of mapping // Calculate start and end of mapping
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE); const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE); const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE); const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE); const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);
vmm.set(v_start, v_end, p_start, p_end, .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping kernel code in VMM: {}", .{e}); vmm.set(.{ .start = v_start, .end = v_end }, mem.Range{ .start = p_start, .end = p_end }, .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping kernel code in VMM: {}", .{e});
// Map in tty for (mem_profile.virtual_reserved) |entry| {
const tty_addr = tty.getVideoBufferAddress(); const virtual = mem.Range{ .start = std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE), .end = std.mem.alignForward(entry.virtual.end, BLOCK_SIZE) };
const tty_phys = mem.virtToPhys(tty_addr); const physical: ?mem.Range = if (entry.physical) |phys| mem.Range{ .start = std.mem.alignBackward(phys.start, BLOCK_SIZE), .end = std.mem.alignForward(phys.end, BLOCK_SIZE) } else null;
const tty_buff_size = 32 * 1024; vmm.set(virtual, physical, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
vmm.set(tty_addr, tty_addr + tty_buff_size, tty_phys, tty_phys + tty_buff_size, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping TTY in VMM: {}", .{e}); VmmError.AlreadyAllocated => {},
else => panic(@errorReturnTrace(), "Failed mapping region in VMM {}: {}\n", .{ entry, e }),
// Map in the multiboot info struct
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
vmm.set(mb_info_addr, mb_info_end, mem.virtToPhys(mb_info_addr), mem.virtToPhys(mb_info_end), .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping multiboot info in VMM: {}", .{e});
// Map in each boot module
for (mem_profile.boot_modules) |*module| {
const mod_v_struct_start = std.mem.alignBackward(@ptrToInt(module), BLOCK_SIZE);
const mod_v_struct_end = std.mem.alignForward(mod_v_struct_start + @sizeOf(multiboot.multiboot_module_t), BLOCK_SIZE);
vmm.set(mod_v_struct_start, mod_v_struct_end, mem.virtToPhys(mod_v_struct_start), mem.virtToPhys(mod_v_struct_end), .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
// A previous allocation could cover this region so the AlreadyAllocated error can be ignored
VmmError.AlreadyAllocated => break,
else => panic(@errorReturnTrace(), "Failed mapping boot module struct in VMM: {}", .{e}),
}; };
const mod_p_start = std.mem.alignBackward(module.mod_start, BLOCK_SIZE);
const mod_p_end = std.mem.alignForward(module.mod_end, BLOCK_SIZE);
vmm.set(mem.physToVirt(mod_p_start), mem.physToVirt(mod_p_end), mod_p_start, mod_p_end, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping boot module in VMM: {}", .{e});
} }
if (build_options.rt_test) runtimeTests(arch.VmmPayload, vmm, mem_profile, mb_info); if (build_options.rt_test) runtimeTests(arch.VmmPayload, vmm, mem_profile);
return vmm; return vmm;
} }
@ -474,7 +461,7 @@ test "set" {
const pstart = vstart + 123; const pstart = vstart + 123;
const pend = vend + 123; const pend = vend + 123;
const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true }; const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true };
try vmm.set(vstart, vend, pstart, pend, attrs); try vmm.set(.{ .start = vstart, .end = vend }, mem.Range{ .start = pstart, .end = pend }, attrs);
var allocations = test_allocations orelse unreachable; var allocations = test_allocations orelse unreachable;
// The entries before the virtual start shouldn't be set // The entries before the virtual start shouldn't be set
@ -517,7 +504,17 @@ fn testInit(num_entries: u32) std.mem.Allocator.Error!VirtualMemoryManager(u8) {
} }
} }
var allocations = test_allocations orelse unreachable; var allocations = test_allocations orelse unreachable;
const mem_profile = mem.MemProfile{ .vaddr_end = undefined, .vaddr_start = undefined, .physaddr_start = undefined, .physaddr_end = undefined, .mem_kb = num_entries * BLOCK_SIZE / 1024, .fixed_alloc_size = undefined, .mem_map = &[_]multiboot.multiboot_memory_map_t{}, .boot_modules = &[_]multiboot.multiboot_module_t{} }; const mem_profile = mem.MemProfile{
.vaddr_end = undefined,
.vaddr_start = undefined,
.physaddr_start = undefined,
.physaddr_end = undefined,
.mem_kb = num_entries * BLOCK_SIZE / 1024,
.fixed_allocator = undefined,
.virtual_reserved = &[_]mem.Map{},
.physical_reserved = &[_]mem.Range{},
.modules = &[_]mem.Module{},
};
pmm.init(&mem_profile, std.heap.page_allocator); pmm.init(&mem_profile, std.heap.page_allocator);
return try VirtualMemoryManager(u8).init(0, num_entries * BLOCK_SIZE, std.heap.page_allocator, test_mapper, 39); return try VirtualMemoryManager(u8).init(0, num_entries * BLOCK_SIZE, std.heap.page_allocator, test_mapper, 39);
} }
@ -567,55 +564,29 @@ fn testUnmap(vstart: usize, vend: usize, payload: u8) (std.mem.Allocator.Error |
/// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved /// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info struct that should also be reserved /// IN mb_info: *multiboot.multiboot_info_t - The multiboot info struct that should also be reserved
/// ///
fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t) void { fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void {
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE); const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE); const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const tty_addr = tty.getVideoBufferAddress();
const tty_phys = mem.virtToPhys(tty_addr);
const tty_buff_size = 32 * 1024;
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
// Make sure all blocks before the mb info are not set
var vaddr = vmm.start; var vaddr = vmm.start;
while (vaddr < mb_info_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before mb_info was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the mb info are set
while (vaddr < mb_info_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for mb_info was not set: {x}", .{vaddr});
}
// Make sure all blocks before the kernel code are not set
while (vaddr < tty_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before tty was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the kernel code are set
while (vaddr < tty_addr + tty_buff_size) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for tty was not set: {x}", .{vaddr});
}
// Make sure all blocks before the kernel code are not set
while (vaddr < v_start) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before kernel code was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the kernel code are set
while (vaddr < v_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for kernel code was not set: {x}", .{vaddr});
}
// Make sure all blocks after the kernel code are not set
while (vaddr < vmm.end - BLOCK_SIZE) : (vaddr += BLOCK_SIZE) { while (vaddr < vmm.end - BLOCK_SIZE) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if address after {x} is set: {x}", .{ vaddr, e }); const set = vmm.isSet(vaddr) catch unreachable;
if (set) panic(null, "Address after kernel code was set: {x}", .{vaddr}); var should_be_set = false;
if (vaddr < v_end and vaddr >= v_start) {
should_be_set = true;
} else {
for (mem_profile.virtual_reserved) |entry| {
if (vaddr >= std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE) and vaddr < std.mem.alignForward(entry.virtual.end, BLOCK_SIZE)) {
should_be_set = true;
break;
}
}
}
if (set and !should_be_set) {
panic(@errorReturnTrace(), "An address was set in the VMM when it shouldn't have been: 0x{x}\n", .{vaddr});
} else if (!set and should_be_set) {
panic(@errorReturnTrace(), "An address was not set in the VMM when it should have been: 0x{x}\n", .{vaddr});
}
} }
log.logInfo("VMM: Tested allocations\n", .{}); log.logInfo("VMM: Tested allocations\n", .{});

View file

@ -4,7 +4,6 @@ const mem = @import("mem_mock.zig");
const MemProfile = mem.MemProfile; const MemProfile = mem.MemProfile;
const gdt = @import("gdt_mock.zig"); const gdt = @import("gdt_mock.zig");
const idt = @import("idt_mock.zig"); const idt = @import("idt_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const vmm = @import("vmm_mock.zig"); const vmm = @import("vmm_mock.zig");
const paging = @import("paging_mock.zig"); const paging = @import("paging_mock.zig");
@ -41,6 +40,14 @@ pub const VmmPayload = u8;
pub const KERNEL_VMM_PAYLOAD: usize = 0; pub const KERNEL_VMM_PAYLOAD: usize = 0;
pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB; pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB;
pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = undefined; pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = undefined;
pub const BootPayload = u8;
// The virtual/physical start/end of the kernel code
var KERNEL_PHYSADDR_START: u32 = 0x00100000;
var KERNEL_PHYSADDR_END: u32 = 0x01000000;
var KERNEL_VADDR_START: u32 = 0xC0100000;
var KERNEL_VADDR_END: u32 = 0xC1100000;
var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;
pub fn outb(port: u16, data: u8) void { pub fn outb(port: u16, data: u8) void {
return mock_framework.performAction("outb", void, .{ port, data }); return mock_framework.performAction("outb", void, .{ port, data });
@ -94,7 +101,22 @@ pub fn haltNoInterrupts() noreturn {
while (true) {} while (true) {}
} }
pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile, allocator: *Allocator) void { pub fn initMem(payload: BootPayload) std.mem.Allocator.Error!mem.MemProfile {
return MemProfile{
.vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = 0,
.fixed_allocator = undefined,
.virtual_reserved = undefined,
.physical_reserved = undefined,
.modules = undefined,
};
}
pub fn init(payload: BootPayload, mem_profile: *const MemProfile, allocator: *Allocator) void {
// I'll get back to this as this doesn't effect the GDT testing. // I'll get back to this as this doesn't effect the GDT testing.
// When I come on to the mem.zig testing, I'll fix :) // When I come on to the mem.zig testing, I'll fix :)
//return mock_framework.performAction("init", void, mem_profile, allocator); //return mock_framework.performAction("init", void, mem_profile, allocator);

View file

@ -1,40 +1,36 @@
const std = @import("std");
const multiboot = @import("../../../src/kernel/multiboot.zig"); const multiboot = @import("../../../src/kernel/multiboot.zig");
pub const Module = struct {
region: Range,
name: []const u8,
};
pub const Map = struct {
virtual: Range,
physical: ?Range,
};
pub const Range = struct {
start: usize,
end: usize,
};
pub const MemProfile = struct { pub const MemProfile = struct {
vaddr_end: [*]u8, vaddr_end: [*]u8,
vaddr_start: [*]u8, vaddr_start: [*]u8,
physaddr_end: [*]u8, physaddr_end: [*]u8,
physaddr_start: [*]u8, physaddr_start: [*]u8,
mem_kb: u32, mem_kb: u32,
fixed_alloc_size: u32, modules: []Module,
mem_map: []multiboot.multiboot_memory_map_t, virtual_reserved: []Map,
boot_modules: []multiboot.multiboot_module_t, physical_reserved: []Range,
fixed_allocator: std.heap.FixedBufferAllocator,
}; };
// The virtual/physical start/end of the kernel code
var KERNEL_PHYSADDR_START: u32 = 0x00100000;
var KERNEL_PHYSADDR_END: u32 = 0x01000000;
var KERNEL_VADDR_START: u32 = 0xC0100000;
var KERNEL_VADDR_END: u32 = 0xC1100000;
var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;
// The size of the fixed allocator used before the heap is set up. Set to 1MiB. // The size of the fixed allocator used before the heap is set up. Set to 1MiB.
const FIXED_ALLOC_SIZE = 1024 * 1024; const FIXED_ALLOC_SIZE = 1024 * 1024;
pub fn init(mb_info: *multiboot.multiboot_info_t) MemProfile {
return MemProfile{
.vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
.fixed_alloc_size = FIXED_ALLOC_SIZE,
.mem_map = undefined,
.boot_modules = undefined,
};
}
pub fn virtToPhys(virt: var) @TypeOf(virt) { pub fn virtToPhys(virt: var) @TypeOf(virt) {
const T = @TypeOf(virt); const T = @TypeOf(virt);
return switch (@typeInfo(T)) { return switch (@typeInfo(T)) {

View file

@ -1,5 +1,4 @@
const mem = @import("mem_mock.zig"); const mem = @import("mem_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const bitmap = @import("../../../src/kernel/bitmap.zig"); const bitmap = @import("../../../src/kernel/bitmap.zig");
const arch = @import("arch_mock.zig"); const arch = @import("arch_mock.zig");
const std = @import("std"); const std = @import("std");
@ -34,6 +33,6 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
}; };
} }
pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) { pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
return std.mem.Allocator.Error.OutOfMemory; return std.mem.Allocator.Error.OutOfMemory;
} }