Abstract away boot modules and memory map
parent 0ca3542fd2
commit 554b9706f2
12 changed files with 313 additions and 258 deletions
@@ -11,11 +11,28 @@ const rtc = @import("rtc.zig");
const paging = @import("paging.zig");
const syscalls = @import("syscalls.zig");
const mem = @import("../../mem.zig");
const multiboot = @import("../../multiboot.zig");
const multiboot = @import("multiboot.zig");
const pmm = @import("pmm.zig");
const vmm = @import("../../vmm.zig");
const log = @import("../../log.zig");
const tty = @import("../../tty.zig");
const MemProfile = mem.MemProfile;

/// The virtual end of the kernel code
extern var KERNEL_VADDR_END: *u32;

/// The virtual start of the kernel code
extern var KERNEL_VADDR_START: *u32;

/// The physical end of the kernel code
extern var KERNEL_PHYSADDR_END: *u32;

/// The physical start of the kernel code
extern var KERNEL_PHYSADDR_START: *u32;

/// The boot-time offset that the virtual addresses are from the physical addresses
extern var KERNEL_ADDR_OFFSET: *u32;

/// The interrupt context that is given to an interrupt handler. It contains most of the registers
/// and the interrupt number and error code (if there is one).
pub const InterruptContext = struct {
@@ -49,6 +66,9 @@ pub const InterruptContext = struct {
ss: u32,
};

/// x86's boot payload is the multiboot info passed by grub
pub const BootPayload = *multiboot.multiboot_info_t;

/// The type of the payload passed to a virtual memory mapper.
/// For x86 it's the page directory that should be mapped.
pub const VmmPayload = *paging.Directory;
@@ -231,6 +251,87 @@ pub fn haltNoInterrupts() noreturn {
}
}

///
/// Initialise the system's memory. Populates a memory profile with boot modules from grub, the amount of available memory, the reserved regions of virtual and physical memory, as well as the start and end of the kernel code.
///
/// Arguments:
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info passed by grub
///
/// Return: mem.MemProfile
/// The constructed memory profile
///
/// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - There wasn't enough memory in the allocator created to populate the memory profile, consider increasing mem.FIXED_ALLOC_SIZE
///
pub fn initMem(mb_info: BootPayload) std.mem.Allocator.Error!MemProfile {
log.logInfo("Init mem\n", .{});
defer log.logInfo("Done mem\n", .{});

const mods_count = mb_info.mods_count;
mem.ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);
const mmap_addr = mb_info.mmap_addr;
const num_mmap_entries = mb_info.mmap_length / @sizeOf(multiboot.multiboot_memory_map_t);
const vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END);

var allocator = std.heap.FixedBufferAllocator.init(vaddr_end[0..mem.FIXED_ALLOC_SIZE]);
var reserved_physical_mem = std.ArrayList(mem.Range).init(&allocator.allocator);
var reserved_virtual_mem = std.ArrayList(mem.Map).init(&allocator.allocator);
const mem_map = @intToPtr([*]multiboot.multiboot_memory_map_t, mmap_addr)[0..num_mmap_entries];

// Reserve the unavailable sections from the multiboot memory map
for (mem_map) |entry| {
if (entry.@"type" != multiboot.MULTIBOOT_MEMORY_AVAILABLE) {
// If addr + len is greater than maxInt(usize) just ignore whatever comes after maxInt(usize) since it can't be addressed anyway
const end: usize = if (entry.addr > std.math.maxInt(usize) - entry.len) std.math.maxInt(usize) else @intCast(usize, entry.addr + entry.len);
try reserved_physical_mem.append(.{ .start = @intCast(usize, entry.addr), .end = end });
}
}
// Map the multiboot info struct itself
const mb_region = mem.Range{
.start = @ptrToInt(mb_info),
.end = @ptrToInt(mb_info) + @sizeOf(multiboot.multiboot_info_t),
};
const mb_physical = mem.Range{ .start = mem.virtToPhys(mb_region.start), .end = mem.virtToPhys(mb_region.end) };
try reserved_virtual_mem.append(.{ .virtual = mb_region, .physical = mb_physical });

// Map the tty buffer
const tty_addr = mem.virtToPhys(tty.getVideoBufferAddress());
const tty_region = mem.Range{
.start = tty_addr,
.end = tty_addr + 32 * 1024,
};
try reserved_virtual_mem.append(.{
.physical = tty_region,
.virtual = .{
.start = mem.physToVirt(tty_region.start),
.end = mem.physToVirt(tty_region.end),
},
});

// Map the boot modules
const boot_modules = @intToPtr([*]multiboot.multiboot_mod_list, mem.physToVirt(mb_info.mods_addr))[0..mods_count];
var modules = std.ArrayList(mem.Module).init(&allocator.allocator);
for (boot_modules) |module| {
const virtual = mem.Range{ .start = mem.physToVirt(module.mod_start), .end = mem.physToVirt(module.mod_end) };
const physical = mem.Range{ .start = module.mod_start, .end = module.mod_end };
try modules.append(.{ .region = virtual, .name = std.mem.span(mem.physToVirt(@intToPtr([*:0]u8, module.cmdline))) });
try reserved_virtual_mem.append(.{ .physical = physical, .virtual = virtual });
}

return MemProfile{
.vaddr_end = vaddr_end,
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
.modules = modules.items,
.physical_reserved = reserved_physical_mem.items,
.virtual_reserved = reserved_virtual_mem.items,
.fixed_allocator = allocator,
};
}

///
/// Initialise the architecture
///
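Note that everything the kernel needs now travels through mem.MemProfile, so nothing outside this file has to know about multiboot. As a rough illustration only (not part of this commit), an initMem for some other boot protocol would just fill in the same generic fields; everything below other than the MemProfile field names is hypothetical:

pub fn initMem(payload: BootPayload) std.mem.Allocator.Error!MemProfile {
    // Carve the early fixed-buffer allocator out of the space just past the kernel image.
    const vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END);
    var allocator = std.heap.FixedBufferAllocator.init(vaddr_end[0..mem.FIXED_ALLOC_SIZE]);

    var reserved_physical = std.ArrayList(mem.Range).init(&allocator.allocator);
    var reserved_virtual = std.ArrayList(mem.Map).init(&allocator.allocator);
    var modules = std.ArrayList(mem.Module).init(&allocator.allocator);

    // ... walk whatever memory map and module list `payload` provides, appending
    // mem.Range / mem.Map / mem.Module entries exactly as the grub version does above ...

    return MemProfile{
        .vaddr_end = vaddr_end,
        .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
        .physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
        .physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
        .mem_kb = 0, // fill in from the boot protocol's memory figures
        .modules = modules.items,
        .physical_reserved = reserved_physical.items,
        .virtual_reserved = reserved_virtual.items,
        .fixed_allocator = allocator,
    };
}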
@@ -102,10 +102,9 @@ export fn start_higher_half() callconv(.Naked) noreturn {
\\xor %%ebp, %%ebp
);

// Push the bootloader magic number and multiboot header address with virtual offset
// Push the multiboot header address with virtual offset
asm volatile (
\\.extern KERNEL_ADDR_OFFSET
\\push %%eax
\\add $KERNEL_ADDR_OFFSET, %%ebx
\\push %%ebx
);
@@ -10,7 +10,7 @@ const tty = @import("../../tty.zig");
const log = @import("../../log.zig");
const mem = @import("../../mem.zig");
const vmm = @import("../../vmm.zig");
const multiboot = @import("../../multiboot.zig");
const multiboot = @import("multiboot.zig");
const options = @import("build_options");
const testing = std.testing;
@@ -404,7 +404,7 @@ pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile
:
: [addr] "{eax}" (dir_physaddr)
);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, PAGE_SIZE_4KB);
if (options.rt_test) runtimeTests(v_end);
}
@@ -4,7 +4,6 @@ const is_test = builtin.is_test;
const build_options = @import("build_options");
const mock_path = build_options.mock_path;
const arch = @import("arch.zig").internals;
const multiboot = @import("multiboot.zig");
const tty = @import("tty.zig");
const vga = @import("vga.zig");
const log = @import("log.zig");
@@ -38,46 +37,42 @@ pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn
panic_root.panic(error_return_trace, "{}", .{msg});
}

export fn kmain(mb_info: *multiboot.multiboot_info_t, mb_magic: u32) void {
if (mb_magic == multiboot.MULTIBOOT_BOOTLOADER_MAGIC) {
// Booted with compatible bootloader
serial.init(serial.DEFAULT_BAUDRATE, serial.Port.COM1) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise serial: {}", .{e});
};
export fn kmain(boot_payload: arch.BootPayload) void {
serial.init(serial.DEFAULT_BAUDRATE, serial.Port.COM1) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise serial: {}", .{e});
};

if (build_options.rt_test) log.runtimeTests();
if (build_options.rt_test) log.runtimeTests();

const mem_profile = mem.init(mb_info);
var buffer = mem_profile.vaddr_end[0..mem_profile.fixed_alloc_size];
var fixed_allocator = std.heap.FixedBufferAllocator.init(buffer);
const mem_profile = arch.initMem(boot_payload) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise memory profile: {}", .{e});
var fixed_allocator = mem_profile.fixed_allocator;

panic_root.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise panic: {}", .{e});
};
panic_root.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise panic: {}", .{e});
};

pmm.init(&mem_profile, &fixed_allocator.allocator);
kernel_vmm = vmm.init(&mem_profile, mb_info, &fixed_allocator.allocator) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});
pmm.init(&mem_profile, &fixed_allocator.allocator);
kernel_vmm = vmm.init(&mem_profile, &fixed_allocator.allocator) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});

log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{});
arch.init(mb_info, &mem_profile, &fixed_allocator.allocator);
log.logInfo("Arch init done\n", .{});
log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{});
arch.init(boot_payload, &mem_profile, &fixed_allocator.allocator);
log.logInfo("Arch init done\n", .{});

vga.init();
tty.init();
// Give the kernel heap 10% of the available memory. This can be fine-tuned as time goes on.
var heap_size = mem_profile.mem_kb / 10 * 1024;
// The heap size must be a power of two so find the power of two smaller than or equal to the heap_size
if (!std.math.isPowerOfTwo(heap_size)) {
heap_size = std.math.floorPowerOfTwo(usize, heap_size);
}
var kernel_heap = heap.init(arch.VmmPayload, &kernel_vmm, vmm.Attributes{ .kernel = true, .writable = true, .cachable = true }, heap_size, &fixed_allocator.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
};
log.logInfo("Init done\n", .{});

tty.print("Hello Pluto from kernel :)\n", .{});

// The panic runtime tests must run last as they never return
if (options.rt_test) panic_root.runtimeTests();
vga.init();
tty.init();
// Give the kernel heap 10% of the available memory. This can be fine-tuned as time goes on.
var heap_size = mem_profile.mem_kb / 10 * 1024;
// The heap size must be a power of two so find the power of two smaller than or equal to the heap_size
if (!std.math.isPowerOfTwo(heap_size)) {
heap_size = std.math.floorPowerOfTwo(usize, heap_size);
}
var kernel_heap = heap.init(arch.VmmPayload, &kernel_vmm, vmm.Attributes{ .kernel = true, .writable = true, .cachable = true }, heap_size, &fixed_allocator.allocator) catch |e| {
panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
};
log.logInfo("Init done\n", .{});

tty.print("Hello Pluto from kernel :)\n", .{});

// The panic runtime tests must run last as they never return
if (options.rt_test) panic_root.runtimeTests();
}
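The heap sizing in kmain above is plain integer arithmetic: take a tenth of the reported memory and round it down to a power of two. A standalone sketch with made-up numbers (not taken from the commit):

const std = @import("std");
const assert = std.debug.assert;

pub fn main() void {
    // Pretend the bootloader reported 130,048 KiB (127 MiB) of memory.
    const mem_kb: usize = 130048;
    var heap_size: usize = mem_kb / 10 * 1024; // 13,004 KiB = 13,316,096 bytes
    if (!std.math.isPowerOfTwo(heap_size)) {
        // The largest power of two not exceeding ~12.7 MiB is 2^23 = 8 MiB.
        heap_size = std.math.floorPowerOfTwo(usize, heap_size);
    }
    assert(heap_size == 8 * 1024 * 1024);
}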
@@ -1,8 +1,29 @@
const multiboot = @import("multiboot.zig");
const std = @import("std");
const expectEqual = std.testing.expectEqual;
const log = @import("log.zig");

pub const Module = struct {
/// The region of memory occupied by the module
region: Range,
/// The module's name
name: []const u8,
};

pub const Map = struct {
/// The virtual range to reserve
virtual: Range,
/// The physical range to map to, if any
physical: ?Range,
};

/// A range of memory
pub const Range = struct {
/// The start of the range, inclusive
start: usize,
/// The end of the range, exclusive
end: usize,
};

pub const MemProfile = struct {
/// The virtual end address of the kernel code.
vaddr_end: [*]u8,
@@ -19,39 +40,26 @@ pub const MemProfile = struct {
/// The amount of memory in the system, in kilobytes.
mem_kb: usize,

/// The size of the fixed buffer allocator used as the first memory allocator.
fixed_alloc_size: usize,
/// The modules loaded into memory at boot.
modules: []Module,

/// The boot modules provided by the bootloader.
boot_modules: []multiboot.multiboot_module_t,
/// The virtual regions of reserved memory. Should not include what is tracked by the vaddr_* fields but should include the regions occupied by the modules. These are reserved and mapped by the VMM
virtual_reserved: []Map,

/// The memory map provided by the bootloader. Describes which areas of memory are available and
/// which are reserved.
mem_map: []multiboot.multiboot_memory_map_t,
/// The physical regions of reserved memory. Should not include what is tracked by the physaddr_* fields but should include the regions occupied by the modules. These are reserved by the PMM
physical_reserved: []Range,

/// The allocator to use before a heap can be set up.
fixed_allocator: std.heap.FixedBufferAllocator,
};

/// The virtual end of the kernel code
extern var KERNEL_VADDR_END: *u32;

/// The virtual start of the kernel code
extern var KERNEL_VADDR_START: *u32;

/// The physical end of the kernel code
extern var KERNEL_PHYSADDR_END: *u32;

/// The physical start of the kernel code
extern var KERNEL_PHYSADDR_START: *u32;

/// The boot-time offset that the virtual addresses are from the physical addresses
extern var KERNEL_ADDR_OFFSET: *u32;

/// The size of the fixed allocator used before the heap is set up. Set to 1MiB.
const FIXED_ALLOC_SIZE: usize = 1024 * 1024;
pub const FIXED_ALLOC_SIZE: usize = 1024 * 1024;

/// The kernel's virtual address offset. It's assigned in the init function and this file's tests.
/// We can't just use KERNEL_ADDR_OFFSET since using externs in the virtToPhys test is broken in
/// release-safe. This is a workaround until that is fixed.
var ADDR_OFFSET: usize = undefined;
pub var ADDR_OFFSET: usize = undefined;

///
/// Convert a virtual address to its physical counterpart by subtracting the kernel virtual offset from the virtual address.
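ADDR_OFFSET turns address translation into plain arithmetic: subtract it to go from a kernel virtual address to a physical one, add it to go back. A tiny illustration, using the same 0xC0000000 higher-half offset that the mocks further down use (values are illustrative only):

const std = @import("std");
const assert = std.debug.assert;

pub fn main() void {
    const addr_offset: usize = 0xC0000000;
    const virt: usize = 0xC0100000; // where the kernel's first physical MiB is mapped
    const phys = virt - addr_offset; // what virtToPhys computes
    assert(phys == 0x00100000);
    assert(phys + addr_offset == virt); // physToVirt round-trips
}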
@@ -89,36 +97,6 @@ pub fn physToVirt(phys: var) @TypeOf(phys) {
};
}

///
/// Initialise the system's memory profile based on linker symbols and the multiboot info struct.
///
/// Arguments:
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info passed by the bootloader.
///
/// Return: MemProfile
/// The memory profile constructed from the exported linker symbols and the relevant multiboot info.
///
pub fn init(mb_info: *multiboot.multiboot_info_t) MemProfile {
log.logInfo("Init mem\n", .{});
defer log.logInfo("Done mem\n", .{});

const mods_count = mb_info.mods_count;
ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);
const mmap_addr = mb_info.mmap_addr;
const num_mmap_entries = mb_info.mmap_length / @sizeOf(multiboot.multiboot_memory_map_t);
return .{
.vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
.fixed_alloc_size = FIXED_ALLOC_SIZE,
.boot_modules = @intToPtr([*]multiboot.multiboot_mod_list, physToVirt(@intCast(usize, mb_info.mods_addr)))[0..mods_count],
.mem_map = @intToPtr([*]multiboot.multiboot_memory_map_t, mmap_addr)[0..num_mmap_entries],
};
}

test "physToVirt" {
ADDR_OFFSET = 0xC0000000;
const offset: usize = ADDR_OFFSET;
@@ -281,15 +281,14 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) !
defer log.logInfo("Done panic\n", .{});

// Exit if we haven't loaded all debug modules
if (mem_profile.boot_modules.len < 1)
if (mem_profile.modules.len < 1)
return;
var kmap_start: usize = 0;
var kmap_end: usize = 0;
for (mem_profile.boot_modules) |module| {
const mod_start = mem.physToVirt(@intCast(usize, module.mod_start));
const mod_end = mem.physToVirt(@intCast(usize, module.mod_end) - 1);
const mod_str_ptr = mem.physToVirt(@intToPtr([*:0]u8, module.cmdline));
if (std.mem.eql(u8, std.mem.span(mod_str_ptr), "kernel.map")) {
for (mem_profile.modules) |module| {
const mod_start = module.region.start;
const mod_end = module.region.end - 1;
if (std.mem.eql(u8, module.name, "kernel.map")) {
kmap_start = mod_start;
kmap_end = mod_end;
break;
@@ -302,7 +301,6 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) !

var syms = SymbolMap.init(allocator);
errdefer syms.deinit();
var file_index = kmap_start;
var kmap_ptr = @intToPtr([*]u8, kmap_start);
while (@ptrToInt(kmap_ptr) < kmap_end - 1) {
const entry = try parseMapEntry(&kmap_ptr, @intToPtr(*const u8, kmap_end));
@@ -7,7 +7,6 @@ const MemProfile = (if (is_test) @import(mock_path ++ "mem_mock.zig") else @impo
const testing = std.testing;
const panic = @import("panic.zig").panic;
const log = if (is_test) @import(mock_path ++ "log_mock.zig") else @import("log.zig");
const MEMORY_AVAILABLE = @import("multiboot.zig").MULTIBOOT_MEMORY_AVAILABLE;
const Bitmap = @import("bitmap.zig").Bitmap;

const PmmBitmap = Bitmap(u32);
@@ -105,20 +104,19 @@ pub fn init(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
bitmap = PmmBitmap.init(mem.mem_kb * 1024 / BLOCK_SIZE, allocator) catch @panic("Bitmap allocation failed");

// Occupy the regions of memory that the memory map describes as reserved
for (mem.mem_map) |entry| {
if (entry.@"type" != MEMORY_AVAILABLE) {
var addr = std.mem.alignBackward(@intCast(usize, entry.addr), BLOCK_SIZE);
var end = @intCast(usize, entry.addr + (entry.len - 1));
// If the end address can be aligned without overflowing then align it
if (end <= std.math.maxInt(usize) - BLOCK_SIZE)
end = std.mem.alignForward(end, BLOCK_SIZE);
while (addr < end) : (addr += BLOCK_SIZE) {
setAddr(addr) catch |e| switch (e) {
// We can ignore out of bounds errors as the memory won't be available anyway
PmmBitmap.BitmapError.OutOfBounds => break,
else => panic(@errorReturnTrace(), "Failed setting address 0x{x} from memory map as occupied: {}", .{ addr, e }),
};
}
for (mem.physical_reserved) |entry| {
var addr = std.mem.alignBackward(entry.start, BLOCK_SIZE);
var end = entry.end - 1;
// If the end address can be aligned without overflowing then align it
if (end <= std.math.maxInt(usize) - BLOCK_SIZE) {
end = std.mem.alignForward(end, BLOCK_SIZE);
}
while (addr < end) : (addr += BLOCK_SIZE) {
setAddr(addr) catch |e| switch (e) {
// We can ignore out of bounds errors as the memory won't be available anyway
PmmBitmap.BitmapError.OutOfBounds => break,
else => panic(@errorReturnTrace(), "Failed setting address 0x{x} from memory map as occupied: {}", .{ addr, e }),
};
}
}
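The PMM works in whole blocks, so each reserved range is widened to block boundaries before its bits are set: the start is aligned down, and the end (minus one) is aligned up unless that would overflow. A worked example with a 4 KiB block size and made-up addresses:

const std = @import("std");
const assert = std.debug.assert;

pub fn main() void {
    const BLOCK_SIZE: usize = 4096;
    // Hypothetical reserved range straddling a block boundary.
    const entry_start: usize = 0x9F800;
    const entry_end: usize = 0xA0800;
    const addr = std.mem.alignBackward(entry_start, BLOCK_SIZE); // rounds down to 0x9F000
    const end = std.mem.alignForward(entry_end - 1, BLOCK_SIZE); // rounds up to 0xA1000
    assert(addr == 0x9F000);
    assert(end == 0xA1000);
    // The loop above would then mark two blocks as occupied: 0x9F000 and 0xA0000.
}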
@@ -144,12 +142,10 @@ fn runtimeTests(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
panic(null, "PMM allocated the same address twice: 0x{x}", .{alloced});
}
prev_alloc = alloced;
for (mem.mem_map) |entry| {
if (entry.@"type" != MEMORY_AVAILABLE) {
var addr = std.mem.alignBackward(@intCast(usize, entry.addr), BLOCK_SIZE);
if (addr == alloced) {
panic(null, "PMM allocated an address that should be reserved by the memory map: 0x{x}", .{addr});
}
for (mem.physical_reserved) |entry| {
var addr = std.mem.alignBackward(entry.start, BLOCK_SIZE);
if (addr == alloced) {
panic(null, "PMM allocated an address that should be reserved by the memory map: 0x{x}", .{addr});
}
}
alloc_list.append(alloced) catch |e| panic(@errorReturnTrace(), "Failed to add PMM allocation to list: {}", .{e});
@@ -7,7 +7,6 @@ const bitmap = @import("bitmap.zig");
const pmm = @import("pmm.zig");
const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
const tty = @import("tty.zig");
const multiboot = @import("multiboot.zig");
const log = @import("log.zig");
const panic = @import("panic.zig").panic;
const arch = @import("arch.zig").internals;
@@ -195,10 +194,8 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
///
/// Arguments:
/// INOUT self: *Self - The manager to modify
/// IN virtual_start: usize - The start of the virtual region
/// IN virtual_end: usize - The end of the virtual region
/// IN physical_start: usize - The start of the physical region
/// IN physical_end: usize - The end of the physical region
/// IN virtual: mem.Range - The virtual region to set
/// IN physical: ?mem.Range - The physical region to map to or null if only the virtual region is to be set
/// IN attrs: Attributes - The attributes to apply to the memory regions
///
/// Error: VmmError || Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError
@@ -211,36 +208,46 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
/// std.mem.Allocator.Error.OutOfMemory - Allocating the required memory failed
/// MapperError.* - The causes depend on the mapper used
///
pub fn set(self: *Self, virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes) (VmmError || bitmap.Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError)!void {
var virt = virtual_start;
while (virt < virtual_end) : (virt += BLOCK_SIZE) {
pub fn set(self: *Self, virtual: mem.Range, physical: ?mem.Range, attrs: Attributes) (VmmError || bitmap.Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError)!void {
var virt = virtual.start;
while (virt < virtual.end) : (virt += BLOCK_SIZE) {
if (try self.isSet(virt))
return VmmError.AlreadyAllocated;
}
var phys = physical_start;
while (phys < physical_end) : (phys += BLOCK_SIZE) {
if (try pmm.isSet(phys))
return VmmError.PhysicalAlreadyAllocated;
}
if (virtual_end - virtual_start != physical_end - physical_start)
return VmmError.PhysicalVirtualMismatch;
if (physical_start > physical_end)
return VmmError.InvalidPhysAddresses;
if (virtual_start > virtual_end)
if (virtual.start > virtual.end) {
return VmmError.InvalidVirtAddresses;
}

virt = virtual_start;
while (virt < virtual_end) : (virt += BLOCK_SIZE) {
if (physical) |p| {
if (virtual.end - virtual.start != p.end - p.start) {
return VmmError.PhysicalVirtualMismatch;
}
if (p.start > p.end) {
return VmmError.InvalidPhysAddresses;
}
var phys = p.start;
while (phys < p.end) : (phys += BLOCK_SIZE) {
if (try pmm.isSet(phys)) {
return VmmError.PhysicalAlreadyAllocated;
}
}
}

var phys_list = std.ArrayList(usize).init(self.allocator);

virt = virtual.start;
while (virt < virtual.end) : (virt += BLOCK_SIZE) {
try self.bmp.setEntry(virt / BLOCK_SIZE);
}

try self.mapper.mapFn(virtual_start, virtual_end, physical_start, physical_end, attrs, self.allocator, self.payload);
if (physical) |p| {
try self.mapper.mapFn(virtual.start, virtual.end, p.start, p.end, attrs, self.allocator, self.payload);

var phys_list = std.ArrayList(usize).init(self.allocator);
phys = physical_start;
while (phys < physical_end) : (phys += BLOCK_SIZE) {
try pmm.setAddr(phys);
try phys_list.append(phys);
var phys = p.start;
while (phys < p.end) : (phys += BLOCK_SIZE) {
try pmm.setAddr(phys);
try phys_list.append(phys);
}
}
_ = try self.allocations.put(virt, Allocation{ .physical = phys_list });
}
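With the new signature, callers describe a mapping with mem.Range values and may pass null for the physical side when only the virtual blocks should be reserved. A rough usage sketch; the helper name, the addresses and the VMM instance are placeholders, not code from this commit:

fn reserveExampleRegions(kernel_vmm: *VirtualMemoryManager(arch.VmmPayload)) !void {
    const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true };
    // Virtual region backed by a specific physical range: the physical blocks are
    // claimed from the PMM and handed to the mapper.
    try kernel_vmm.set(.{ .start = 0xC1000000, .end = 0xC1004000 }, mem.Range{ .start = 0x01000000, .end = 0x01004000 }, attrs);
    // Passing null only marks the virtual blocks as in use; nothing is mapped.
    try kernel_vmm.set(.{ .start = 0xC2000000, .end = 0xC2004000 }, null, attrs);
}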
@@ -325,23 +332,19 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
}

///
/// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code, TTY, multiboot info and boot modules
/// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code and reserved virtual memory
///
/// Arguments:
/// IN mem_profile: *const mem.MemProfile - The system's memory profile. This is used to find the kernel code region and boot modules
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info
/// INOUT allocator: *std.mem.Allocator - The allocator to use when needing to allocate memory
/// IN comptime Payload: type - The type of the data to pass as a payload to the virtual memory manager
/// IN mapper: Mapper - The memory mapper to call when allocating and freeing virtual memory
/// IN payload: Payload - The payload data to pass to the virtual memory manager
///
/// Return: VirtualMemoryManager
/// The virtual memory manager created with all stated regions allocated
/// The virtual memory manager created with all reserved virtual regions allocated
///
/// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - The allocator cannot allocate the memory required
///
pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
log.logInfo("Init vmm\n", .{});
defer log.logInfo("Done vmm\n", .{});

@@ -350,37 +353,21 @@ pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_in
// Map in kernel
// Calculate start and end of mapping
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
vmm.set(v_start, v_end, p_start, p_end, .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping kernel code in VMM: {}", .{e});
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);
vmm.set(.{ .start = v_start, .end = v_end }, mem.Range{ .start = p_start, .end = p_end }, .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping kernel code in VMM: {}", .{e});

// Map in tty
const tty_addr = tty.getVideoBufferAddress();
const tty_phys = mem.virtToPhys(tty_addr);
const tty_buff_size = 32 * 1024;
vmm.set(tty_addr, tty_addr + tty_buff_size, tty_phys, tty_phys + tty_buff_size, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping TTY in VMM: {}", .{e});

// Map in the multiboot info struct
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
vmm.set(mb_info_addr, mb_info_end, mem.virtToPhys(mb_info_addr), mem.virtToPhys(mb_info_end), .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping multiboot info in VMM: {}", .{e});

// Map in each boot module
for (mem_profile.boot_modules) |*module| {
const mod_v_struct_start = std.mem.alignBackward(@ptrToInt(module), BLOCK_SIZE);
const mod_v_struct_end = std.mem.alignForward(mod_v_struct_start + @sizeOf(multiboot.multiboot_module_t), BLOCK_SIZE);
vmm.set(mod_v_struct_start, mod_v_struct_end, mem.virtToPhys(mod_v_struct_start), mem.virtToPhys(mod_v_struct_end), .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
// A previous allocation could cover this region so the AlreadyAllocated error can be ignored
VmmError.AlreadyAllocated => break,
else => panic(@errorReturnTrace(), "Failed mapping boot module struct in VMM: {}", .{e}),
for (mem_profile.virtual_reserved) |entry| {
const virtual = mem.Range{ .start = std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE), .end = std.mem.alignForward(entry.virtual.end, BLOCK_SIZE) };
const physical: ?mem.Range = if (entry.physical) |phys| mem.Range{ .start = std.mem.alignBackward(phys.start, BLOCK_SIZE), .end = std.mem.alignForward(phys.end, BLOCK_SIZE) } else null;
vmm.set(virtual, physical, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
VmmError.AlreadyAllocated => {},
else => panic(@errorReturnTrace(), "Failed mapping region in VMM {}: {}\n", .{ entry, e }),
};
const mod_p_start = std.mem.alignBackward(module.mod_start, BLOCK_SIZE);
const mod_p_end = std.mem.alignForward(module.mod_end, BLOCK_SIZE);
vmm.set(mem.physToVirt(mod_p_start), mem.physToVirt(mod_p_end), mod_p_start, mod_p_end, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping boot module in VMM: {}", .{e});
}

if (build_options.rt_test) runtimeTests(arch.VmmPayload, vmm, mem_profile, mb_info);
if (build_options.rt_test) runtimeTests(arch.VmmPayload, vmm, mem_profile);
return vmm;
}
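Since vmm.init now just walks mem_profile.virtual_reserved, mapping something new no longer means adding another special case here; the architecture's initMem appends another entry instead. A hypothetical fragment that would sit next to the tty mapping in initMem (the framebuffer address and size are invented):

// Hypothetical: reserve and map an 8 MiB framebuffer by adding it to the profile.
const fb_phys = mem.Range{ .start = 0xFD000000, .end = 0xFD000000 + 8 * 1024 * 1024 };
try reserved_virtual_mem.append(.{
    .physical = fb_phys,
    .virtual = .{ .start = mem.physToVirt(fb_phys.start), .end = mem.physToVirt(fb_phys.end) },
});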
@@ -474,7 +461,7 @@ test "set" {
const pstart = vstart + 123;
const pend = vend + 123;
const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true };
try vmm.set(vstart, vend, pstart, pend, attrs);
try vmm.set(.{ .start = vstart, .end = vend }, mem.Range{ .start = pstart, .end = pend }, attrs);

var allocations = test_allocations orelse unreachable;
// The entries before the virtual start shouldn't be set
@@ -517,7 +504,17 @@ fn testInit(num_entries: u32) std.mem.Allocator.Error!VirtualMemoryManager(u8) {
}
}
var allocations = test_allocations orelse unreachable;
const mem_profile = mem.MemProfile{ .vaddr_end = undefined, .vaddr_start = undefined, .physaddr_start = undefined, .physaddr_end = undefined, .mem_kb = num_entries * BLOCK_SIZE / 1024, .fixed_alloc_size = undefined, .mem_map = &[_]multiboot.multiboot_memory_map_t{}, .boot_modules = &[_]multiboot.multiboot_module_t{} };
const mem_profile = mem.MemProfile{
.vaddr_end = undefined,
.vaddr_start = undefined,
.physaddr_start = undefined,
.physaddr_end = undefined,
.mem_kb = num_entries * BLOCK_SIZE / 1024,
.fixed_allocator = undefined,
.virtual_reserved = &[_]mem.Map{},
.physical_reserved = &[_]mem.Range{},
.modules = &[_]mem.Module{},
};
pmm.init(&mem_profile, std.heap.page_allocator);
return try VirtualMemoryManager(u8).init(0, num_entries * BLOCK_SIZE, std.heap.page_allocator, test_mapper, 39);
}
@@ -567,55 +564,29 @@ fn testUnmap(vstart: usize, vend: usize, payload: u8) (std.mem.Allocator.Error |
/// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info struct that should also be reserved
///
fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t) void {
fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile) void {
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const tty_addr = tty.getVideoBufferAddress();
const tty_phys = mem.virtToPhys(tty_addr);
const tty_buff_size = 32 * 1024;
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem.FIXED_ALLOC_SIZE, BLOCK_SIZE);

// Make sure all blocks before the mb info are not set
var vaddr = vmm.start;
while (vaddr < mb_info_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before mb_info was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the mb info are set
while (vaddr < mb_info_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for mb_info was not set: {x}", .{vaddr});
}

// Make sure all blocks before the kernel code are not set
while (vaddr < tty_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before tty was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the kernel code are set
while (vaddr < tty_addr + tty_buff_size) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for tty was not set: {x}", .{vaddr});
}

// Make sure all blocks before the kernel code are not set
while (vaddr < v_start) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address before kernel code was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the kernel code are set
while (vaddr < v_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {x}", .{ vaddr, e });
if (!set) panic(null, "Address for kernel code was not set: {x}", .{vaddr});
}

// Make sure all blocks after the kernel code are not set
while (vaddr < vmm.end - BLOCK_SIZE) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if address after {x} is set: {x}", .{ vaddr, e });
if (set) panic(null, "Address after kernel code was set: {x}", .{vaddr});
const set = vmm.isSet(vaddr) catch unreachable;
var should_be_set = false;
if (vaddr < v_end and vaddr >= v_start) {
should_be_set = true;
} else {
for (mem_profile.virtual_reserved) |entry| {
if (vaddr >= std.mem.alignBackward(entry.virtual.start, BLOCK_SIZE) and vaddr < std.mem.alignForward(entry.virtual.end, BLOCK_SIZE)) {
should_be_set = true;
break;
}
}
}
if (set and !should_be_set) {
panic(@errorReturnTrace(), "An address was set in the VMM when it shouldn't have been: 0x{x}\n", .{vaddr});
} else if (!set and should_be_set) {
panic(@errorReturnTrace(), "An address was not set in the VMM when it should have been: 0x{x}\n", .{vaddr});
}
}

log.logInfo("VMM: Tested allocations\n", .{});
@@ -4,7 +4,6 @@ const mem = @import("mem_mock.zig");
const MemProfile = mem.MemProfile;
const gdt = @import("gdt_mock.zig");
const idt = @import("idt_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const vmm = @import("vmm_mock.zig");
const paging = @import("paging_mock.zig");
@@ -41,6 +40,14 @@ pub const VmmPayload = u8;
pub const KERNEL_VMM_PAYLOAD: usize = 0;
pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB;
pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = undefined;
pub const BootPayload = u8;

// The virtual/physical start/end of the kernel code
var KERNEL_PHYSADDR_START: u32 = 0x00100000;
var KERNEL_PHYSADDR_END: u32 = 0x01000000;
var KERNEL_VADDR_START: u32 = 0xC0100000;
var KERNEL_VADDR_END: u32 = 0xC1100000;
var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;

pub fn outb(port: u16, data: u8) void {
return mock_framework.performAction("outb", void, .{ port, data });
@@ -94,7 +101,22 @@ pub fn haltNoInterrupts() noreturn {
while (true) {}
}

pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile, allocator: *Allocator) void {
pub fn initMem(payload: BootPayload) std.mem.Allocator.Error!mem.MemProfile {
return MemProfile{
.vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = 0,
.fixed_allocator = undefined,
.virtual_reserved = undefined,
.physical_reserved = undefined,
.modules = undefined,
};
}

pub fn init(payload: BootPayload, mem_profile: *const MemProfile, allocator: *Allocator) void {
// I'll get back to this as this doesn't affect the GDT testing.
// When I come on to the mem.zig testing, I'll fix :)
//return mock_framework.performAction("init", void, mem_profile, allocator);
@@ -1,40 +1,36 @@
const std = @import("std");
const multiboot = @import("../../../src/kernel/multiboot.zig");

pub const Module = struct {
region: Range,
name: []const u8,
};

pub const Map = struct {
virtual: Range,
physical: ?Range,
};

pub const Range = struct {
start: usize,
end: usize,
};

pub const MemProfile = struct {
vaddr_end: [*]u8,
vaddr_start: [*]u8,
physaddr_end: [*]u8,
physaddr_start: [*]u8,
mem_kb: u32,
fixed_alloc_size: u32,
mem_map: []multiboot.multiboot_memory_map_t,
boot_modules: []multiboot.multiboot_module_t,
modules: []Module,
virtual_reserved: []Map,
physical_reserved: []Range,
fixed_allocator: std.heap.FixedBufferAllocator,
};

// The virtual/physical start/end of the kernel code
var KERNEL_PHYSADDR_START: u32 = 0x00100000;
var KERNEL_PHYSADDR_END: u32 = 0x01000000;
var KERNEL_VADDR_START: u32 = 0xC0100000;
var KERNEL_VADDR_END: u32 = 0xC1100000;
var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;

// The size of the fixed allocator used before the heap is set up. Set to 1MiB.
const FIXED_ALLOC_SIZE = 1024 * 1024;

pub fn init(mb_info: *multiboot.multiboot_info_t) MemProfile {
return MemProfile{
.vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
.vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
.physaddr_end = @ptrCast([*]u8, &KERNEL_PHYSADDR_END),
.physaddr_start = @ptrCast([*]u8, &KERNEL_PHYSADDR_START),
// Total memory available including the initial 1MiB that grub doesn't include
.mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
.fixed_alloc_size = FIXED_ALLOC_SIZE,
.mem_map = undefined,
.boot_modules = undefined,
};
}

pub fn virtToPhys(virt: var) @TypeOf(virt) {
const T = @TypeOf(virt);
return switch (@typeInfo(T)) {
@@ -1,5 +1,4 @@
const mem = @import("mem_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const bitmap = @import("../../../src/kernel/bitmap.zig");
const arch = @import("arch_mock.zig");
const std = @import("std");
@@ -34,6 +33,6 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
};
}

pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
pub fn init(mem_profile: *const mem.MemProfile, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
return std.mem.Allocator.Error.OutOfMemory;
}