Merge pull request #227 from ZystemOS/bugfix/paging-virtToPhys
Use vmm.virtToPhys in x86 paging instead of mem.virtToPhys
commit 3625c996cf
7 changed files with 75 additions and 52 deletions
@@ -360,6 +360,20 @@ pub fn initMem(mb_info: BootPayload) Allocator.Error!MemProfile {
         }
     }
 
+    // Map the kernel code
+    const kernel_virt = mem.Range{
+        .start = @ptrToInt(&KERNEL_VADDR_START),
+        .end = @ptrToInt(&KERNEL_STACK_START),
+    };
+    const kernel_phy = mem.Range{
+        .start = mem.virtToPhys(kernel_virt.start),
+        .end = mem.virtToPhys(kernel_virt.end),
+    };
+    try reserved_virtual_mem.append(.{
+        .virtual = kernel_virt,
+        .physical = kernel_phy,
+    });
+
     // Map the multiboot info struct itself
     const mb_region = mem.Range{
         .start = @ptrToInt(mb_info),
@@ -424,20 +438,6 @@ pub fn initMem(mb_info: BootPayload) Allocator.Error!MemProfile {
         .physical = kernel_stack_phy,
     });
 
-    // Map the rest of the kernel
-    const kernel_virt = mem.Range{
-        .start = @ptrToInt(&KERNEL_VADDR_START),
-        .end = @ptrToInt(&KERNEL_STACK_START),
-    };
-    const kernel_phy = mem.Range{
-        .start = mem.virtToPhys(kernel_virt.start),
-        .end = mem.virtToPhys(kernel_virt.end),
-    };
-    try reserved_virtual_mem.append(.{
-        .virtual = kernel_virt,
-        .physical = kernel_phy,
-    });
-
     return MemProfile{
         .vaddr_end = @ptrCast([*]u8, &KERNEL_VADDR_END),
         .vaddr_start = @ptrCast([*]u8, &KERNEL_VADDR_START),
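
Note: in the first file, the kernel code reservation is moved rather than changed. initMem now registers the region from KERNEL_VADDR_START to KERNEL_STACK_START with reserved_virtual_mem before the multiboot info mapping instead of just before the MemProfile is returned. mem.virtToPhys remains correct here: the kernel image really is mapped at the fixed linker offset, so the offset arithmetic holds for this region.
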
@@ -12,8 +12,9 @@ const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("ar
 const isr = @import("isr.zig");
 const MemProfile = @import("../../mem.zig").MemProfile;
 const tty = @import("../../tty.zig");
-const mem = @import("../../mem.zig");
-const vmm = @import("../../vmm.zig");
+const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("../../mem.zig");
+const vmm = if (is_test) @import(mock_path ++ "vmm_mock.zig") else @import("../../vmm.zig");
+const pmm = @import("../../pmm.zig");
 const multiboot = @import("multiboot.zig");
 const Allocator = std.mem.Allocator;
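
The paging module's mem and vmm imports are now test-aware, so the unit tests further down can run against mem_mock.zig and vmm_mock.zig instead of pulling in the real kernel modules.
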
@@ -208,7 +209,9 @@ fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start:
         // Create a table and put the physical address in the dir entry
         table = &(try allocator.alignedAlloc(Table, @truncate(u29, PAGE_SIZE_4KB), 1))[0];
         @memset(@ptrCast([*]u8, table), 0, @sizeOf(Table));
-        const table_phys_addr = @ptrToInt(mem.virtToPhys(table));
+        const table_phys_addr = vmm.kernel_vmm.virtToPhys(@ptrToInt(table)) catch |e| {
+            panic(@errorReturnTrace(), "Failed getting the physical address for an allocated page table: {}\n", .{e});
+        };
         dir_entry.* |= DENTRY_PAGE_ADDR & table_phys_addr;
         dir.tables[entry] = table;
     }
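
This hunk is the heart of the fix. mem.virtToPhys only undoes the fixed kernel offset from the linker script, which is wrong for a page table allocated out of VMM-managed memory; the kernel VMM tracks which physical blocks back each virtual allocation, so it is the one that has to answer. A minimal sketch of the two strategies in Zig — the names (memVirtToPhys, vmmVirtToPhys, Allocation) and constants are hypothetical simplifications, not the project's actual definitions:

// Assumed illustrative constants; the real kernel derives these elsewhere.
const KERNEL_ADDR_OFFSET: usize = 0xC0000000;
const BLOCK_SIZE: usize = 4096;

/// Offset-based translation: only valid for the statically mapped kernel image.
fn memVirtToPhys(virt: usize) usize {
    return virt - KERNEL_ADDR_OFFSET;
}

/// One tracked allocation: a virtual block and the physical block backing it.
const Allocation = struct { virt: usize, phys: usize };

/// VMM-style translation: find the allocation covering the address and
/// translate within that block.
fn vmmVirtToPhys(allocations: []const Allocation, virt: usize) error{NotAllocated}!usize {
    for (allocations) |a| {
        if (virt >= a.virt and virt < a.virt + BLOCK_SIZE) {
            return a.phys + (virt - a.virt);
        }
    }
    return error.NotAllocated;
}

The VMM lookup can fail, hence the new catch with a panic: a page table whose physical address cannot be resolved is unrecoverable at this point.
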
@@ -499,29 +502,33 @@ test "virtToTableEntryIdx" {
 test "mapDirEntry" {
     var allocator = std.heap.page_allocator;
     var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
+    const attrs = vmm.Attributes{ .kernel = false, .writable = false, .cachable = false };
+    vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(PAGE_SIZE_4MB, 0xFFFFFFFF, allocator, arch.VMM_MAPPER, undefined);
     {
         const phys: usize = 0 * PAGE_SIZE_4MB;
         const phys_end: usize = phys + PAGE_SIZE_4MB;
         const virt: usize = 1 * PAGE_SIZE_4MB;
         const virt_end: usize = virt + PAGE_SIZE_4MB;
-        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, .{ .kernel = true, .writable = true, .cachable = true }, allocator);
+        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, attrs, allocator);
 
         const entry_idx = virtToDirEntryIdx(virt);
         const entry = dir.entries[entry_idx];
         const table = dir.tables[entry_idx] orelse unreachable;
-        checkDirEntry(entry, virt, virt_end, phys, .{ .kernel = true, .writable = true, .cachable = true }, table, true);
+        checkDirEntry(entry, virt, virt_end, phys, attrs, table, true);
     }
     {
         const phys: usize = 7 * PAGE_SIZE_4MB;
         const phys_end: usize = phys + PAGE_SIZE_4MB;
         const virt: usize = 8 * PAGE_SIZE_4MB;
         const virt_end: usize = virt + PAGE_SIZE_4MB;
-        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, .{ .kernel = false, .writable = false, .cachable = false }, allocator);
+        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, attrs, allocator);
 
         const entry_idx = virtToDirEntryIdx(virt);
         const entry = dir.entries[entry_idx];
         const table = dir.tables[entry_idx] orelse unreachable;
-        checkDirEntry(entry, virt, virt_end, phys, .{ .kernel = false, .writable = false, .cachable = false }, table, true);
+        checkDirEntry(entry, virt, virt_end, phys, attrs, table, true);
     }
 }
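
Because mapDirEntry now routes translation through vmm.kernel_vmm, the test has to provide one: it seeds the global with a manager spanning PAGE_SIZE_4MB to 0xFFFFFFFF (payload left undefined) and shares a single attrs value between both sub-tests and their checkDirEntry assertions.
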
@@ -29,9 +29,6 @@ comptime {
     }
 }
 
-/// The virtual memory manager associated with the kernel address space
-var kernel_vmm: vmm.VirtualMemoryManager(arch.VmmPayload) = undefined;
-
 // This is for unit testing as we need to export KERNEL_ADDR_OFFSET as it is no longer available
 // from the linker script
 // These will need to be kept up to date with the debug logs in the mem init.
@@ -75,7 +72,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
     };
 
     pmm.init(&mem_profile, &fixed_allocator.allocator);
-    kernel_vmm = vmm.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
+    var kernel_vmm = vmm.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
         panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});
     };
@@ -89,7 +86,7 @@ export fn kmain(boot_payload: arch.BootPayload) void {
     if (!std.math.isPowerOfTwo(heap_size)) {
         heap_size = std.math.floorPowerOfTwo(usize, heap_size);
     }
-    var kernel_heap = heap.init(arch.VmmPayload, &kernel_vmm, vmm.Attributes{ .kernel = true, .writable = true, .cachable = true }, heap_size) catch |e| {
+    var kernel_heap = heap.init(arch.VmmPayload, kernel_vmm, vmm.Attributes{ .kernel = true, .writable = true, .cachable = true }, heap_size) catch |e| {
         panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel heap: {}\n", .{e});
     };
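
kmain no longer owns the manager: the file-scope kernel_vmm global is deleted, the local binding holds the *VirtualMemoryManager that vmm.init now returns, and heap.init takes that pointer as-is, so the old &kernel_vmm is dropped.
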
@@ -42,10 +42,10 @@ pub const MemProfile = struct {
     /// The modules loaded into memory at boot.
     modules: []Module,
 
-    /// The virtual regions of reserved memory. Should not include what is tracked by the vaddr_* fields but should include the regions occupied by the modules. These are reserved and mapped by the VMM
+    /// The virtual regions of reserved memory. These are reserved and mapped by the VMM
     virtual_reserved: []Map,
 
-    /// The physical regions of reserved memory. Should not include what is tracked by the physaddr_* fields but should include the regions occupied by the modules. These are reserved by the PMM
+    /// The physical regions of reserved memory. These are reserved by the PMM
     physical_reserved: []Range,
 
     /// The allocator to use before a heap can be set up.
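
The doc comments lose their "should not include what is tracked by the vaddr_*/physaddr_* fields" caveat because it no longer holds: initMem now adds the kernel image region, which those fields track, to virtual_reserved.
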
@@ -9,7 +9,7 @@ const pmm = @import("pmm.zig");
 const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
 const tty = @import("tty.zig");
 const panic = @import("panic.zig").panic;
-const arch = @import("arch.zig").internals;
+const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("arch.zig").internals;
 const Allocator = std.mem.Allocator;
 
 /// Attributes for a virtual memory allocation
@@ -113,6 +113,9 @@ pub const VmmError = error{
 /// This is the start of the memory owned by the kernel and so is where the kernel VMM starts
 extern var KERNEL_ADDR_OFFSET: *u32;
 
+/// The virtual memory manager associated with the kernel address space
+pub var kernel_vmm: VirtualMemoryManager(arch.VmmPayload) = undefined;
+
 ///
 /// Construct a virtual memory manager to keep track of allocated and free virtual memory regions within a certain space
 ///
@@ -193,10 +196,6 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
         pub fn virtToPhys(self: *const Self, virt: usize) VmmError!usize {
             for (self.allocations.unmanaged.entries.items) |entry| {
                 const vaddr = entry.key;
-                // If we've gone past the address without finding a covering region then it hasn't been mapped
-                if (vaddr > virt) {
-                    break;
-                }
 
                 const allocation = entry.value;
                 // If this allocation range covers the virtual address then figure out the corresponding physical block
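
Dropping the early exit fixes a lookup bug: the allocations map iterates in insertion order, not ascending key order, so seeing a start address above virt proves nothing about later entries. A hedged sketch of the failure mode, reusing the hypothetical Allocation and vmmVirtToPhys helpers from the earlier sketch (and assuming const std = @import("std"); is in scope):

test "early break can miss a covering allocation" {
    const entries = [_]Allocation{
        .{ .virt = 0x400000, .phys = 0x400000 }, // inserted first
        .{ .virt = 0x100000, .phys = 0x0 }, // inserted later, covers the query
    };
    const virt: usize = 0x100000 + 42;
    // Breaking on 0x400000 > virt would report NotAllocated; the full scan
    // finds the covering block and translates within it.
    try std.testing.expectEqual(@as(usize, 42), try vmmVirtToPhys(&entries, virt));
}
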
@@ -308,15 +307,19 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
             }
 
             if (physical) |p| {
-                try self.mapper.mapFn(virtual.start, virtual.end, p.start, p.end, attrs, self.allocator, self.payload);
                 var phys = p.start;
                 while (phys < p.end) : (phys += BLOCK_SIZE) {
                     try pmm.setAddr(phys);
                     try phys_list.append(phys);
                 }
             }
 
+            // Do this before mapping as the mapper may depend on the allocation being tracked
             _ = try self.allocations.put(virtual.start, Allocation{ .physical = phys_list });
+
+            if (physical) |p| {
+                try self.mapper.mapFn(virtual.start, virtual.end, p.start, p.end, attrs, self.allocator, self.payload);
+            }
         }
 
         ///
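
The reordering in set is the other half of the change: for the kernel VMM on x86 the mapper callback is the paging code above, which can allocate a page table from memory inside the very region being mapped and then ask kernel_vmm.virtToPhys about it. Recording the allocation (and claiming its physical blocks from the PMM) before invoking mapFn keeps that lookup from running ahead of the bookkeeping, as the new inline comment notes.
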
@@ -418,11 +421,11 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
 /// Error: Allocator.Error
 /// error.OutOfMemory - The allocator cannot allocate the memory required
 ///
-pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
+pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator.Error!*VirtualMemoryManager(arch.VmmPayload) {
     log.info("Init\n", .{});
     defer log.info("Done\n", .{});
 
-    var vmm = try VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(&KERNEL_ADDR_OFFSET), 0xFFFFFFFF, allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
+    kernel_vmm = try VirtualMemoryManager(arch.VmmPayload).init(@ptrToInt(&KERNEL_ADDR_OFFSET), 0xFFFFFFFF, allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
 
     // Map all the reserved virtual addresses.
     for (mem_profile.virtual_reserved) |entry| {
@@ -437,17 +440,17 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator
             }
         else
             null;
-        vmm.set(virtual, physical, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
+        kernel_vmm.set(virtual, physical, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
             VmmError.AlreadyAllocated => {},
             else => panic(@errorReturnTrace(), "Failed mapping region in VMM {X}: {}\n", .{ entry, e }),
         };
     }
 
     switch (build_options.test_mode) {
-        .Initialisation => runtimeTests(arch.VmmPayload, vmm, mem_profile),
+        .Initialisation => runtimeTests(arch.VmmPayload, kernel_vmm, mem_profile),
         else => {},
     }
-    return vmm;
+    return &kernel_vmm;
 }
 
 test "virtToPhys" {
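
init now hands back a pointer into vmm.zig's own global instead of a by-value copy. A minimal, self-contained sketch of why that matters — all names here are hypothetical, not the project's API:

const Manager = struct {
    mapped: usize = 0,
};

var kernel_vmm: Manager = undefined;

fn initByValue() Manager {
    kernel_vmm = Manager{};
    return kernel_vmm; // copy: the caller's manager and the global diverge
}

fn initByPointer() *Manager {
    kernel_vmm = Manager{};
    return &kernel_vmm; // caller and global alias the same manager
}

test "by-value copy diverges from the global" {
    const copy = initByValue();
    kernel_vmm.mapped += 1; // e.g. the x86 mapper records a page table
    try @import("std").testing.expectEqual(@as(usize, 0), copy.mapped);

    const ptr = initByPointer();
    kernel_vmm.mapped += 1;
    try @import("std").testing.expectEqual(@as(usize, 1), ptr.mapped);
}

With the by-value version, bookkeeping done through vmm.kernel_vmm (for example by mapDirEntry) would be invisible to the copy kmain holds, which is exactly the situation this PR depends on avoiding.
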
@@ -29,12 +29,13 @@ pub const MemProfile = struct {
 };
 
 const FIXED_ALLOC_SIZE = 1024 * 1024;
+const ADDR_OFFSET: usize = 100;
 
 pub fn virtToPhys(virt: anytype) @TypeOf(virt) {
     const T = @TypeOf(virt);
     return switch (@typeInfo(T)) {
-        .Pointer => @intToPtr(T, @ptrToInt(virt) - KERNEL_ADDR_OFFSET),
-        .Int => virt - KERNEL_ADDR_OFFSET,
+        .Pointer => @intToPtr(T, @ptrToInt(virt) - ADDR_OFFSET),
+        .Int => virt - ADDR_OFFSET,
         else => @compileError("Only pointers and integers are supported"),
     };
 }
@@ -42,8 +43,8 @@ pub fn virtToPhys(virt: anytype) @TypeOf(virt) {
 pub fn physToVirt(phys: anytype) @TypeOf(phys) {
     const T = @TypeOf(phys);
     return switch (@typeInfo(T)) {
-        .Pointer => @intToPtr(T, @ptrToInt(phys) + KERNEL_ADDR_OFFSET),
-        .Int => phys + KERNEL_ADDR_OFFSET,
+        .Pointer => @intToPtr(T, @ptrToInt(phys) + ADDR_OFFSET),
+        .Int => phys + ADDR_OFFSET,
         else => @compileError("Only pointers and integers are supported"),
     };
 }
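
The mocks swap the linker-provided KERNEL_ADDR_OFFSET for a local ADDR_OFFSET of 100, so unit tests no longer depend on a linker-script symbol. A quick sketch of what the mock arithmetic then does — a hypothetical test, not one from this PR, assuming std is in scope:

test "mock virtToPhys and physToVirt round-trip" {
    const virt: usize = 150;
    // 150 - 100 = 50, and adding the offset back recovers the input.
    try std.testing.expectEqual(@as(usize, 50), virtToPhys(virt));
    try std.testing.expectEqual(virt, physToVirt(virtToPhys(virt)));
}
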
@@ -1,5 +1,6 @@
 const mem = @import("mem_mock.zig");
 const bitmap = @import("../../../src/kernel/bitmap.zig");
+const vmm = @import("../../../src/kernel/vmm.zig");
 const arch = @import("arch_mock.zig");
 const std = @import("std");
 
@@ -8,21 +9,35 @@ pub const VmmError = error{
     NotAllocated,
 };
 
-pub const Attributes = struct {
-    kernel: bool,
-    writable: bool,
-    cachable: bool,
-};
+pub const Attributes = vmm.Attributes;
 
 pub const BLOCK_SIZE: u32 = 1024;
 
-pub fn Mapper(comptime Payload: type) type {
-    return struct {};
-}
+pub const Mapper = vmm.Mapper;
+
+pub const MapperError = error{
+    InvalidVirtualAddress,
+    InvalidPhysicalAddress,
+    AddressMismatch,
+    MisalignedVirtualAddress,
+    MisalignedPhysicalAddress,
+    NotMapped,
+};
+
+pub var kernel_vmm: VirtualMemoryManager(arch.VmmPayload) = undefined;
 
 pub fn VirtualMemoryManager(comptime Payload: type) type {
     return struct {
         const Self = @This();
 
         pub fn init(start: usize, end: usize, allocator: *std.mem.Allocator, mapper: Mapper(Payload), payload: Payload) std.mem.Allocator.Error!Self {
             return Self{};
         }
 
+        pub fn virtToPhys(self: *const Self, virt: usize) VmmError!usize {
+            return 0;
+        }
+
         pub fn alloc(self: *Self, num: u32, attrs: Attributes) std.mem.Allocator.Error!?usize {
             return std.mem.Allocator.Error.OutOfMemory;
         }
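
Finally, the VMM mock re-exports the real Attributes and Mapper from vmm.zig instead of redefining them, and grows just enough surface for the new code path: a MapperError set, a kernel_vmm global, and a virtToPhys stub that always answers 0.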