Merge pull request #128 from SamTebbs33/feature/virtual-mem-manager

Add virtual memory manager
Sam Tebbs 2020-04-14 04:03:52 +01:00 committed by GitHub
commit 7f470a4668
11 changed files with 1024 additions and 170 deletions


@@ -13,6 +13,7 @@ const syscalls = @import("syscalls.zig");
 const mem = @import("../../mem.zig");
 const multiboot = @import("../../multiboot.zig");
 const pmm = @import("pmm.zig");
+const vmm = @import("../../vmm.zig");
 const MemProfile = mem.MemProfile;

 /// The interrupt context that is given to an interrupt handler. It contains most of the registers
@@ -48,6 +49,18 @@ pub const InterruptContext = struct {
     ss: u32,
 };

+/// The type of the payload passed to a virtual memory mapper.
+/// For x86 it's the page directory that should be mapped.
+pub const VmmPayload = *paging.Directory;
+
+/// The payload used in the kernel virtual memory manager.
+/// For x86 it's the kernel's page directory.
+pub const KERNEL_VMM_PAYLOAD = &paging.kernel_directory;
+
+/// The architecture's virtual memory mapper.
+/// For x86, it simply forwards the calls to the paging subsystem.
+pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = vmm.Mapper(VmmPayload){ .mapFn = paging.map, .unmapFn = paging.unmap };
+
 /// The size of each allocatable block of memory, normally set to the page size.
 pub const MEMORY_BLOCK_SIZE = paging.PAGE_SIZE_4KB;
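For orientation, a minimal sketch of how the architecture-neutral VMM is expected to drive this mapper. The address range, attributes and allocator below are illustrative assumptions, not part of this commit:

// The kernel VMM calls through the function pointers in VMM_MAPPER,
// passing the kernel page directory as the mapper payload.
const mapper = VMM_MAPPER;
try mapper.mapFn(
    0xC0000000, 0xC0400000, // virtual start/end (example values)
    0x00000000, 0x00400000, // physical start/end (example values)
    .{ .kernel = true, .writable = true, .cachable = true },
    allocator, // any *std.mem.Allocator
    KERNEL_VMM_PAYLOAD, // *paging.Directory
);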


@@ -9,12 +9,13 @@ const MemProfile = @import("../../mem.zig").MemProfile;
 const tty = @import("../../tty.zig");
 const log = @import("../../log.zig");
 const mem = @import("../../mem.zig");
+const vmm = @import("../../vmm.zig");
 const multiboot = @import("../../multiboot.zig");
 const options = @import("build_options");
 const testing = std.testing;

 /// An array of directory entries and page tables. Forms the first level of paging and covers the entire 4GB memory space.
-const Directory = packed struct {
+pub const Directory = packed struct {
     /// The directory entries.
     entries: [ENTRIES_PER_DIRECTORY]DirectoryEntry,
@@ -28,24 +29,6 @@ const Table = packed struct {
     entries: [ENTRIES_PER_TABLE]TableEntry,
 };

-/// All errors that can be thrown by paging functions.
-const PagingError = error{
-    /// Physical addresses are invalid (definition is up to the function).
-    InvalidPhysAddresses,
-    /// Virtual addresses are invalid (definition is up to the function).
-    InvalidVirtAddresses,
-    /// Physical and virtual addresses don't cover spaces of the same size.
-    PhysicalVirtualMismatch,
-    /// Physical addresses aren't aligned by page size.
-    UnalignedPhysAddresses,
-    /// Virtual addresses aren't aligned by page size.
-    UnalignedVirtAddresses,
-};
-
 /// An entry within a directory. References a single page table.
 /// Bit 0: Present. Set if present in physical memory.
 /// When not set, all remaining 31 bits are ignored and available for use.
@@ -121,6 +104,9 @@ pub const PAGE_SIZE_4MB: u32 = 0x400000;
 /// The number of bytes in 4KB
 pub const PAGE_SIZE_4KB: u32 = PAGE_SIZE_4MB / 1024;

+/// The kernel's page directory. Should only be used to map kernel-owned code and data
+pub var kernel_directory: Directory align(@truncate(u29, PAGE_SIZE_4KB)) = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
+
 ///
 /// Convert a virtual address to an index within an array of directory entries.
 ///
@@ -147,53 +133,92 @@ inline fn virtToTableEntryIdx(virt: usize) usize {
     return (virt / PAGE_SIZE_4KB) % ENTRIES_PER_TABLE;
 }
+///
+/// Set the bit(s) associated with an attribute of a table or directory entry.
+///
+/// Arguments:
+///     val: *align(1) u32 - The entry to modify
+///     attr: u32 - The bits corresponding to the attribute to set
+///
+inline fn setAttribute(val: *align(1) u32, attr: u32) void {
+    val.* |= attr;
+}
+
+///
+/// Clear the bit(s) associated with an attribute of a table or directory entry.
+///
+/// Arguments:
+///     val: *align(1) u32 - The entry to modify
+///     attr: u32 - The bits corresponding to the attribute to clear
+///
+inline fn clearAttribute(val: *align(1) u32, attr: u32) void {
+    val.* &= ~attr;
+}
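As a quick illustration of these two helpers (the entry value here is hypothetical):

// Toggle bits on a raw 32-bit entry value.
var entry: u32 = 0;
setAttribute(&entry, DENTRY_PRESENT | DENTRY_WRITABLE); // both bits set
clearAttribute(&entry, DENTRY_WRITABLE); // only DENTRY_PRESENT remains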
 ///
 /// Map a page directory entry, setting the present, size, writable, write-through and physical address bits.
 /// Clears the user and cache disabled bits. Entry should be zero'ed.
 ///
 /// Arguments:
 ///     OUT dir: *Directory - The directory that this entry is in
 ///     IN virt_start: usize - The start of the virtual space to map
 ///     IN virt_end: usize - The end of the virtual space to map
 ///     IN phys_start: usize - The start of the physical space to map
 ///     IN phys_end: usize - The end of the physical space to map
+///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
 ///     IN allocator: *Allocator - The allocator to use to map any tables needed
 ///
-/// Error: PagingError || std.mem.Allocator.Error
-///     PagingError.InvalidPhysAddresses - The physical start address is greater than the end.
-///     PagingError.InvalidVirtAddresses - The virtual start address is greater than the end or is larger than 4GB.
-///     PagingError.PhysicalVirtualMismatch - The differences between the virtual addresses and the physical addresses aren't the same.
-///     PagingError.UnalignedPhysAddresses - One or both of the physical addresses aren't page size aligned.
-///     PagingError.UnalignedVirtAddresses - One or both of the virtual addresses aren't page size aligned.
-///     std.mem.Allocator.Error.* - See std.mem.Allocator.alignedAlloc.
+/// Error: vmm.MapperError || std.mem.Allocator.Error
+///     vmm.MapperError.InvalidPhysicalAddress - The physical start address is greater than the end
+///     vmm.MapperError.InvalidVirtualAddress - The virtual start address is greater than the end or is larger than 4GB
+///     vmm.MapperError.AddressMismatch - The differences between the virtual addresses and the physical addresses aren't the same
+///     vmm.MapperError.MisalignedPhysicalAddress - One or both of the physical addresses aren't page size aligned
+///     vmm.MapperError.MisalignedVirtualAddress - One or both of the virtual addresses aren't page size aligned
+///     std.mem.Allocator.Error.* - See std.mem.Allocator.alignedAlloc
 ///
-fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, allocator: *std.mem.Allocator) (PagingError || std.mem.Allocator.Error)!void {
+fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: *std.mem.Allocator) (vmm.MapperError || std.mem.Allocator.Error)!void {
     if (phys_start > phys_end) {
-        return PagingError.InvalidPhysAddresses;
+        return vmm.MapperError.InvalidPhysicalAddress;
     }
     if (virt_start > virt_end) {
-        return PagingError.InvalidVirtAddresses;
+        return vmm.MapperError.InvalidVirtualAddress;
     }
     if (phys_end - phys_start != virt_end - virt_start) {
-        return PagingError.PhysicalVirtualMismatch;
+        return vmm.MapperError.AddressMismatch;
     }
     if (!std.mem.isAligned(phys_start, PAGE_SIZE_4KB) or !std.mem.isAligned(phys_end, PAGE_SIZE_4KB)) {
-        return PagingError.UnalignedPhysAddresses;
+        return vmm.MapperError.MisalignedPhysicalAddress;
     }
     if (!std.mem.isAligned(virt_start, PAGE_SIZE_4KB) or !std.mem.isAligned(virt_end, PAGE_SIZE_4KB)) {
-        return PagingError.UnalignedVirtAddresses;
+        return vmm.MapperError.MisalignedVirtualAddress;
     }

     const entry = virt_start / PAGE_SIZE_4MB;
     if (entry >= ENTRIES_PER_DIRECTORY)
-        return PagingError.InvalidVirtAddresses;
+        return vmm.MapperError.InvalidVirtualAddress;
     var dir_entry = &dir.entries[entry];

-    dir_entry.* |= DENTRY_PRESENT;
-    dir_entry.* |= DENTRY_WRITABLE;
-    dir_entry.* &= ~DENTRY_USER;
-    dir_entry.* |= DENTRY_WRITE_THROUGH;
-    dir_entry.* &= ~DENTRY_CACHE_DISABLED;
-    dir_entry.* &= ~DENTRY_4MB_PAGES;
+    setAttribute(dir_entry, DENTRY_PRESENT);
+    setAttribute(dir_entry, DENTRY_WRITE_THROUGH);
+    clearAttribute(dir_entry, DENTRY_4MB_PAGES);
+
+    if (attrs.writable) {
+        setAttribute(dir_entry, DENTRY_WRITABLE);
+    } else {
+        clearAttribute(dir_entry, DENTRY_WRITABLE);
+    }
+
+    if (attrs.kernel) {
+        clearAttribute(dir_entry, DENTRY_USER);
+    } else {
+        setAttribute(dir_entry, DENTRY_USER);
+    }
+
+    if (attrs.cachable) {
+        clearAttribute(dir_entry, DENTRY_CACHE_DISABLED);
+    } else {
+        setAttribute(dir_entry, DENTRY_CACHE_DISABLED);
+    }
     // Only create a new table if one hasn't already been created for this dir entry.
     // Prevents us from overriding previous mappings.
@@ -218,7 +243,7 @@ fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start:
         phys += PAGE_SIZE_4KB;
         tentry += 1;
     }) {
-        try mapTableEntry(&table.entries[tentry], phys);
+        try mapTableEntry(&table.entries[tentry], phys, attrs);
     }
 }
@@ -233,34 +258,55 @@ fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start:
-/// Error: PagingError
-///     PagingError.UnalignedPhysAddresses - If the physical address isn't page size aligned.
+/// Error: vmm.MapperError
+///     vmm.MapperError.MisalignedPhysicalAddress - If the physical address isn't page size aligned.
 ///
-fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize) PagingError!void {
+fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize, attrs: vmm.Attributes) vmm.MapperError!void {
     if (!std.mem.isAligned(phys_addr, PAGE_SIZE_4KB)) {
-        return PagingError.UnalignedPhysAddresses;
+        return vmm.MapperError.MisalignedPhysicalAddress;
     }
-    entry.* |= TENTRY_PRESENT;
-    entry.* |= TENTRY_WRITABLE;
-    entry.* &= ~TENTRY_USER;
-    entry.* |= TENTRY_WRITE_THROUGH;
-    entry.* &= ~TENTRY_CACHE_DISABLED;
-    entry.* &= ~TENTRY_GLOBAL;
-    entry.* |= TENTRY_PAGE_ADDR & phys_addr;
+    setAttribute(entry, TENTRY_PRESENT);
+    setAttribute(entry, TENTRY_WRITE_THROUGH);
+    if (attrs.writable) {
+        setAttribute(entry, TENTRY_WRITABLE);
+    } else {
+        clearAttribute(entry, TENTRY_WRITABLE);
+    }
+    if (attrs.kernel) {
+        clearAttribute(entry, TENTRY_USER);
+    } else {
+        setAttribute(entry, TENTRY_USER);
+    }
+    if (attrs.cachable) {
+        clearAttribute(entry, TENTRY_CACHE_DISABLED);
+    } else {
+        setAttribute(entry, TENTRY_CACHE_DISABLED);
+    }
+    clearAttribute(entry, TENTRY_GLOBAL);
+    setAttribute(entry, TENTRY_PAGE_ADDR & phys_addr);
 }
 ///
-/// Map a page directory. The addresses passed must be page size aligned and be the same distance apart.
+/// Map a virtual region of memory to a physical region with a set of attributes within a directory.
+/// If this call is made to a directory that has been loaded by the CPU, the virtual memory will immediately be accessible (given the proper attributes)
+/// and will be mirrored to the physical region given. Otherwise it will be accessible once the given directory is loaded by the CPU.
+///
+/// Any error returned by mapDirEntry is propagated to the caller.
 ///
 /// Arguments:
-///     OUT entry: *Directory - The directory to map
-///     IN virt_start: usize - The virtual address at which to start mapping
-///     IN virt_end: usize - The virtual address at which to stop mapping
-///     IN phys_start: usize - The physical address at which to start mapping
-///     IN phys_end: usize - The physical address at which to stop mapping
-///     IN allocator: *Allocator - The allocator to use to map any tables needed
+///     IN virt_start: usize - The start of the virtual region to map
+///     IN virt_end: usize - The end (exclusive) of the virtual region to map
+///     IN phys_start: usize - The start of the physical region to map to
+///     IN phys_end: usize - The end (exclusive) of the physical region to map to
+///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
+///     INOUT allocator: *std.mem.Allocator - The allocator to use to allocate any intermediate data structures required to map this region
+///     INOUT dir: *Directory - The page directory to map within
 ///
-/// Error: std.mem.Allocator.Error || PagingError
-///     * - See mapDirEntry.
+/// Error: vmm.MapperError || std.mem.Allocator.Error
+///     * - See mapDirEntry
 ///
-fn mapDir(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, allocator: *std.mem.Allocator) (std.mem.Allocator.Error || PagingError)!void {
+pub fn map(virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: *std.mem.Allocator, dir: *Directory) (std.mem.Allocator.Error || vmm.MapperError)!void {
     var virt_addr = virt_start;
     var phys_addr = phys_start;
     var page = virt_addr / PAGE_SIZE_4KB;
@@ -270,7 +316,44 @@ fn mapDir(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize
         virt_addr += PAGE_SIZE_4MB;
         entry_idx += 1;
     }) {
-        try mapDirEntry(dir, virt_addr, std.math.min(virt_end, virt_addr + PAGE_SIZE_4MB), phys_addr, std.math.min(phys_end, phys_addr + PAGE_SIZE_4MB), allocator);
+        try mapDirEntry(dir, virt_addr, std.math.min(virt_end, virt_addr + PAGE_SIZE_4MB), phys_addr, std.math.min(phys_end, phys_addr + PAGE_SIZE_4MB), attrs, allocator);
     }
 }
+///
+/// Unmap a virtual region of memory within a directory so that it is no longer accessible.
+///
+/// Arguments:
+///     IN virtual_start: usize - The start of the virtual region to unmap
+///     IN virtual_end: usize - The end (exclusive) of the virtual region to unmap
+///     INOUT dir: *Directory - The page directory to unmap within
+///
+/// Error: std.mem.Allocator.Error || vmm.MapperError
+///     vmm.MapperError.NotMapped - If the region being unmapped wasn't mapped in the first place
+///
+pub fn unmap(virtual_start: usize, virtual_end: usize, dir: *Directory) (std.mem.Allocator.Error || vmm.MapperError)!void {
+    var virt_addr = virtual_start;
+    var entry_idx = virt_addr / PAGE_SIZE_4MB;
+    while (entry_idx < ENTRIES_PER_DIRECTORY and virt_addr < virtual_end) : ({
+        virt_addr += PAGE_SIZE_4MB;
+        entry_idx += 1;
+    }) {
+        var dir_entry = &dir.entries[entry_idx];
+        const table = dir.tables[entry_idx] orelse return vmm.MapperError.NotMapped;
+        const end = std.math.min(virtual_end, virt_addr + PAGE_SIZE_4MB);
+        var addr = virt_addr;
+        while (addr < end) : (addr += PAGE_SIZE_4KB) {
+            var table_entry = &table.entries[virtToTableEntryIdx(addr)];
+            if (table_entry.* & TENTRY_PRESENT != 0) {
+                clearAttribute(table_entry, TENTRY_PRESENT);
+            } else {
+                return vmm.MapperError.NotMapped;
+            }
+        }
+        // If the region to be unmapped covers all of this directory entry, mark the whole entry as not present
+        if (virtual_end - virt_addr >= PAGE_SIZE_4MB)
+            clearAttribute(dir_entry, DENTRY_PRESENT);
+    }
+}
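A usage sketch of the new map/unmap pair; the addresses and the `fixed_allocator` below are illustrative assumptions:

// Map two pages into the kernel directory, then unmap them again.
const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
try map(0xC1000000, 0xC1002000, 0x01000000, 0x01002000, attrs, fixed_allocator, &kernel_directory);
try unmap(0xC1000000, 0xC1002000, &kernel_directory);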
@@ -295,74 +378,24 @@ pub fn init(mb_info: *multiboot.multiboot_info_t, mem_profile: *const MemProfile
     log.logInfo("Init paging\n", .{});
     defer log.logInfo("Done paging\n", .{});

-    // Calculate start and end of mapping
-    const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), PAGE_SIZE_4KB);
-    const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);
-    const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), PAGE_SIZE_4KB);
-    const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);
-
-    var tmp = allocator.alignedAlloc(Directory, @truncate(u29, PAGE_SIZE_4KB), 1) catch |e| {
-        panic(@errorReturnTrace(), "Failed to allocate page directory: {}\n", .{e});
-    };
-    var kernel_directory = @ptrCast(*Directory, tmp.ptr);
-    @memset(@ptrCast([*]u8, kernel_directory), 0, @sizeOf(Directory));
-
-    // Map in kernel
-    mapDir(kernel_directory, v_start, v_end, p_start, p_end, allocator) catch |e| {
-        panic(@errorReturnTrace(), "Failed to map kernel directory: {}\n", .{e});
-    };
-    const tty_addr = tty.getVideoBufferAddress();
-    // If the previous mapping space didn't cover the tty buffer, do so now
-    if (v_start > tty_addr or v_end <= tty_addr) {
-        const tty_phys = mem.virtToPhys(tty_addr);
-        const tty_buff_size = 32 * 1024;
-        mapDir(kernel_directory, tty_addr, tty_addr + tty_buff_size, tty_phys, tty_phys + tty_buff_size, allocator) catch |e| {
-            panic(@errorReturnTrace(), "Failed to map vga buffer in kernel directory: {}\n", .{e});
-        };
-    }
-
-    // If the kernel mapping didn't cover the multiboot info, map it so it can be accessed by code later on
-    // There's no way to know the size, so an estimated size of 2MB is used. This will need increasing as the kernel gets bigger.
-    const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), PAGE_SIZE_4KB);
-    if (v_start > mb_info_addr) {
-        const mb_info_end = mb_info_addr + PAGE_SIZE_4MB / 2;
-        mapDir(kernel_directory, mb_info_addr, mb_info_end, mem.virtToPhys(mb_info_addr), mem.virtToPhys(mb_info_end), allocator) catch |e| {
-            panic(@errorReturnTrace(), "Failed to map mb_info in kernel directory: {}\n", .{e});
-        };
-    }
-
-    // Map in each boot module
-    for (mem_profile.boot_modules) |*module| {
-        const mod_v_struct_start = std.mem.alignBackward(@ptrToInt(module), PAGE_SIZE_4KB);
-        const mod_v_struct_end = std.mem.alignForward(mod_v_struct_start + @sizeOf(multiboot.multiboot_module_t), PAGE_SIZE_4KB);
-        mapDir(kernel_directory, mod_v_struct_start, mod_v_struct_end, mem.virtToPhys(mod_v_struct_start), mem.virtToPhys(mod_v_struct_end), allocator) catch |e| {
-            panic(@errorReturnTrace(), "Failed to map module struct: {}\n", .{e});
-        };
-        const mod_p_start = std.mem.alignBackward(module.mod_start, PAGE_SIZE_4KB);
-        const mod_p_end = std.mem.alignForward(module.mod_end, PAGE_SIZE_4KB);
-        mapDir(kernel_directory, mem.physToVirt(mod_p_start), mem.physToVirt(mod_p_end), mod_p_start, mod_p_end, allocator) catch |e| {
-            panic(@errorReturnTrace(), "Failed to map boot module in kernel directory: {}\n", .{e});
-        };
-    }
-
-    const dir_physaddr = @ptrToInt(mem.virtToPhys(kernel_directory));
+    isr.registerIsr(isr.PAGE_FAULT, if (options.rt_test) rt_pageFault else pageFault) catch |e| {
+        panic(@errorReturnTrace(), "Failed to register page fault ISR: {}\n", .{e});
+    };
+
+    const dir_physaddr = @ptrToInt(mem.virtToPhys(&kernel_directory));
     asm volatile ("mov %[addr], %%cr3"
         :
         : [addr] "{eax}" (dir_physaddr)
     );
-    isr.registerIsr(isr.PAGE_FAULT, if (options.rt_test) rt_pageFault else pageFault) catch |e| {
-        panic(@errorReturnTrace(), "Failed to register page fault ISR: {}\n", .{e});
-    };
+    const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);
     if (options.rt_test) runtimeTests(v_end);
 }
-fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys_start: usize, table: *Table) void {
-    expect(entry & DENTRY_PRESENT != 0);
-    expect(entry & DENTRY_WRITABLE != 0);
-    expectEqual(entry & DENTRY_USER, 0);
-    expect(entry & DENTRY_WRITE_THROUGH != 0);
-    expectEqual(entry & DENTRY_CACHE_DISABLED, 0);
+fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys_start: usize, attrs: vmm.Attributes, table: *Table, present: bool) void {
+    expectEqual(entry & DENTRY_PRESENT, if (present) DENTRY_PRESENT else 0);
+    expectEqual(entry & DENTRY_WRITABLE, if (attrs.writable) DENTRY_WRITABLE else 0);
+    expectEqual(entry & DENTRY_USER, if (attrs.kernel) 0 else DENTRY_USER);
+    expectEqual(entry & DENTRY_WRITE_THROUGH, DENTRY_WRITE_THROUGH);
+    expectEqual(entry & DENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else DENTRY_CACHE_DISABLED);
     expectEqual(entry & DENTRY_4MB_PAGES, 0);
     expectEqual(entry & DENTRY_ZERO, 0);
@@ -374,21 +407,38 @@ fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys
         phys += PAGE_SIZE_4KB;
     }) {
         const tentry = table.entries[tentry_idx];
-        checkTableEntry(tentry, phys);
+        checkTableEntry(tentry, phys, attrs, present);
     }
 }

-fn checkTableEntry(entry: TableEntry, page_phys: usize) void {
-    expect(entry & TENTRY_PRESENT != 0);
-    expect(entry & TENTRY_WRITABLE != 0);
-    expectEqual(entry & TENTRY_USER, 0);
-    expect(entry & TENTRY_WRITE_THROUGH != 0);
-    expectEqual(entry & TENTRY_CACHE_DISABLED, 0);
+fn checkTableEntry(entry: TableEntry, page_phys: usize, attrs: vmm.Attributes, present: bool) void {
+    expectEqual(entry & TENTRY_PRESENT, if (present) TENTRY_PRESENT else 0);
+    expectEqual(entry & TENTRY_WRITABLE, if (attrs.writable) TENTRY_WRITABLE else 0);
+    expectEqual(entry & TENTRY_USER, if (attrs.kernel) 0 else TENTRY_USER);
+    expectEqual(entry & TENTRY_WRITE_THROUGH, TENTRY_WRITE_THROUGH);
+    expectEqual(entry & TENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else TENTRY_CACHE_DISABLED);
     expectEqual(entry & TENTRY_ZERO, 0);
     expectEqual(entry & TENTRY_GLOBAL, 0);
     expectEqual(entry & TENTRY_PAGE_ADDR, page_phys);
 }
test "setAttribute and clearAttribute" {
var val: u32 = 0;
const attrs = [_]u32{ DENTRY_PRESENT, DENTRY_WRITABLE, DENTRY_USER, DENTRY_WRITE_THROUGH, DENTRY_CACHE_DISABLED, DENTRY_ACCESSED, DENTRY_ZERO, DENTRY_4MB_PAGES, DENTRY_IGNORED, DENTRY_AVAILABLE, DENTRY_PAGE_ADDR };
for (attrs) |attr| {
const old_val = val;
setAttribute(&val, attr);
std.testing.expectEqual(val, old_val | attr);
}
for (attrs) |attr| {
const old_val = val;
clearAttribute(&val, attr);
std.testing.expectEqual(val, old_val & ~attr);
}
}
test "virtToDirEntryIdx" { test "virtToDirEntryIdx" {
expectEqual(virtToDirEntryIdx(0), 0); expectEqual(virtToDirEntryIdx(0), 0);
expectEqual(virtToDirEntryIdx(123), 0); expectEqual(virtToDirEntryIdx(123), 0);
@@ -417,32 +467,34 @@ test "mapDirEntry" {
     const phys_end: usize = phys + PAGE_SIZE_4MB;
     const virt: usize = 1 * PAGE_SIZE_4MB;
     const virt_end: usize = virt + PAGE_SIZE_4MB;
-    try mapDirEntry(&dir, virt, virt_end, phys, phys_end, allocator);
+    try mapDirEntry(&dir, virt, virt_end, phys, phys_end, .{ .kernel = true, .writable = true, .cachable = true }, allocator);

     const entry_idx = virtToDirEntryIdx(virt);
     const entry = dir.entries[entry_idx];
     const table = dir.tables[entry_idx] orelse unreachable;
-    checkDirEntry(entry, virt, virt_end, phys, table);
+    checkDirEntry(entry, virt, virt_end, phys, .{ .kernel = true, .writable = true, .cachable = true }, table, true);
 }
test "mapDirEntry returns errors correctly" { test "mapDirEntry returns errors correctly" {
var allocator = std.heap.page_allocator; var allocator = std.heap.page_allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined }; var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined };
testing.expectError(PagingError.UnalignedVirtAddresses, mapDirEntry(&dir, 1, PAGE_SIZE_4KB + 1, 0, PAGE_SIZE_4KB, allocator)); const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
testing.expectError(PagingError.UnalignedPhysAddresses, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB + 1, allocator)); testing.expectError(vmm.MapperError.MisalignedVirtualAddress, mapDirEntry(&dir, 1, PAGE_SIZE_4KB + 1, 0, PAGE_SIZE_4KB, attrs, allocator));
testing.expectError(PagingError.PhysicalVirtualMismatch, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB, allocator)); testing.expectError(vmm.MapperError.MisalignedPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB + 1, attrs, allocator));
testing.expectError(PagingError.InvalidVirtAddresses, mapDirEntry(&dir, 1, 0, 0, PAGE_SIZE_4KB, allocator)); testing.expectError(vmm.MapperError.AddressMismatch, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB, attrs, allocator));
testing.expectError(PagingError.InvalidPhysAddresses, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, 0, allocator)); testing.expectError(vmm.MapperError.InvalidVirtualAddress, mapDirEntry(&dir, 1, 0, 0, PAGE_SIZE_4KB, attrs, allocator));
testing.expectError(vmm.MapperError.InvalidPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, 0, attrs, allocator));
} }
test "mapDir" { test "map and unmap" {
var allocator = std.heap.page_allocator; var allocator = std.heap.page_allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY }; var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
const phys_start: usize = PAGE_SIZE_4MB * 2; const phys_start: usize = PAGE_SIZE_4MB * 2;
const virt_start: usize = PAGE_SIZE_4MB * 4; const virt_start: usize = PAGE_SIZE_4MB * 4;
const phys_end: usize = PAGE_SIZE_4MB * 4; const phys_end: usize = PAGE_SIZE_4MB * 4;
const virt_end: usize = PAGE_SIZE_4MB * 6; const virt_end: usize = PAGE_SIZE_4MB * 6;
mapDir(&dir, virt_start, virt_end, phys_start, phys_end, allocator) catch unreachable; const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
map(virt_start, virt_end, phys_start, phys_end, attrs, allocator, &dir) catch unreachable;
var virt = virt_start; var virt = virt_start;
var phys = phys_start; var phys = phys_start;
@ -453,7 +505,20 @@ test "mapDir" {
const entry_idx = virtToDirEntryIdx(virt); const entry_idx = virtToDirEntryIdx(virt);
const entry = dir.entries[entry_idx]; const entry = dir.entries[entry_idx];
const table = dir.tables[entry_idx] orelse unreachable; const table = dir.tables[entry_idx] orelse unreachable;
checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, table); checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, true);
}
unmap(virt_start, virt_end, &dir) catch unreachable;
virt = virt_start;
phys = phys_start;
while (virt < virt_end) : ({
virt += PAGE_SIZE_4MB;
phys += PAGE_SIZE_4MB;
}) {
const entry_idx = virtToDirEntryIdx(virt);
const entry = dir.entries[entry_idx];
const table = dir.tables[entry_idx] orelse unreachable;
checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, false);
} }
} }


@@ -105,16 +105,82 @@ pub fn Bitmap(comptime BitmapType: type) type {
         /// Convert a global bitmap index into the bit corresponding to an entry within a single BitmapType.
         ///
         /// Arguments:
-        ///     IN self: *Self - The bitmap to use.
+        ///     IN self: *const Self - The bitmap to use.
         ///     IN idx: u32 - The index into all of the bitmap's entries.
        ///
         /// Return: BitmapType.
         ///     The bit corresponding to that index but within a single BitmapType.
         ///
-        fn indexToBit(self: *Self, idx: u32) BitmapType {
+        fn indexToBit(self: *const Self, idx: u32) BitmapType {
             return @as(BitmapType, 1) << @intCast(IndexType, idx % ENTRIES_PER_BITMAP);
         }
+        ///
+        /// Find a number of contiguous free entries and set them.
+        ///
+        /// Arguments:
+        ///     INOUT self: *Self - The bitmap to modify.
+        ///     IN num: u32 - The number of entries to set.
+        ///
+        /// Return: ?u32
+        ///     The first entry set or null if there weren't enough contiguous entries.
+        ///
+        pub fn setContiguous(self: *Self, num: u32) ?u32 {
+            if (num > self.num_free_entries) {
+                return null;
+            }
+
+            var count: u32 = 0;
+            var start: ?u32 = null;
+            for (self.bitmaps) |bmp, i| {
+                var bit: IndexType = 0;
+                while (true) {
+                    const entry = bit + @intCast(u32, i * ENTRIES_PER_BITMAP);
+                    if (entry >= self.num_entries) {
+                        return null;
+                    }
+                    if ((bmp & @as(u32, 1) << bit) != 0) {
+                        // This is a one so clear the progress
+                        count = 0;
+                        start = null;
+                    } else {
+                        // It's a zero so increment the count
+                        count += 1;
+                        if (start == null) {
+                            // Start of the contiguous zeroes
+                            start = entry;
+                        }
+                        if (count == num) {
+                            // Reached the desired number
+                            break;
+                        }
+                    }
+                    // Avoiding overflow by checking if bit is less than the max - 1
+                    if (bit < ENTRIES_PER_BITMAP - 1) {
+                        bit += 1;
+                    } else {
+                        // Reached the end of the bitmap
+                        break;
+                    }
+                }
+                if (count == num) {
+                    break;
+                }
+            }
+
+            if (count == num) {
+                if (start) |start_entry| {
+                    var i: u32 = 0;
+                    while (i < num) : (i += 1) {
+                        // Can't fail as the entry was found to be free
+                        self.setEntry(start_entry + i) catch unreachable;
+                    }
+                    return start_entry;
+                }
+            }
+            return null;
+        }
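A small usage sketch (the size is illustrative; `clearEntry` is the existing counterpart used elsewhere in this diff, and the `setContiguous` test below exercises the edge cases):

// Reserve four contiguous entries, then release them one by one.
if (bmp.setContiguous(4)) |first| {
    var i: u32 = 0;
    while (i < 4) : (i += 1) {
        try bmp.clearEntry(first + i);
    }
}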
         ///
         /// Set the first free entry within the bitmaps as occupied.
         ///
@@ -140,7 +206,7 @@ pub fn Bitmap(comptime BitmapType: type) type {
         /// Check if an entry is set.
         ///
         /// Arguments:
-        ///     IN self: *Bitmap - The bitmap to check.
+        ///     IN self: *const Self - The bitmap to check.
         ///     IN idx: u32 - The entry to check.
         ///
         /// Return: bool.
@@ -149,7 +215,7 @@ pub fn Bitmap(comptime BitmapType: type) type {
         /// Error: BitmapError.
         ///     OutOfBounds: The index given is out of bounds.
         ///
-        pub fn isSet(self: *Self, idx: u32) BitmapError!bool {
+        pub fn isSet(self: *const Self, idx: u32) BitmapError!bool {
             if (idx >= self.num_entries) return BitmapError.OutOfBounds;
             return (self.bitmaps[idx / ENTRIES_PER_BITMAP] & self.indexToBit(idx)) != 0;
         }
@@ -304,3 +370,25 @@ test "indexToBit" {
     testing.expectEqual(bmp.indexToBit(8), 1);
     testing.expectEqual(bmp.indexToBit(9), 2);
 }
test "setContiguous" {
var bmp = try Bitmap(u4).init(15, std.heap.page_allocator);
// Test trying to set more entries than the bitmap has
testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1), null);
// All entries should still be free
testing.expectEqual(bmp.num_free_entries, bmp.num_entries);
testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 0);
testing.expectEqual(bmp.setContiguous(4) orelse unreachable, 3);
// 0b0000.0000.0111.1111
bmp.bitmaps[2] |= 2;
// 0b0000.0010.0111.1111
testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 10);
// 0b0001.1110.0111.1111
testing.expectEqual(bmp.setContiguous(5), null);
testing.expectEqual(bmp.setContiguous(2), 7);
// 0b001.1111.1111.1111
// Test trying to set beyond the end of the bitmaps
testing.expectEqual(bmp.setContiguous(3), null);
testing.expectEqual(bmp.setContiguous(2), 13);
}


@@ -10,6 +10,7 @@ const vga = @import("vga.zig");
 const log = @import("log.zig");
 const serial = @import("serial.zig");
 const pmm = @import("pmm.zig");
+const vmm = if (is_test) @import(mock_path ++ "vmm_mock.zig") else @import("vmm.zig");
 const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
 const panic_root = if (is_test) @import(mock_path ++ "panic_mock.zig") else @import("panic.zig");
 const options = @import("build_options");
@@ -23,6 +24,9 @@ comptime {
     }
 }

+/// The virtual memory manager associated with the kernel address space
+var kernel_vmm: vmm.VirtualMemoryManager(arch.VmmPayload) = undefined;
+
 // This is for unit testing as we need to export KERNEL_ADDR_OFFSET as it is no longer available
 // from the linker script
 export var KERNEL_ADDR_OFFSET: u32 = if (builtin.is_test) 0xC0000000 else undefined;
@@ -46,13 +50,17 @@ export fn kmain(mb_info: *multiboot.multiboot_info_t, mb_magic: u32) void {
         var buffer = mem_profile.vaddr_end[0..mem_profile.fixed_alloc_size];
         var fixed_allocator = std.heap.FixedBufferAllocator.init(buffer);

-        pmm.init(&mem_profile, &fixed_allocator.allocator);
-        log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{});
-        arch.init(mb_info, &mem_profile, &fixed_allocator.allocator);
-        log.logInfo("Arch init done\n", .{});
-
         panic_root.init(&mem_profile, &fixed_allocator.allocator) catch |e| {
             panic_root.panic(@errorReturnTrace(), "Failed to initialise panic: {}", .{e});
         };

+        pmm.init(&mem_profile, &fixed_allocator.allocator);
+        kernel_vmm = vmm.init(&mem_profile, mb_info, &fixed_allocator.allocator) catch |e| panic_root.panic(@errorReturnTrace(), "Failed to initialise kernel VMM: {}", .{e});
+
+        log.logInfo("Init arch " ++ @tagName(builtin.arch) ++ "\n", .{});
+        arch.init(mb_info, &mem_profile, &fixed_allocator.allocator);
+        log.logInfo("Arch init done\n", .{});
+
         vga.init();
         tty.init();


@@ -62,7 +62,7 @@ var ADDR_OFFSET: usize = undefined;
 /// Return: @TypeOf(virt)
 ///     The physical address.
 ///
-pub inline fn virtToPhys(virt: var) @TypeOf(virt) {
+pub fn virtToPhys(virt: var) @TypeOf(virt) {
     const T = @TypeOf(virt);
     return switch (@typeInfo(T)) {
         .Pointer => @intToPtr(T, @ptrToInt(virt) - ADDR_OFFSET),
@@ -80,7 +80,7 @@ pub inline fn virtToPhys(virt: var) @TypeOf(virt) {
 /// Return: @TypeOf(phys)
 ///     The virtual address.
 ///
-pub inline fn physToVirt(phys: var) @TypeOf(phys) {
+pub fn physToVirt(phys: var) @TypeOf(phys) {
     const T = @TypeOf(phys);
     return switch (@typeInfo(T)) {
         .Pointer => @intToPtr(T, @ptrToInt(phys) + ADDR_OFFSET),


@@ -6,7 +6,7 @@ const arch = @import("arch.zig").internals;
 const MemProfile = (if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig")).MemProfile;
 const testing = std.testing;
 const panic = @import("panic.zig").panic;
-const log = @import("log.zig");
+const log = if (is_test) @import(mock_path ++ "log_mock.zig") else @import("log.zig");
 const MEMORY_AVAILABLE = @import("multiboot.zig").MULTIBOOT_MEMORY_AVAILABLE;
 const Bitmap = @import("bitmap.zig").Bitmap;
@@ -19,7 +19,7 @@ const PmmError = error{
 };

 /// The size of memory associated with each bitmap entry
-const BLOCK_SIZE = arch.MEMORY_BLOCK_SIZE;
+pub const BLOCK_SIZE = arch.MEMORY_BLOCK_SIZE;

 var bitmap: PmmBitmap = undefined;
@@ -32,7 +32,7 @@ var bitmap: PmmBitmap = undefined;
 /// Error: PmmBitmap.BitmapError.
 ///     *: See PmmBitmap.setEntry. Could occur if the address is out of bounds.
 ///
-fn setAddr(addr: usize) PmmBitmap.BitmapError!void {
+pub fn setAddr(addr: usize) PmmBitmap.BitmapError!void {
     try bitmap.setEntry(@intCast(u32, addr / BLOCK_SIZE));
 }
@@ -47,7 +47,7 @@ fn setAddr(addr: usize) PmmBitmap.BitmapError!void {
 /// Error: PmmBitmap.BitmapError.
 ///     *: See PmmBitmap.setEntry. Could occur if the address is out of bounds.
 ///
-fn isSet(addr: usize) PmmBitmap.BitmapError!bool {
+pub fn isSet(addr: usize) PmmBitmap.BitmapError!bool {
     return bitmap.isSet(@intCast(u32, addr / BLOCK_SIZE));
 }
@@ -83,6 +83,15 @@ pub fn free(addr: usize) (PmmBitmap.BitmapError || PmmError)!void {
 }

+///
+/// Get the number of unallocated blocks of memory.
+///
+/// Return: u32.
+///     The number of unallocated blocks of memory
+///
+pub fn blocksFree() u32 {
+    return bitmap.num_free_entries;
+}
+
 /// Initialise the physical memory manager and set all unavailable regions as occupied (those from the memory map and those from the linker symbols).
 ///
 /// Arguments:
@@ -112,16 +121,10 @@ pub fn init(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
             }
         }
     }
-    // Occupy kernel memory
-    var addr = std.mem.alignBackward(@ptrToInt(mem.physaddr_start), BLOCK_SIZE);
-    while (addr < @ptrToInt(mem.physaddr_end)) : (addr += BLOCK_SIZE) {
-        setAddr(addr) catch |e| switch (e) {
-            error.OutOfBounds => panic(@errorReturnTrace(), "Failed setting kernel code address 0x{x} as occupied. The amount of system memory seems to be too low for the kernel image: {}", .{ addr, e }),
-            else => panic(@errorReturnTrace(), "Failed setting kernel code address 0x{x} as occupied: {}", .{ addr, e }),
-        };
-    }
-
-    if (build_options.rt_test) runtimeTests(mem);
+    if (build_options.rt_test) {
+        runtimeTests(mem, allocator);
+    }
 }
 ///
@@ -129,10 +132,13 @@
 ///
 /// Arguments:
 ///     IN mem: *const MemProfile - The memory profile to check for reserved memory regions.
+///     INOUT allocator: *std.mem.Allocator - The allocator to use when needing to create intermediate structures used for testing
 ///
-fn runtimeTests(mem: *const MemProfile) void {
+fn runtimeTests(mem: *const MemProfile, allocator: *std.mem.Allocator) void {
     // Make sure that occupied memory can't be allocated
     var prev_alloc: usize = std.math.maxInt(usize);
+    var alloc_list = std.ArrayList(usize).init(allocator);
+    defer alloc_list.deinit();
     while (alloc()) |alloced| {
         if (prev_alloc == alloced) {
             panic(null, "PMM allocated the same address twice: 0x{x}", .{alloced});
@@ -146,9 +152,11 @@
             }
         }
-        if (alloced >= std.mem.alignBackward(@ptrToInt(mem.physaddr_start), BLOCK_SIZE) and alloced < std.mem.alignForward(@ptrToInt(mem.physaddr_end), BLOCK_SIZE)) {
-            panic(null, "PMM allocated an address that should be reserved by kernel code: 0x{x}", .{alloced});
-        }
+        alloc_list.append(alloced) catch |e| panic(@errorReturnTrace(), "Failed to add PMM allocation to list: {}", .{e});
+    }
+    // Clean up
+    for (alloc_list.items) |alloced| {
+        free(alloced) catch |e| panic(@errorReturnTrace(), "Failed freeing allocation in PMM rt test: {}", .{e});
     }
     log.logInfo("PMM: Tested allocation\n", .{});
 }
@@ -165,6 +173,7 @@ test "alloc" {
         testing.expect(!(try isSet(addr)));
         testing.expect(alloc().? == addr);
         testing.expect(try isSet(addr));
+        testing.expectEqual(blocksFree(), 31 - i);
     }
     // Allocation should now fail
     testing.expect(alloc() == null);
@@ -177,7 +186,9 @@ test "free" {
     inline while (i < 32) : (i += 1) {
         const addr = alloc().?;
         testing.expect(try isSet(addr));
+        testing.expectEqual(blocksFree(), 31);
         try free(addr);
+        testing.expectEqual(blocksFree(), 32);
         testing.expect(!(try isSet(addr)));
         // Double frees should be caught
         testing.expectError(PmmError.NotAllocated, free(addr));
@@ -203,9 +214,11 @@ test "setAddr and isSet" {
             testing.expect(try isSet(addr2));
         }

+        testing.expectEqual(blocksFree(), num_entries - i);
         // Set the current block
         try setAddr(addr);
         testing.expect(try isSet(addr));
+        testing.expectEqual(blocksFree(), num_entries - i - 1);

         // Ensure all successive entries are not set
         var j: u32 = i + 1;

src/kernel/vmm.zig (new file)

@@ -0,0 +1,625 @@
const build_options = @import("build_options");
const mock_path = build_options.mock_path;
const builtin = @import("builtin");
const is_test = builtin.is_test;
const std = @import("std");
const bitmap = @import("bitmap.zig");
const pmm = @import("pmm.zig");
const mem = if (is_test) @import(mock_path ++ "mem_mock.zig") else @import("mem.zig");
const tty = @import("tty.zig");
const multiboot = @import("multiboot.zig");
const log = @import("log.zig");
const panic = @import("panic.zig").panic;
const arch = @import("arch.zig").internals;
/// Attributes for a virtual memory allocation
pub const Attributes = struct {
/// Whether this memory belongs to the kernel and can therefore not be accessed in user mode
kernel: bool,
/// If this memory can be written to
writable: bool,
/// If this memory can be cached. Memory mapped to a device shouldn't, for example
cachable: bool,
};
/// All data that must be remembered for a virtual memory allocation
const Allocation = struct {
/// The physical blocks of memory associated with this allocation
physical: std.ArrayList(usize),
};
/// The size of each allocatable block, the same as the physical memory manager's block size
pub const BLOCK_SIZE: u32 = pmm.BLOCK_SIZE;
pub const MapperError = error{
InvalidVirtualAddress,
InvalidPhysicalAddress,
AddressMismatch,
MisalignedVirtualAddress,
MisalignedPhysicalAddress,
NotMapped,
};
///
/// Returns a container that can map and unmap virtual memory to physical memory.
/// The mapper can pass some payload data when mapping and unmapping, which is of type `Payload`. This can be anything that the underlying mapper needs to carry out the mapping process.
/// For x86, it would be the page directory that is being mapped within. An architecture or other mapper can specify the data it needs when mapping by specifying this type.
///
/// Arguments:
/// IN comptime Payload: type - The type of the VMM-specific payload to pass when mapping and unmapping
///
/// Return: type
/// The Mapper type constructed.
///
pub fn Mapper(comptime Payload: type) type {
return struct {
///
/// Map a region (can span more than one block) of virtual memory to physical memory. After a call to this function, the memory should be present the next time it is accessed.
/// The attributes given must be obeyed when possible.
///
/// Arguments:
/// IN virtual_start: usize - The start of the virtual memory to map
/// IN virtual_end: usize - The end of the virtual memory to map
/// IN physical_start: usize - The start of the physical memory to map to
/// IN physical_end: usize - The end of the physical memory to map to
/// IN attrs: Attributes - The attributes to apply to this region of memory
/// INOUT allocator: *std.mem.Allocator - The allocator to use when mapping, if required
/// IN spec: Payload - The payload to pass to the mapper
///
/// Error: std.mem.Allocator.Error || MapperError
/// The causes depend on the mapper used
///
mapFn: fn (virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes, allocator: *std.mem.Allocator, spec: Payload) (std.mem.Allocator.Error || MapperError)!void,
///
/// Unmap a region (can span more than one block) of virtual memory from its physical memory. After a call to this function, the memory should not be accessible without error.
///
/// Arguments:
/// IN virtual_start: usize - The start of the virtual region to unmap
/// IN virtual_end: usize - The end of the virtual region to unmap
/// IN spec: Payload - The payload to pass to the mapper
///
/// Error: std.mem.Allocator.Error || MapperError
/// The causes depend on the mapper used
///
unmapFn: fn (virtual_start: usize, virtual_end: usize, spec: Payload) (std.mem.Allocator.Error || MapperError)!void,
};
}
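To make the interface concrete, here is a hypothetical no-op mapper of the kind a unit test might register. `TestPayload`, `testMap` and `testUnmap` are illustrative and not part of this commit:

const TestPayload = u8;

fn testMap(virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes, allocator: *std.mem.Allocator, spec: TestPayload) (std.mem.Allocator.Error || MapperError)!void {
    // A real mapper would program page tables (or similar) here.
}

fn testUnmap(virtual_start: usize, virtual_end: usize, spec: TestPayload) (std.mem.Allocator.Error || MapperError)!void {
    // A real mapper would invalidate the translations here.
}

const test_mapper = Mapper(TestPayload){ .mapFn = testMap, .unmapFn = testUnmap };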
/// Errors that can be returned by VMM functions
pub const VmmError = error{
/// A memory region expected to be allocated wasn't
NotAllocated,
/// A memory region expected to not be allocated was
AlreadyAllocated,
/// A physical memory region expected to not be allocated was
PhysicalAlreadyAllocated,
/// A physical region of memory isn't of the same size as a virtual region
PhysicalVirtualMismatch,
/// Virtual addresses are invalid
InvalidVirtAddresses,
/// Physical addresses are invalid
InvalidPhysAddresses,
};
///
/// Construct a virtual memory manager to keep track of allocated and free virtual memory regions within a certain space
///
/// Arguments:
/// IN comptime Payload: type - The type of the payload to pass to the mapper
///
/// Return: type
/// The constructed type
///
pub fn VirtualMemoryManager(comptime Payload: type) type {
return struct {
/// The bitmap that keeps track of allocated and free regions
bmp: bitmap.Bitmap(u32),
/// The start of the memory to be tracked
start: usize,
/// The end of the memory to be tracked
end: usize,
/// The allocator to use when allocating and freeing regions
allocator: *std.mem.Allocator,
/// All allocations that have been made with this manager
allocations: std.hash_map.AutoHashMap(usize, Allocation),
/// The mapper to use when allocating and freeing regions
mapper: Mapper(Payload),
/// The payload to pass to the mapper functions
payload: Payload,
const Self = @This();
///
/// Initialise a virtual memory manager
///
/// Arguments:
/// IN start: usize - The start of the memory region to manage
/// IN end: usize - The end of the memory region to manage. Must be greater than the start
/// INOUT allocator: *std.mem.Allocator - The allocator to use when allocating and freeing regions
/// IN mapper: Mapper - The mapper to use when allocating and freeing regions
/// IN payload: Payload - The payload data to be passed to the mapper
///
/// Return: Self
/// The manager constructed
///
/// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - The allocator cannot allocate the memory required
///
pub fn init(start: usize, end: usize, allocator: *std.mem.Allocator, mapper: Mapper(Payload), payload: Payload) std.mem.Allocator.Error!Self {
const size = end - start;
var bmp = try bitmap.Bitmap(u32).init(@floatToInt(u32, @ceil(@intToFloat(f32, size) / @intToFloat(f32, pmm.BLOCK_SIZE))), allocator);
return Self{
.bmp = bmp,
.start = start,
.end = end,
.allocator = allocator,
.allocations = std.hash_map.AutoHashMap(usize, Allocation).init(allocator),
.mapper = mapper,
.payload = payload,
};
}
///
/// Check if a virtual memory address has been set
///
/// Arguments:
/// IN self: *Self - The manager to check
/// IN virt: usize - The virtual memory address to check
///
/// Return: bool
/// Whether the address is set
///
/// Error: bitmap.Bitmap(u32).BitmapError
/// Bitmap(u32).BitmapError.OutOfBounds - The address given is outside of the memory managed
///
pub fn isSet(self: *const Self, virt: usize) bitmap.Bitmap(u32).BitmapError!bool {
return try self.bmp.isSet(virt / BLOCK_SIZE);
}
///
/// Map a region (can span more than one block) of virtual memory to a specific region of memory
///
/// Arguments:
/// INOUT self: *Self - The manager to modify
/// IN virtual_start: usize - The start of the virtual region
/// IN virtual_end: usize - The end of the virtual region
/// IN physical_start: usize - The start of the physical region
/// IN physical_end: usize - The end of the physical region
/// IN attrs: Attributes - The attributes to apply to the memory regions
///
/// Error: VmmError || Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError
/// VmmError.AlreadyAllocated - The virtual address has already been allocated
/// VmmError.PhysicalAlreadyAllocated - The physical address has already been allocated
/// VmmError.PhysicalVirtualMismatch - The physical region and virtual region are of different sizes
/// VmmError.InvalidVirtAddresses - The start virtual address is greater than the end address
/// VmmError.InvalidPhysAddresses - The start physical address is greater than the end address
/// Bitmap.BitmapError.OutOfBounds - The physical or virtual addresses are out of bounds
/// std.mem.Allocator.Error.OutOfMemory - Allocating the required memory failed
/// MapperError.* - The causes depend on the mapper used
///
pub fn set(self: *Self, virtual_start: usize, virtual_end: usize, physical_start: usize, physical_end: usize, attrs: Attributes) (VmmError || bitmap.Bitmap(u32).BitmapError || std.mem.Allocator.Error || MapperError)!void {
var virt = virtual_start;
while (virt < virtual_end) : (virt += BLOCK_SIZE) {
if (try self.isSet(virt))
return VmmError.AlreadyAllocated;
}
var phys = physical_start;
while (phys < physical_end) : (phys += BLOCK_SIZE) {
if (try pmm.isSet(phys))
return VmmError.PhysicalAlreadyAllocated;
}
if (virtual_end - virtual_start != physical_end - physical_start)
return VmmError.PhysicalVirtualMismatch;
if (physical_start > physical_end)
return VmmError.InvalidPhysAddresses;
if (virtual_start > virtual_end)
return VmmError.InvalidVirtAddresses;
virt = virtual_start;
while (virt < virtual_end) : (virt += BLOCK_SIZE) {
try self.bmp.setEntry(virt / BLOCK_SIZE);
}
try self.mapper.mapFn(virtual_start, virtual_end, physical_start, physical_end, attrs, self.allocator, self.payload);
var phys_list = std.ArrayList(usize).init(self.allocator);
phys = physical_start;
while (phys < physical_end) : (phys += BLOCK_SIZE) {
try pmm.setAddr(phys);
try phys_list.append(phys);
}
// Key the allocation by the start of the virtual region so `free` can find it
_ = try self.allocations.put(virtual_start, Allocation{ .physical = phys_list });
}
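A usage sketch for `set`; the addresses are illustrative, and `kernel_vmm` refers to the manager initialised in kmain.zig earlier in this diff. Pinning an uncachable region is the typical case for a memory-mapped device buffer:

// Reserve one block at a fixed virtual/physical location; fails with
// VmmError.AlreadyAllocated if the virtual range is already taken.
try kernel_vmm.set(0xD0000000, 0xD0000000 + BLOCK_SIZE, 0xFD000000, 0xFD000000 + BLOCK_SIZE, .{ .kernel = true, .writable = true, .cachable = false });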
///
/// Allocate a number of contiguous blocks of virtual memory
///
/// Arguments:
/// INOUT self: *Self - The manager to allocate for
/// IN num: u32 - The number of blocks to allocate
/// IN attrs: Attributes - The attributes to apply to the mapped memory
///
/// Return: ?usize
/// The address at the start of the allocated region, or null if no region could be allocated due to a lack of contiguous blocks.
///
/// Error: std.mem.Allocator.Error
/// std.mem.AllocatorError.OutOfMemory: The required amount of memory couldn't be allocated
///
pub fn alloc(self: *Self, num: u32, attrs: Attributes) std.mem.Allocator.Error!?usize {
if (num == 0)
return null;
// Ensure that there is both enough physical and virtual address space free
if (pmm.blocksFree() >= num and self.bmp.num_free_entries >= num) {
// The virtual address space must be contiguous
if (self.bmp.setContiguous(num)) |entry| {
var block_list = std.ArrayList(usize).init(self.allocator);
try block_list.ensureCapacity(num);
var i: u32 = 0;
const vaddr_start = entry * BLOCK_SIZE;
var vaddr = vaddr_start;
// Map each block in the region to a freshly allocated physical block
while (i < num) : (i += 1) {
const addr = pmm.alloc() orelse unreachable;
try block_list.append(addr);
// The map function failing isn't the caller's responsibility so panic as it shouldn't happen
self.mapper.mapFn(vaddr, vaddr + BLOCK_SIZE, addr, addr + BLOCK_SIZE, attrs, self.allocator, self.payload) catch |e| panic(@errorReturnTrace(), "Failed to map virtual memory: {}\n", .{e});
vaddr += BLOCK_SIZE;
}
_ = try self.allocations.put(vaddr_start, Allocation{ .physical = block_list });
// Return the start of the virtual region; `free` expects this same address
return vaddr_start;
}
}
return null;
}
///
/// Free a previous allocation
///
/// Arguments:
/// INOUT self: *Self - The manager to free within
/// IN vaddr: usize - The start of the allocation to free. This should be the address returned from a prior `alloc` call
///
/// Error: Bitmap.BitmapError || VmmError
/// VmmError.NotAllocated - This address hasn't been allocated yet
/// Bitmap.BitmapError.OutOfBounds - The address is out of the manager's bounds
///
pub fn free(self: *Self, vaddr: usize) (bitmap.Bitmap(u32).BitmapError || VmmError)!void {
const entry = vaddr / BLOCK_SIZE;
if (try self.bmp.isSet(entry)) {
// There will be an allocation associated with this virtual address
const allocation = self.allocations.get(vaddr) orelse unreachable;
const physical = allocation.value.physical;
defer physical.deinit();
const num_physical_allocations = physical.items.len;
for (physical.items) |block, i| {
// Clear the address space entry, unmap the virtual memory and free the physical memory
try self.bmp.clearEntry(entry + i);
pmm.free(block) catch |e| panic(@errorReturnTrace(), "Failed to free PMM reserved memory at 0x{x}: {}\n", .{ block, e });
}
// Unmap the entire range
const region_start = entry * BLOCK_SIZE;
const region_end = (entry + num_physical_allocations) * BLOCK_SIZE;
self.mapper.unmapFn(region_start, region_end, self.payload) catch |e| panic(@errorReturnTrace(), "Failed to unmap VMM reserved memory from {x} to {x}: {}\n", .{ region_start, region_end, e });
// The allocation is freed so remove from the map
self.allocations.removeAssertDiscard(vaddr);
} else {
return VmmError.NotAllocated;
}
}
};
}
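Tying the pieces together, a sketch of an alloc/free round trip, using the `kernel_vmm` that kmain.zig sets up earlier in this diff:

// Allocate two contiguous blocks of kernel-only memory, then release them.
if (try kernel_vmm.alloc(2, .{ .kernel = true, .writable = true, .cachable = true })) |vaddr| {
    // vaddr is now mapped and backed by two freshly allocated physical blocks.
    kernel_vmm.free(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to free VMM allocation: {}\n", .{e});
}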
///
/// Initialise the main system virtual memory manager covering 4GB. Maps in the kernel code, TTY, multiboot info and boot modules
///
/// Arguments:
/// IN mem_profile: *const mem.MemProfile - The system's memory profile. This is used to find the kernel code region and boot modules
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info
/// INOUT allocator: *std.mem.Allocator - The allocator to use when needing to allocate memory
///
/// Return: VirtualMemoryManager
/// The virtual memory manager created with all stated regions allocated
///
/// Error: std.mem.Allocator.Error
/// std.mem.Allocator.Error.OutOfMemory - The allocator cannot allocate the memory required
///
pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
log.logInfo("Init vmm\n", .{});
defer log.logInfo("Done vmm\n", .{});
var vmm = try VirtualMemoryManager(arch.VmmPayload).init(0, 0xFFFFFFFF, allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
// Map in kernel
// Calculate start and end of mapping
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
vmm.set(v_start, v_end, p_start, p_end, .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping kernel code in VMM: {}", .{e});
// Map in tty
const tty_addr = tty.getVideoBufferAddress();
const tty_phys = mem.virtToPhys(tty_addr);
const tty_buff_size = 32 * 1024;
vmm.set(tty_addr, tty_addr + tty_buff_size, tty_phys, tty_phys + tty_buff_size, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping TTY in VMM: {}", .{e});
// Map in the multiboot info struct
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
vmm.set(mb_info_addr, mb_info_end, mem.virtToPhys(mb_info_addr), mem.virtToPhys(mb_info_end), .{ .kernel = true, .writable = false, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping multiboot info in VMM: {}", .{e});
// Map in each boot module
for (mem_profile.boot_modules) |*module| {
const mod_v_struct_start = std.mem.alignBackward(@ptrToInt(module), BLOCK_SIZE);
const mod_v_struct_end = std.mem.alignForward(mod_v_struct_start + @sizeOf(multiboot.multiboot_module_t), BLOCK_SIZE);
vmm.set(mod_v_struct_start, mod_v_struct_end, mem.virtToPhys(mod_v_struct_start), mem.virtToPhys(mod_v_struct_end), .{ .kernel = true, .writable = true, .cachable = true }) catch |e| switch (e) {
// A previous allocation could cover this region so the AlreadyAllocated error can be ignored
VmmError.AlreadyAllocated => {},
else => panic(@errorReturnTrace(), "Failed mapping boot module struct in VMM: {}", .{e}),
};
const mod_p_start = std.mem.alignBackward(module.mod_start, BLOCK_SIZE);
const mod_p_end = std.mem.alignForward(module.mod_end, BLOCK_SIZE);
vmm.set(mem.physToVirt(mod_p_start), mem.physToVirt(mod_p_end), mod_p_start, mod_p_end, .{ .kernel = true, .writable = true, .cachable = true }) catch |e| panic(@errorReturnTrace(), "Failed mapping boot module in VMM: {}", .{e});
}
if (build_options.rt_test) runtimeTests(arch.VmmPayload, vmm, mem_profile, mb_info);
return vmm;
}
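// A hedged sketch of the expected call site (hypothetical; the real wiring lives in the
// kernel entry point, not in this file):
//
//     var kernel_vmm = vmm.init(&mem_profile, mb_info, &fixed_allocator.allocator)
//         catch |e| panic(@errorReturnTrace(), "Failed to initialise VMM: {}", .{e});
//
// where `mem_profile`, `mb_info` and `fixed_allocator` are assumed to have been set up
// earlier in the boot sequence. init() itself maps the kernel code, TTY, multiboot info
// and boot modules before returning the manager.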
test "alloc and free" {
const num_entries = 512;
var vmm = try testInit(num_entries);
var allocations = test_allocations orelse unreachable;
var virtual_allocations = std.ArrayList(usize).init(std.testing.allocator);
defer virtual_allocations.deinit();
var entry: u32 = 0;
while (entry < num_entries) {
// Test allocating various numbers of blocks all at once
// Rather than using a random number generator, just set the number of blocks to allocate based on how many entries have been done so far
var num_to_alloc: u32 = if (entry > 400) @as(u32, 8) else if (entry > 320) @as(u32, 14) else if (entry > 270) @as(u32, 9) else if (entry > 150) @as(u32, 26) else @as(u32, 1);
const result = try vmm.alloc(num_to_alloc, .{ .kernel = true, .writable = true, .cachable = true });
var should_be_set = true;
if (entry + num_to_alloc > num_entries) {
// If the number to allocate exceeded the number of entries, then allocation should have failed
std.testing.expectEqual(@as(?usize, null), result);
should_be_set = false;
} else {
// Else it should have succeeded and allocated the correct address
std.testing.expectEqual(@as(?usize, entry * BLOCK_SIZE), result);
try virtual_allocations.append(result orelse unreachable);
}
// Make sure that the entries are set or not depending on the allocation success
var vaddr = entry * BLOCK_SIZE;
while (vaddr < (entry + num_to_alloc) * BLOCK_SIZE) : (vaddr += BLOCK_SIZE) {
if (should_be_set) {
// Allocation succeeded so this address should be set
std.testing.expect(try vmm.isSet(vaddr));
// The test mapper should have received this address
std.testing.expect(try allocations.isSet(vaddr / BLOCK_SIZE));
} else {
// Allocation failed as there weren't enough free entries
if (vaddr >= num_entries * BLOCK_SIZE) {
// If this address is beyond the VMM's end address, it should be out of bounds
std.testing.expectError(bitmap.Bitmap(u32).BitmapError.OutOfBounds, vmm.isSet(vaddr));
std.testing.expectError(bitmap.Bitmap(u64).BitmapError.OutOfBounds, allocations.isSet(vaddr / BLOCK_SIZE));
} else {
// Else it should not be set
std.testing.expect(!(try vmm.isSet(vaddr)));
// The test mapper should not have received this address
std.testing.expect(!(try allocations.isSet(vaddr / BLOCK_SIZE)));
}
}
}
entry += num_to_alloc;
// All later entries should not be set
var later_entry = entry;
while (later_entry < num_entries) : (later_entry += 1) {
std.testing.expect(!(try vmm.isSet(later_entry * BLOCK_SIZE)));
std.testing.expect(!(try pmm.isSet(later_entry * BLOCK_SIZE)));
}
}
// Try freeing all allocations
for (virtual_allocations.items) |alloc| {
const alloc_group = vmm.allocations.get(alloc);
std.testing.expect(alloc_group != null);
const physical = alloc_group.?.value.physical;
// We need to create a copy of the physical allocations since the free call deinits them
var physical_copy = std.ArrayList(usize).init(std.testing.allocator);
defer physical_copy.deinit();
// Make sure they are all reserved in the PMM
for (physical.items) |phys| {
std.testing.expect(try pmm.isSet(phys));
try physical_copy.append(phys);
}
vmm.free(alloc) catch unreachable;
// This virtual allocation should no longer be in the hashmap
std.testing.expectEqual(vmm.allocations.get(alloc), null);
std.testing.expect(!try vmm.isSet(alloc));
// And all its physical blocks should now be free
for (physical_copy.items) |phys| {
std.testing.expect(!try pmm.isSet(phys));
}
}
}
test "set" {
const num_entries = 512;
var vmm = try testInit(num_entries);
const vstart = BLOCK_SIZE * 37;
const vend = BLOCK_SIZE * 46;
const pstart = vstart + 123;
const pend = vend + 123;
const attrs = Attributes{ .kernel = true, .writable = true, .cachable = true };
try vmm.set(vstart, vend, pstart, pend, attrs);
var allocations = test_allocations orelse unreachable;
// The entries before the virtual start shouldn't be set
var vaddr = vmm.start;
while (vaddr < vstart) : (vaddr += BLOCK_SIZE) {
std.testing.expect(!(try allocations.isSet(vaddr / BLOCK_SIZE)));
}
// The entries up until the virtual end should be set
while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
std.testing.expect(try allocations.isSet(vaddr / BLOCK_SIZE));
}
// The entries after the virtual end should not be set
while (vaddr < vmm.end) : (vaddr += BLOCK_SIZE) {
std.testing.expect(!(try allocations.isSet(vaddr / BLOCK_SIZE)));
}
}
var test_allocations: ?bitmap.Bitmap(u64) = null;
var test_mapper = Mapper(u8){ .mapFn = testMap, .unmapFn = testUnmap };
///
/// Initialise a virtual memory manager used for testing
///
/// Arguments:
/// IN num_entries: u32 - The number of entries the VMM should track
///
/// Return: VirtualMemoryManager(u8)
/// The VMM constructed
///
/// Error: std.mem.Allocator.Error
/// OutOfMemory: The allocator couldn't allocate the structures needed
///
fn testInit(num_entries: u32) std.mem.Allocator.Error!VirtualMemoryManager(u8) {
if (test_allocations == null) {
test_allocations = try bitmap.Bitmap(u64).init(num_entries, std.heap.page_allocator);
} else if (test_allocations) |*allocations| {
// Clear any bitmap entries left over from a previous test run
var entry: u32 = 0;
while (entry < allocations.num_entries) : (entry += 1) {
allocations.clearEntry(entry) catch unreachable;
}
}
var allocations = test_allocations orelse unreachable;
const mem_profile = mem.MemProfile{
.vaddr_end = undefined,
.vaddr_start = undefined,
.physaddr_start = undefined,
.physaddr_end = undefined,
.mem_kb = num_entries * BLOCK_SIZE / 1024,
.fixed_alloc_size = undefined,
.mem_map = &[_]multiboot.multiboot_memory_map_t{},
.boot_modules = &[_]multiboot.multiboot_module_t{},
};
pmm.init(&mem_profile, std.heap.page_allocator);
return try VirtualMemoryManager(u8).init(0, num_entries * BLOCK_SIZE, std.heap.page_allocator, test_mapper, 39);
}
///
/// A mapping function used when doing unit tests
///
/// Arguments:
/// IN vstart: usize - The start of the virtual region to map
/// IN vend: usize - The end of the virtual region to map
/// IN pstart: usize - The start of the physical region to map
/// IN pend: usize - The end of the physical region to map
/// IN attrs: Attributes - The attributes to map with
/// INOUT allocator: *std.mem.Allocator - The allocator to use. Ignored
/// IN payload: u8 - The payload value. Expected to be 39
///
fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: *std.mem.Allocator, payload: u8) (std.mem.Allocator.Error || MapperError)!void {
std.testing.expectEqual(@as(u8, 39), payload);
var vaddr = vstart;
while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
(test_allocations orelse unreachable).setEntry(vaddr / BLOCK_SIZE) catch unreachable;
}
}
///
/// An unmapping function used when doing unit tests
///
/// Arguments:
/// IN vstart: usize - The start of the virtual region to unmap
/// IN vend: usize - The end of the virtual region to unmap
/// IN payload: u8 - The payload value. Expected to be 39
///
fn testUnmap(vstart: usize, vend: usize, payload: u8) (std.mem.Allocator.Error || MapperError)!void {
std.testing.expectEqual(@as(u8, 39), payload);
var vaddr = vstart;
while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
(test_allocations orelse unreachable).clearEntry(vaddr / BLOCK_SIZE) catch unreachable;
}
}
///
/// Run the runtime tests.
///
/// Arguments:
/// IN comptime Payload: type - The type of the payload passed to the mapper
/// IN vmm: VirtualMemoryManager(Payload) - The virtual memory manager to test
/// IN mem_profile: *const mem.MemProfile - The mem profile with details about all the memory regions that should be reserved
/// IN mb_info: *multiboot.multiboot_info_t - The multiboot info struct that should also be reserved
///
fn runtimeTests(comptime Payload: type, vmm: VirtualMemoryManager(Payload), mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t) void {
const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), BLOCK_SIZE);
const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), BLOCK_SIZE);
const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, BLOCK_SIZE);
const tty_addr = tty.getVideoBufferAddress();
const tty_phys = mem.virtToPhys(tty_addr);
const tty_buff_size = 32 * 1024;
const mb_info_addr = std.mem.alignBackward(@ptrToInt(mb_info), BLOCK_SIZE);
const mb_info_end = std.mem.alignForward(mb_info_addr + @sizeOf(multiboot.multiboot_info_t), BLOCK_SIZE);
// Make sure all blocks before the mb info are not set
var vaddr = vmm.start;
while (vaddr < mb_info_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {}", .{ vaddr, e });
if (set) panic(null, "Address before mb_info was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the mb info are set
while (vaddr < mb_info_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if mb_info address {x} is set: {}", .{ vaddr, e });
if (!set) panic(null, "Address for mb_info was not set: {x}", .{vaddr});
}
// Make sure all blocks before the tty buffer are not set
while (vaddr < tty_addr) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {}", .{ vaddr, e });
if (set) panic(null, "Address before tty was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the tty buffer are set
while (vaddr < tty_addr + tty_buff_size) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if tty address {x} is set: {}", .{ vaddr, e });
if (!set) panic(null, "Address for tty was not set: {x}", .{vaddr});
}
// Make sure all blocks before the kernel code are not set
while (vaddr < v_start) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {}", .{ vaddr, e });
if (set) panic(null, "Address before kernel code was set: {x}", .{vaddr});
}
// Make sure all blocks associated with the kernel code are set
while (vaddr < v_end) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if kernel code address {x} is set: {}", .{ vaddr, e });
if (!set) panic(null, "Address for kernel code was not set: {x}", .{vaddr});
}
// Make sure all blocks after the kernel code are not set
while (vaddr < vmm.end - BLOCK_SIZE) : (vaddr += BLOCK_SIZE) {
const set = vmm.isSet(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to check if address after {x} is set: {}", .{ vaddr, e });
if (set) panic(null, "Address after kernel code was set: {x}", .{vaddr});
}
log.logInfo("VMM: Tested allocations\n", .{});
}

View file

@ -5,6 +5,7 @@ const MemProfile = mem.MemProfile;
const gdt = @import("gdt_mock.zig");
const idt = @import("idt_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const vmm = @import("vmm_mock.zig");
const paging = @import("paging_mock.zig");
const mock_framework = @import("mock_framework.zig");
@ -36,7 +37,10 @@ pub const InterruptContext = struct {
ss: u32,
};
pub const VmmPayload = u8;
pub const KERNEL_VMM_PAYLOAD: usize = 0;
pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB;
pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = undefined;
pub fn outb(port: u16, data: u8) void {
return mock_framework.performAction("outb", void, .{ port, data });

View file

@ -8,6 +8,7 @@ pub const MemProfile = struct {
mem_kb: u32,
fixed_alloc_size: u32,
mem_map: []multiboot.multiboot_memory_map_t,
boot_modules: []multiboot.multiboot_module_t,
};
// The virtual/physical start/end of the kernel code
@ -30,5 +31,24 @@ pub fn init(mb_info: *multiboot.multiboot_info_t) MemProfile {
.mem_kb = mb_info.mem_upper + mb_info.mem_lower + 1024,
.fixed_alloc_size = FIXED_ALLOC_SIZE,
.mem_map = undefined,
.boot_modules = undefined,
};
}
pub fn virtToPhys(virt: var) @TypeOf(virt) {
const T = @TypeOf(virt);
return switch (@typeInfo(T)) {
.Pointer => @intToPtr(T, @ptrToInt(virt) - KERNEL_ADDR_OFFSET),
.Int => virt - KERNEL_ADDR_OFFSET,
else => @compileError("Only pointers and integers are supported"),
};
}
pub fn physToVirt(phys: var) @TypeOf(phys) {
const T = @TypeOf(phys);
return switch (@typeInfo(T)) {
.Pointer => @intToPtr(T, @ptrToInt(phys) + KERNEL_ADDR_OFFSET),
.Int => phys + KERNEL_ADDR_OFFSET,
else => @compileError("Only pointers and integers are supported"),
};
}
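// An illustrative round-trip check (assuming the usual higher-half offset
// KERNEL_ADDR_OFFSET == 0xC0000000; the constant's value is not shown in this diff):
//     virtToPhys(@as(usize, 0xC0100000)) == 0x100000
//     physToVirt(@as(usize, 0x100000)) == 0xC0100000
// The pointer variants apply the same offset to the pointer's address bits.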

View file

@ -0,0 +1,16 @@
const mem = @import("mem_mock.zig");
const multiboot = @import("../../../src/kernel/multiboot.zig");
const arch = @import("arch_mock.zig");
const std = @import("std");
pub fn Mapper(comptime Payload: type) type {
return struct {};
}
pub fn VirtualMemoryManager(comptime Payload: type) type {
return struct {};
}
pub fn init(mem_profile: *const mem.MemProfile, mb_info: *multiboot.multiboot_info_t, allocator: *std.mem.Allocator) std.mem.Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
return std.mem.Allocator.Error.OutOfMemory;
}

View file

@ -40,20 +40,22 @@ def get_pre_archinit_cases():
TestCase("Mem init", [r"Init mem"]), TestCase("Mem init", [r"Init mem"]),
TestCase("Mem done", [r"Done mem"]), TestCase("Mem done", [r"Done mem"]),
TestCase("Panic init", [r"Init panic"]),
TestCase("Panic done", [r"Done panic"]),
TestCase("PMM init", [r"Init pmm"]), TestCase("PMM init", [r"Init pmm"]),
TestCase("PMM tests", [r"PMM: Tested allocation"]), TestCase("PMM tests", [r"PMM: Tested allocation"]),
TestCase("PMM done", [r"Done pmm"]), TestCase("PMM done", [r"Done pmm"]),
TestCase("VMM init", [r"Init vmm"]),
TestCase("VMM tests", [r"VMM: Tested allocations"]),
TestCase("VMM done", [r"Done vmm"]),
TestCase("Arch init starts", [r"Init arch \w+"]) TestCase("Arch init starts", [r"Init arch \w+"])
] ]
def get_post_archinit_cases():
return [
TestCase("Arch init finishes", [r"Arch init done"]),
TestCase("Panic init", [r"Init panic"]),
TestCase("Panic done", [r"Done panic"]),
TestCase("VGA init", [r"Init vga"]), TestCase("VGA init", [r"Init vga"]),
TestCase("VGA tests", [r"VGA: Tested max scan line", r"VGA: Tested cursor shape", r"VGA: Tested updating cursor"]), TestCase("VGA tests", [r"VGA: Tested max scan line", r"VGA: Tested cursor shape", r"VGA: Tested updating cursor"]),
TestCase("VGA done", [r"Done vga"]), TestCase("VGA done", [r"Done vga"]),