const std = @import("std");
const testing = std.testing;
const expectEqual = testing.expectEqual;
const expect = testing.expect;
const log = std.log.scoped(.x86_paging);
const builtin = @import("builtin");
const is_test = builtin.is_test;
const panic = @import("../../panic.zig").panic;
const build_options = @import("build_options");
const mock_path = build_options.arch_mock_path;
const arch = if (is_test) @import(mock_path ++ "arch_mock.zig") else @import("arch.zig");
const isr = @import("isr.zig");
const MemProfile = @import("../../mem.zig").MemProfile;
const tty = @import("../../tty.zig");
const mem = @import("../../mem.zig");
const vmm = @import("../../vmm.zig");
const multiboot = @import("multiboot.zig");
const Allocator = std.mem.Allocator;

/// An array of directory entries and page tables. Forms the first level of paging and covers the entire 4GB memory space.
pub const Directory = packed struct {
    /// The directory entries.
    entries: [ENTRIES_PER_DIRECTORY]DirectoryEntry,

    /// The tables allocated for the directory. This is ignored by the CPU.
    tables: [ENTRIES_PER_DIRECTORY]?*Table,
};

/// An array of table entries. Forms the second level of paging and covers a 4MB memory space.
const Table = packed struct {
    /// The table entries.
    entries: [ENTRIES_PER_TABLE]TableEntry,
};
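
// With 1024 4-byte entries, a whole Table occupies exactly 4KB, as does the `entries` array of a
// Directory, which is why tables are allocated with 4KB alignment further down.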

/// An entry within a directory. References a single page table.
/// Bit 0: Present. Set if present in physical memory.
///        When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this table.
/// Bit 5: Accessed. Set by the CPU when the table is accessed. Not cleared by the CPU.
/// Bit 6: Zero.
/// Bit 7: Page size. Set if this entry covers a single 4MB page rather than 1024 4KB pages.
/// Bit 8: Ignored.
/// Bits 9-11: Ignored and available for use by the kernel.
/// Bits 12-31: The 4KB aligned physical address of the corresponding page table.
///             Must be 4MB aligned if the page size bit is set.
const DirectoryEntry = u32;

/// An entry within a page table. References a single page.
/// Bit 0: Present. Set if present in physical memory.
///        When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this page.
/// Bit 5: Accessed. Set by the CPU when the page is accessed. Not cleared by the CPU.
/// Bit 6: Dirty. Set by the CPU when the page has been written to. Not cleared by the CPU.
/// Bit 7: Zero.
/// Bit 8: Global. Set if the cached address for this page shouldn't be updated when cr3 is changed.
/// Bits 9-11: Ignored and available for use by the kernel.
/// Bits 12-31: The 4KB aligned physical address mapped to this page.
const TableEntry = u32;

/// Each directory has 1024 entries
const ENTRIES_PER_DIRECTORY: u32 = 1024;

/// Each table has 1024 entries
const ENTRIES_PER_TABLE: u32 = 1024;

/// Each directory entry covers 1024 pages, one per entry of its page table
const PAGES_PER_DIR_ENTRY: u32 = 1024;

/// There are roughly 1 million (1024 * 1024) pages per directory
const PAGES_PER_DIR: u32 = ENTRIES_PER_DIRECTORY * PAGES_PER_DIR_ENTRY;

/// The bitmasks for the bits in a DirectoryEntry
const DENTRY_PRESENT: u32 = 0x1;
const DENTRY_WRITABLE: u32 = 0x2;
const DENTRY_USER: u32 = 0x4;
const DENTRY_WRITE_THROUGH: u32 = 0x8;
const DENTRY_CACHE_DISABLED: u32 = 0x10;
const DENTRY_ACCESSED: u32 = 0x20;
const DENTRY_ZERO: u32 = 0x40;
const DENTRY_4MB_PAGES: u32 = 0x80;
const DENTRY_IGNORED: u32 = 0x100;
const DENTRY_AVAILABLE: u32 = 0xE00;
const DENTRY_PAGE_ADDR: u32 = 0xFFFFF000;

/// The bitmasks for the bits in a TableEntry
const TENTRY_PRESENT: u32 = 0x1;
const TENTRY_WRITABLE: u32 = 0x2;
const TENTRY_USER: u32 = 0x4;
const TENTRY_WRITE_THROUGH: u32 = 0x8;
const TENTRY_CACHE_DISABLED: u32 = 0x10;
const TENTRY_ACCESSED: u32 = 0x20;
const TENTRY_DIRTY: u32 = 0x40;
const TENTRY_ZERO: u32 = 0x80;
const TENTRY_GLOBAL: u32 = 0x100;
const TENTRY_AVAILABLE: u32 = 0xE00;
const TENTRY_PAGE_ADDR: u32 = 0xFFFFF000;
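
// Illustrative example (the value is made up, not taken from a real mapping): a DirectoryEntry
// holding 0x00103003 decodes as
//   0x00103003 & DENTRY_PAGE_ADDR == 0x00103000 -> the page table lives at physical 0x00103000
//   0x00103003 & DENTRY_WRITABLE  != 0          -> the region is writable
//   0x00103003 & DENTRY_PRESENT   != 0          -> the entry is present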

/// The number of bytes in 4MB
pub const PAGE_SIZE_4MB: usize = 0x400000;

/// The number of bytes in 4KB
pub const PAGE_SIZE_4KB: usize = PAGE_SIZE_4MB / 1024;

/// The kernel's page directory. Should only be used to map kernel-owned code and data
pub var kernel_directory: Directory align(@truncate(u29, PAGE_SIZE_4KB)) = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };

///
/// Convert a virtual address to an index within an array of directory entries.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of directory entries.
///
inline fn virtToDirEntryIdx(virt: usize) usize {
    return virt / PAGE_SIZE_4MB;
}

///
/// Convert a virtual address to an index within an array of table entries.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of table entries.
///
inline fn virtToTableEntryIdx(virt: usize) usize {
    return (virt / PAGE_SIZE_4KB) % ENTRIES_PER_TABLE;
}
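
// Illustrative example: for the virtual address 0xC01234,
//   virtToDirEntryIdx(0xC01234)   == 0xC01234 / 0x400000        == 3
//   virtToTableEntryIdx(0xC01234) == (0xC01234 / 0x1000) % 1024 == 1
// so the address is reached through directory entry 3 and table entry 1, with the low 0x234
// bits being the offset within the 4KB page.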

///
/// Set the bit(s) associated with an attribute of a table or directory entry.
///
/// Arguments:
///     val: *align(1) u32 - The entry to modify
///     attr: u32 - The bits corresponding to the attribute to set
///
inline fn setAttribute(val: *align(1) u32, attr: u32) void {
    val.* |= attr;
}

///
/// Clear the bit(s) associated with an attribute of a table or directory entry.
///
/// Arguments:
///     val: *align(1) u32 - The entry to modify
///     attr: u32 - The bits corresponding to the attribute to clear
///
inline fn clearAttribute(val: *align(1) u32, attr: u32) void {
    val.* &= ~attr;
}
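
// Illustrative note: because these helpers operate on plain bitmasks, several attributes can be
// set or cleared in a single call, e.g. setAttribute(&entry, DENTRY_PRESENT | DENTRY_WRITABLE).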

///
/// Map a page directory entry, setting the present, write-through and physical address bits,
/// clearing the 4MB page-size bit, and setting the writable, user and cache-disabled bits
/// according to the given attributes. The entry should be zeroed before it is first mapped.
///
/// Arguments:
///     IN virt_start: usize - The start of the virtual space to map
///     IN virt_end: usize - The end of the virtual space to map
///     IN phys_start: usize - The start of the physical space to map
///     IN phys_end: usize - The end of the physical space to map
///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
///     IN allocator: *Allocator - The allocator to use to map any tables needed
///     IN/OUT dir: *Directory - The directory that this entry is in
///
/// Error: vmm.MapperError || Allocator.Error
///     vmm.MapperError.InvalidPhysicalAddress - The physical start address is greater than the end
///     vmm.MapperError.InvalidVirtualAddress - The virtual start address is greater than the end or is larger than 4GB
///     vmm.MapperError.AddressMismatch - The differences between the virtual addresses and the physical addresses aren't the same
///     vmm.MapperError.MisalignedPhysicalAddress - One or both of the physical addresses aren't page size aligned
///     vmm.MapperError.MisalignedVirtualAddress - One or both of the virtual addresses aren't page size aligned
///     Allocator.Error.* - See Allocator.alignedAlloc
///
fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: *Allocator) (vmm.MapperError || Allocator.Error)!void {
    if (phys_start > phys_end) {
        return vmm.MapperError.InvalidPhysicalAddress;
    }
    if (virt_start > virt_end) {
        return vmm.MapperError.InvalidVirtualAddress;
    }
    if (phys_end - phys_start != virt_end - virt_start) {
        return vmm.MapperError.AddressMismatch;
    }
    if (!std.mem.isAligned(phys_start, PAGE_SIZE_4KB) or !std.mem.isAligned(phys_end, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedPhysicalAddress;
    }
    if (!std.mem.isAligned(virt_start, PAGE_SIZE_4KB) or !std.mem.isAligned(virt_end, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedVirtualAddress;
    }

    const entry = virtToDirEntryIdx(virt_start);
    var dir_entry = &dir.entries[entry];

    // Only create a new table if one hasn't already been created for this dir entry.
    // Prevents us from overriding previous mappings.
    var table: *Table = undefined;
    if (dir.tables[entry]) |tbl| {
        table = tbl;
    } else {
        // Create a table and put the physical address in the dir entry
        table = &(try allocator.alignedAlloc(Table, @truncate(u29, PAGE_SIZE_4KB), 1))[0];
        @memset(@ptrCast([*]u8, table), 0, @sizeOf(Table));
        const table_phys_addr = @ptrToInt(mem.virtToPhys(table));
        dir_entry.* |= DENTRY_PAGE_ADDR & table_phys_addr;
        dir.tables[entry] = table;
    }

    setAttribute(dir_entry, DENTRY_PRESENT);
    setAttribute(dir_entry, DENTRY_WRITE_THROUGH);
    clearAttribute(dir_entry, DENTRY_4MB_PAGES);

    if (attrs.writable) {
        setAttribute(dir_entry, DENTRY_WRITABLE);
    } else {
        clearAttribute(dir_entry, DENTRY_WRITABLE);
    }

    if (attrs.kernel) {
        clearAttribute(dir_entry, DENTRY_USER);
    } else {
        setAttribute(dir_entry, DENTRY_USER);
    }

    if (attrs.cachable) {
        clearAttribute(dir_entry, DENTRY_CACHE_DISABLED);
    } else {
        setAttribute(dir_entry, DENTRY_CACHE_DISABLED);
    }

    // Map the table entries within the requested space
    var virt = virt_start;
    var phys = phys_start;
    var tentry = virtToTableEntryIdx(virt);
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4KB;
        phys += PAGE_SIZE_4KB;
        tentry += 1;
    }) {
        try mapTableEntry(&table.entries[tentry], phys, attrs);
    }
}

///
/// Unmap a range within a page directory entry, clearing the present bit of each table entry in
/// the range.
///
/// Arguments:
///     IN virt_start: usize - The start of the virtual space to unmap
///     IN virt_end: usize - The end of the virtual space to unmap
///     IN/OUT dir: *Directory - The directory that this entry is in
///
/// Error: vmm.MapperError
///     vmm.MapperError.NotMapped - If the region being unmapped wasn't mapped in the first place
///
fn unmapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize) vmm.MapperError!void {
    const entry = virtToDirEntryIdx(virt_start);
    var dir_entry = &dir.entries[entry];
    const table = dir.tables[entry] orelse return vmm.MapperError.NotMapped;
    var addr = virt_start;
    while (addr < virt_end) : (addr += PAGE_SIZE_4KB) {
        var table_entry = &table.entries[virtToTableEntryIdx(addr)];
        if (table_entry.* & TENTRY_PRESENT != 0) {
            clearAttribute(table_entry, TENTRY_PRESENT);
        } else {
            return vmm.MapperError.NotMapped;
        }
    }
}

///
/// Map a table entry by setting its bits to the appropriate values.
/// Sets the present, write-through and page address bits, clears the global bit, and sets the
/// writable, user and cache-disabled bits according to the given attributes.
///
/// Arguments:
///     OUT entry: *align(1) TableEntry - The entry to map. 1 byte aligned.
///     IN phys_addr: usize - The physical address to map the table entry to.
///     IN attrs: vmm.Attributes - The attributes to apply to this mapping.
///
/// Error: vmm.MapperError
///     vmm.MapperError.MisalignedPhysicalAddress - If the physical address isn't page size aligned.
///
fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize, attrs: vmm.Attributes) vmm.MapperError!void {
    if (!std.mem.isAligned(phys_addr, PAGE_SIZE_4KB)) {
        return vmm.MapperError.MisalignedPhysicalAddress;
    }
    setAttribute(entry, TENTRY_PRESENT);
    if (attrs.writable) {
        setAttribute(entry, TENTRY_WRITABLE);
    } else {
        clearAttribute(entry, TENTRY_WRITABLE);
    }
    if (attrs.kernel) {
        clearAttribute(entry, TENTRY_USER);
    } else {
        setAttribute(entry, TENTRY_USER);
    }
    // Always enable write-through, matching mapDirEntry and the expectation in checkTableEntry.
    setAttribute(entry, TENTRY_WRITE_THROUGH);
    if (attrs.cachable) {
        clearAttribute(entry, TENTRY_CACHE_DISABLED);
    } else {
        setAttribute(entry, TENTRY_CACHE_DISABLED);
    }
    clearAttribute(entry, TENTRY_GLOBAL);
    setAttribute(entry, TENTRY_PAGE_ADDR & phys_addr);
}

///
/// Map a virtual region of memory to a physical region with a set of attributes within a directory.
/// If this call is made to a directory that has been loaded by the CPU, the virtual memory will immediately be accessible (given the proper attributes)
/// and will be mirrored to the physical region given. Otherwise it will be accessible once the given directory is loaded by the CPU.
///
/// Any errors encountered while mapping an individual directory entry are returned to the caller.
///
/// Arguments:
///     IN virtual_start: usize - The start of the virtual region to map
///     IN virtual_end: usize - The end (exclusive) of the virtual region to map
///     IN phys_start: usize - The start of the physical region to map to
///     IN phys_end: usize - The end (exclusive) of the physical region to map to
///     IN attrs: vmm.Attributes - The attributes to apply to this mapping
///     IN/OUT allocator: *Allocator - The allocator to use to allocate any intermediate data structures required to map this region
///     IN/OUT dir: *Directory - The page directory to map within
///
/// Error: vmm.MapperError || Allocator.Error
///     * - See mapDirEntry
///
pub fn map(virtual_start: usize, virtual_end: usize, phys_start: usize, phys_end: usize, attrs: vmm.Attributes, allocator: *Allocator, dir: *Directory) (Allocator.Error || vmm.MapperError)!void {
    var virt_addr = virtual_start;
    var phys_addr = phys_start;
    var virt_next = std.math.min(virtual_end, std.mem.alignBackward(virt_addr, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
    var phys_next = std.math.min(phys_end, std.mem.alignBackward(phys_addr, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
    var entry_idx = virtToDirEntryIdx(virt_addr);
    while (entry_idx < ENTRIES_PER_DIRECTORY and virt_addr < virtual_end) : ({
        virt_addr = virt_next;
        phys_addr = phys_next;
        virt_next = std.math.min(virtual_end, virt_next + PAGE_SIZE_4MB);
        phys_next = std.math.min(phys_end, phys_next + PAGE_SIZE_4MB);
        entry_idx += 1;
    }) {
        try mapDirEntry(dir, virt_addr, virt_next, phys_addr, phys_next, attrs, allocator);
    }
}
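
// A minimal usage sketch (illustrative only; the addresses and allocator are assumptions, not part
// of the kernel's real memory map):
//
//     const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
//     try map(0xC1000000, 0xC1400000, 0x1000000, 0x1400000, attrs, allocator, &kernel_directory);
//
// This maps the 4MB virtual region starting at 0xC1000000 onto the physical region starting at
// 0x1000000, which is covered by exactly one directory entry.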

///
/// Unmap a virtual region of memory within a directory so that it is no longer accessible.
///
/// Arguments:
///     IN virtual_start: usize - The start of the virtual region to unmap
///     IN virtual_end: usize - The end (exclusive) of the virtual region to unmap
///     IN/OUT dir: *Directory - The page directory to unmap within
///
/// Error: Allocator.Error || vmm.MapperError
///     vmm.MapperError.NotMapped - If the region being unmapped wasn't mapped in the first place
///
pub fn unmap(virtual_start: usize, virtual_end: usize, dir: *Directory) (Allocator.Error || vmm.MapperError)!void {
    var virt_addr = virtual_start;
    var virt_next = std.math.min(virtual_end, std.mem.alignBackward(virt_addr, PAGE_SIZE_4MB) + PAGE_SIZE_4MB);
    var entry_idx = virtToDirEntryIdx(virt_addr);
    while (entry_idx < ENTRIES_PER_DIRECTORY and virt_addr < virtual_end) : ({
        virt_addr = virt_next;
        virt_next = std.math.min(virtual_end, virt_next + PAGE_SIZE_4MB);
        entry_idx += 1;
    }) {
        try unmapDirEntry(dir, virt_addr, virt_next);
        if (virt_next - virt_addr >= PAGE_SIZE_4MB) {
            clearAttribute(&dir.entries[entry_idx], DENTRY_PRESENT);
        }
    }
}
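
// A minimal usage sketch (illustrative, mirroring the map example above):
//
//     try unmap(0xC1000000, 0xC1400000, &kernel_directory);
//
// Because the whole 4MB covered by the directory entry is unmapped, the entry's present bit is
// cleared as well and later accesses to the region will raise a page fault.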

///
/// Called when a page fault occurs. This will log the CPU state and control registers.
///
/// Arguments:
///     IN state: *arch.CpuState - The CPU's state when the fault occurred.
///
fn pageFault(state: *arch.CpuState) u32 {
    log.info("State: {X}\n", .{state});
    // CR0 holds the control flags (paging/protection enable bits).
    var cr0 = asm volatile ("mov %%cr0, %[cr0]"
        : [cr0] "=r" (-> u32)
    );
    // CR2 holds the virtual address that caused the fault.
    var cr2 = asm volatile ("mov %%cr2, %[cr2]"
        : [cr2] "=r" (-> u32)
    );
    // CR3 holds the physical address of the current page directory.
    var cr3 = asm volatile ("mov %%cr3, %[cr3]"
        : [cr3] "=r" (-> u32)
    );
    // CR4 holds the architectural extension flags.
    var cr4 = asm volatile ("mov %%cr4, %[cr4]"
        : [cr4] "=r" (-> u32)
    );
    log.info("CR0: 0x{X}, CR2: 0x{X}, CR3: 0x{X}, CR4: 0x{X}\n", .{ cr0, cr2, cr3, cr4 });
    @panic("Page fault");
}

///
/// Initialise x86 paging, overwriting any previous paging set up.
///
/// Arguments:
///     IN mem_profile: *const MemProfile - The memory profile of the system and kernel
///
pub fn init(mem_profile: *const MemProfile) void {
    log.info("Init\n", .{});
    defer log.info("Done\n", .{});

    isr.registerIsr(isr.PAGE_FAULT, if (build_options.test_mode == .Initialisation) rt_pageFault else pageFault) catch |e| {
        panic(@errorReturnTrace(), "Failed to register page fault ISR: {}\n", .{e});
    };

    // Load the kernel directory's physical address into CR3 so the CPU translates through it.
    const dir_physaddr = @ptrToInt(mem.virtToPhys(&kernel_directory));
    asm volatile ("mov %[addr], %%cr3"
        :
        : [addr] "{eax}" (dir_physaddr)
    );

    const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end), PAGE_SIZE_4KB);
    switch (build_options.test_mode) {
        .Initialisation => runtimeTests(v_end),
        else => {},
    }
}

/// Verify (in tests) that a directory entry and each table entry it covers were set as expected.
fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys_start: usize, attrs: vmm.Attributes, table: *Table, present: bool) void {
    expectEqual(entry & DENTRY_PRESENT, if (present) DENTRY_PRESENT else 0);
    expectEqual(entry & DENTRY_WRITABLE, if (attrs.writable) DENTRY_WRITABLE else 0);
    expectEqual(entry & DENTRY_USER, if (attrs.kernel) 0 else DENTRY_USER);
    expectEqual(entry & DENTRY_WRITE_THROUGH, DENTRY_WRITE_THROUGH);
    expectEqual(entry & DENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else DENTRY_CACHE_DISABLED);
    expectEqual(entry & DENTRY_4MB_PAGES, 0);
    expectEqual(entry & DENTRY_ZERO, 0);

    var tentry_idx = virtToTableEntryIdx(virt_start);
    var tentry_idx_end = virtToTableEntryIdx(virt_end);
    var phys = phys_start;
    while (tentry_idx < tentry_idx_end) : ({
        tentry_idx += 1;
        phys += PAGE_SIZE_4KB;
    }) {
        const tentry = table.entries[tentry_idx];
        checkTableEntry(tentry, phys, attrs, present);
    }
}

/// Verify (in tests) that a single table entry was set as expected for the given attributes.
fn checkTableEntry(entry: TableEntry, page_phys: usize, attrs: vmm.Attributes, present: bool) void {
    expectEqual(entry & TENTRY_PRESENT, if (present) TENTRY_PRESENT else 0);
    expectEqual(entry & TENTRY_WRITABLE, if (attrs.writable) TENTRY_WRITABLE else 0);
    expectEqual(entry & TENTRY_USER, if (attrs.kernel) 0 else TENTRY_USER);
    expectEqual(entry & TENTRY_WRITE_THROUGH, TENTRY_WRITE_THROUGH);
    expectEqual(entry & TENTRY_CACHE_DISABLED, if (attrs.cachable) 0 else TENTRY_CACHE_DISABLED);
    expectEqual(entry & TENTRY_ZERO, 0);
    expectEqual(entry & TENTRY_GLOBAL, 0);
    expectEqual(entry & TENTRY_PAGE_ADDR, page_phys);
}

test "setAttribute and clearAttribute" {
    var val: u32 = 0;
    const attrs = [_]u32{ DENTRY_PRESENT, DENTRY_WRITABLE, DENTRY_USER, DENTRY_WRITE_THROUGH, DENTRY_CACHE_DISABLED, DENTRY_ACCESSED, DENTRY_ZERO, DENTRY_4MB_PAGES, DENTRY_IGNORED, DENTRY_AVAILABLE, DENTRY_PAGE_ADDR };

    for (attrs) |attr| {
        const old_val = val;
        setAttribute(&val, attr);
        std.testing.expectEqual(val, old_val | attr);
    }

    for (attrs) |attr| {
        const old_val = val;
        clearAttribute(&val, attr);
        std.testing.expectEqual(val, old_val & ~attr);
    }
}

test "virtToDirEntryIdx" {
    expectEqual(virtToDirEntryIdx(0), 0);
    expectEqual(virtToDirEntryIdx(123), 0);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB - 1), 0);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB), 1);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB + 1), 1);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * 2), 2);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * (ENTRIES_PER_DIRECTORY - 1)), ENTRIES_PER_DIRECTORY - 1);
}

test "virtToTableEntryIdx" {
    expectEqual(virtToTableEntryIdx(0), 0);
    expectEqual(virtToTableEntryIdx(123), 0);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB - 1), 0);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB), 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB + 1), 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * 2), 2);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE - 1)), ENTRIES_PER_TABLE - 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE)), 0);
}

test "mapDirEntry" {
    var allocator = std.heap.page_allocator;
    var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };

    {
        const phys: usize = 0 * PAGE_SIZE_4MB;
        const phys_end: usize = phys + PAGE_SIZE_4MB;
        const virt: usize = 1 * PAGE_SIZE_4MB;
        const virt_end: usize = virt + PAGE_SIZE_4MB;

        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, .{ .kernel = true, .writable = true, .cachable = true }, allocator);

        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx] orelse unreachable;
        checkDirEntry(entry, virt, virt_end, phys, .{ .kernel = true, .writable = true, .cachable = true }, table, true);
    }

    {
        const phys: usize = 7 * PAGE_SIZE_4MB;
        const phys_end: usize = phys + PAGE_SIZE_4MB;
        const virt: usize = 8 * PAGE_SIZE_4MB;
        const virt_end: usize = virt + PAGE_SIZE_4MB;

        try mapDirEntry(&dir, virt, virt_end, phys, phys_end, .{ .kernel = false, .writable = false, .cachable = false }, allocator);

        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx] orelse unreachable;
        checkDirEntry(entry, virt, virt_end, phys, .{ .kernel = false, .writable = false, .cachable = false }, table, true);
    }
}

test "mapDirEntry returns errors correctly" {
    var allocator = std.heap.page_allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined };
    const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
    testing.expectError(vmm.MapperError.MisalignedVirtualAddress, mapDirEntry(&dir, 1, PAGE_SIZE_4KB + 1, 0, PAGE_SIZE_4KB, attrs, allocator));
    testing.expectError(vmm.MapperError.MisalignedPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB + 1, attrs, allocator));
    testing.expectError(vmm.MapperError.AddressMismatch, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB, attrs, allocator));
    testing.expectError(vmm.MapperError.InvalidVirtualAddress, mapDirEntry(&dir, 1, 0, 0, PAGE_SIZE_4KB, attrs, allocator));
    testing.expectError(vmm.MapperError.InvalidPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, 0, attrs, allocator));
}

test "map and unmap" {
    var allocator = std.heap.page_allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };

    const phys_start: usize = PAGE_SIZE_4MB * 2;
    const virt_start: usize = PAGE_SIZE_4MB * 4;
    const phys_end: usize = PAGE_SIZE_4MB * 4;
    const virt_end: usize = PAGE_SIZE_4MB * 6;
    const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
    map(virt_start, virt_end, phys_start, phys_end, attrs, allocator, &dir) catch unreachable;

    var virt = virt_start;
    var phys = phys_start;
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4MB;
        phys += PAGE_SIZE_4MB;
    }) {
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx] orelse unreachable;
        checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, true);
    }

    unmap(virt_start, virt_end, &dir) catch unreachable;

    virt = virt_start;
    phys = phys_start;
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4MB;
        phys += PAGE_SIZE_4MB;
    }) {
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx] orelse unreachable;
        checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, attrs, table, false);
    }
}

// The labels to jump to after attempting to cause a page fault. This is needed as we don't want to
// cause an infinite loop by jumping to the same instruction that caused the fault.
extern var rt_fault_callback: *u32;
extern var rt_fault_callback2: *u32;

var faulted = false;
var use_callback2 = false;

fn rt_pageFault(ctx: *arch.CpuState) u32 {
    faulted = true;
    // Return to the fault callback
    ctx.eip = @ptrToInt(&if (use_callback2) rt_fault_callback2 else rt_fault_callback);
    return @ptrToInt(ctx);
}

fn rt_accessUnmappedMem(v_end: u32) void {
    use_callback2 = false;
    faulted = false;
    // Accessing unmapped memory causes a page fault
    var ptr = @intToPtr(*u8, v_end);
    var value = ptr.*;
    // Need this as in release builds the read above is optimised out unless the value is used
    log.emerg("FAILURE: Value: {}\n", .{value});
    // This is the label that we return to after processing the page fault
    asm volatile (
        \\.global rt_fault_callback
        \\rt_fault_callback:
    );
    if (!faulted) {
        panic(@errorReturnTrace(), "FAILURE: Paging should have faulted\n", .{});
    }
    log.info("Tested accessing unmapped memory\n", .{});
}

fn rt_accessMappedMem(v_end: u32) void {
    use_callback2 = true;
    faulted = false;
    // Accessing mapped memory doesn't cause a page fault
    var ptr = @intToPtr(*u8, v_end - PAGE_SIZE_4KB);
    var value = ptr.*;
    asm volatile (
        \\.global rt_fault_callback2
        \\rt_fault_callback2:
    );
    if (faulted) {
        panic(@errorReturnTrace(), "FAILURE: Paging shouldn't have faulted\n", .{});
    }
    log.info("Tested accessing mapped memory\n", .{});
}

/// Run the paging runtime tests: access unmapped memory, then mapped memory.
pub fn runtimeTests(v_end: u32) void {
    rt_accessUnmappedMem(v_end);
    rt_accessMappedMem(v_end);
}