const std = @import("std");
const expectEqual = std.testing.expectEqual;
const expect = std.testing.expect;
const builtin = @import("builtin");
const panic = @import("../../panic.zig");
const arch = @import("arch.zig");
const isr = @import("isr.zig");
const MemProfile = @import("../../mem.zig").MemProfile;
const tty = @import("../../tty.zig");
/// An array of directory entries and page tables. Forms the first level of paging and covers the entire 4GB memory space.
const Directory = packed struct {
    /// The directory entries.
    entries: [ENTRIES_PER_DIRECTORY]DirectoryEntry,

    /// The tables allocated for the directory. This is ignored by the CPU.
    tables: [ENTRIES_PER_DIRECTORY]?*Table,
};

/// An array of table entries. Forms the second level of paging and covers a 4MB memory space.
const Table = packed struct {
    /// The table entries.
    entries: [ENTRIES_PER_TABLE]TableEntry,
};
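
// Size note: with 1024 four-byte entries a Table is exactly 4KB, as is a Directory's
// entries array; the tables array is kernel-side bookkeeping that the CPU never reads.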

/// All errors that can be thrown by paging functions.
const PagingError = error{
    /// Physical addresses are invalid (what counts as invalid is up to the function).
    InvalidPhysAddresses,

    /// Virtual addresses are invalid (what counts as invalid is up to the function).
    InvalidVirtAddresses,

    /// Physical and virtual addresses don't cover spaces of the same size.
    PhysicalVirtualMismatch,

    /// Physical addresses aren't aligned by page size.
    UnalignedPhysAddresses,

    /// Virtual addresses aren't aligned by page size.
    UnalignedVirtAddresses,
};

/// An entry within a directory. References a single page table.
/// Bit 0: Present. Set if present in physical memory.
///        When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this table.
/// Bit 5: Accessed. Set by the CPU when the table is accessed. Not cleared by the CPU.
/// Bit 6: Zero.
/// Bit 7: Page size. Set if this entry covers a single 4MB page rather than 1024 4KB pages.
/// Bit 8: Ignored.
/// Bits 9-11: Ignored and available for use by the kernel.
/// Bits 12-31: The 4KB aligned physical address of the corresponding page table.
///             Must be 4MB aligned if the page size bit is set.
const DirectoryEntry = u32;

/// An entry within a page table. References a single page.
/// Bit 0: Present. Set if present in physical memory.
///        When not set, all remaining 31 bits are ignored and available for use.
/// Bit 1: Writable. Set if writable.
/// Bit 2: User. Set if accessible by user mode.
/// Bit 3: Write through. Set if write-through caching is enabled.
/// Bit 4: Cache disabled. Set if caching is disabled for this page.
/// Bit 5: Accessed. Set by the CPU when the page is accessed. Not cleared by the CPU.
/// Bit 6: Dirty. Set by the CPU when the page has been written to. Not cleared by the CPU.
/// Bit 7: Zero.
/// Bit 8: Global. Set if the cached address for this page shouldn't be updated when cr3 is changed.
/// Bits 9-11: Ignored and available for use by the kernel.
/// Bits 12-31: The 4KB aligned physical address mapped to this page.
const TableEntry = u32;

/// Each directory has 1024 entries
const ENTRIES_PER_DIRECTORY: u32 = 1024;

/// Each table has 1024 entries
const ENTRIES_PER_TABLE: u32 = 1024;

/// The number of bytes in 4MB
const PAGE_SIZE_4MB: u32 = 0x400000;

/// The number of bytes in 4KB
const PAGE_SIZE_4KB: u32 = PAGE_SIZE_4MB / 1024;

/// Each directory entry references a table of 1024 4KB pages, so covers 4MB
const PAGES_PER_DIR_ENTRY: u32 = 1024;

/// There are 1024 * 1024 (roughly a million) 4KB pages per directory
const PAGES_PER_DIR: u32 = ENTRIES_PER_DIRECTORY * PAGES_PER_DIR_ENTRY;

/// The bitmasks for the bits in a DirectoryEntry
const DENTRY_PRESENT: u32 = 0x1;
const DENTRY_WRITABLE: u32 = 0x2;
const DENTRY_USER: u32 = 0x4;
const DENTRY_WRITE_THROUGH: u32 = 0x8;
const DENTRY_CACHE_DISABLED: u32 = 0x10;
const DENTRY_ACCESSED: u32 = 0x20;
const DENTRY_ZERO: u32 = 0x40;
const DENTRY_4MB_PAGES: u32 = 0x80;
const DENTRY_IGNORED: u32 = 0x100;
const DENTRY_AVAILABLE: u32 = 0xE00;
const DENTRY_PAGE_ADDR: u32 = 0xFFFFF000;

/// The bitmasks for the bits in a TableEntry
const TENTRY_PRESENT: u32 = 0x1;
const TENTRY_WRITABLE: u32 = 0x2;
const TENTRY_USER: u32 = 0x4;
const TENTRY_WRITE_THROUGH: u32 = 0x8;
const TENTRY_CACHE_DISABLED: u32 = 0x10;
const TENTRY_ACCESSED: u32 = 0x20;
const TENTRY_DIRTY: u32 = 0x40;
const TENTRY_ZERO: u32 = 0x80;
const TENTRY_GLOBAL: u32 = 0x100;
const TENTRY_AVAILABLE: u32 = 0xE00;
const TENTRY_PAGE_ADDR: u32 = 0xFFFFF000;
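
// As a worked illustration (value made up for this comment): a TableEntry of 0x0004200B
// has TENTRY_PRESENT, TENTRY_WRITABLE and TENTRY_WRITE_THROUGH set (0xB == 0x1 | 0x2 | 0x8)
// and maps the 4KB page at physical address 0x42000 (0x0004200B & TENTRY_PAGE_ADDR).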

/// The kernel's virtual address offset. It's assigned in the init function and the virtToPhys test.
/// We can't just use KERNEL_ADDR_OFFSET since using externs in the virtToPhys test is broken in
/// release-safe. This is a workaround until that is fixed.
var ADDR_OFFSET: usize = undefined;

extern var KERNEL_ADDR_OFFSET: *u32;

///
/// Convert a virtual address to its physical counterpart by subtracting the kernel virtual offset from the virtual address.
///
/// Arguments:
///     IN virt: var - The virtual address to convert. Either an integer or pointer.
///
/// Return: @typeOf(virt)
///     The physical address.
///
inline fn virtToPhys(virt: var) @typeOf(virt) {
    const offset = ADDR_OFFSET;
    const T = @typeOf(virt);
    return switch (@typeId(T)) {
        .Pointer => @intToPtr(T, @ptrToInt(virt) - offset),
        .Int => virt - offset,
        else => @compileError("Only pointers and integers are supported"),
    };
}
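
// A worked example (hypothetical offset; the real one comes from the linker script):
// with ADDR_OFFSET == 0xC0000000, a usize address 0xC0100000 maps to physical 0x00100000.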

///
/// Convert a virtual address to an index within an array of directory entries.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of directory entries.
///
inline fn virtToDirEntryIdx(virt: usize) usize {
    return (virt / PAGE_SIZE_4MB) % ENTRIES_PER_DIRECTORY;
}
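
// A worked example (address assumed, not taken from this file):
// virtToDirEntryIdx(0xC0100000) == (0xC0100000 / 0x400000) % 1024 == 768.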

///
/// Convert a virtual address to an index within an array of table entries.
///
/// Arguments:
///     IN virt: usize - The virtual address to convert.
///
/// Return: usize
///     The index into an array of table entries.
///
inline fn virtToTableEntryIdx(virt: usize) usize {
    return (virt / PAGE_SIZE_4KB) % ENTRIES_PER_TABLE;
}
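
// A worked example (same assumed address as above):
// virtToTableEntryIdx(0xC0100000) == (0xC0100000 / 0x1000) % 1024 == 256.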

///
/// Map a page directory entry, setting the present, size, writable, write-through and physical address bits.
/// Clears the user and cache disabled bits. The entry should be zeroed beforehand.
///
/// Arguments:
///     OUT dir: *Directory - The directory that this entry is in
///     IN virt_start: usize - The start of the virtual space to map
///     IN virt_end: usize - The end of the virtual space to map
///     IN phys_start: usize - The start of the physical space to map
///     IN phys_end: usize - The end of the physical space to map
///     IN allocator: *std.mem.Allocator - The allocator to use to map any tables needed
///
/// Error: PagingError || std.mem.Allocator.Error
///     PagingError.InvalidPhysAddresses - The physical start address is greater than the end.
///     PagingError.InvalidVirtAddresses - The virtual start address is greater than the end or is larger than 4GB.
///     PagingError.PhysicalVirtualMismatch - The differences between the virtual addresses and the physical addresses aren't the same.
///     PagingError.UnalignedPhysAddresses - One or both of the physical addresses aren't page size aligned.
///     PagingError.UnalignedVirtAddresses - One or both of the virtual addresses aren't page size aligned.
///     std.mem.Allocator.Error.* - See std.mem.Allocator.alignedAlloc.
///
fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start: usize, phys_end: usize, allocator: *std.mem.Allocator) (PagingError || std.mem.Allocator.Error)!void {
    if (phys_start > phys_end) {
        return PagingError.InvalidPhysAddresses;
    }
    if (virt_start > virt_end) {
        return PagingError.InvalidVirtAddresses;
    }
    if (phys_end - phys_start != virt_end - virt_start) {
        return PagingError.PhysicalVirtualMismatch;
    }
    if (!std.mem.isAligned(phys_start, PAGE_SIZE_4KB) or !std.mem.isAligned(phys_end, PAGE_SIZE_4KB)) {
        return PagingError.UnalignedPhysAddresses;
    }
    if (!std.mem.isAligned(virt_start, PAGE_SIZE_4KB) or !std.mem.isAligned(virt_end, PAGE_SIZE_4KB)) {
        return PagingError.UnalignedVirtAddresses;
    }

    const entry = virt_start / PAGE_SIZE_4MB;
    if (entry >= ENTRIES_PER_DIRECTORY)
        return PagingError.InvalidVirtAddresses;
    var dir_entry = &dir.entries[entry];

    // Set the required flag bits and clear those that must be unset
    dir_entry.* |= DENTRY_PRESENT;
    dir_entry.* |= DENTRY_WRITABLE;
    dir_entry.* &= ~u32(DENTRY_USER);
    dir_entry.* |= DENTRY_WRITE_THROUGH;
    dir_entry.* &= ~u32(DENTRY_CACHE_DISABLED);
    dir_entry.* &= ~u32(DENTRY_4MB_PAGES);

    // Only create a new table if one hasn't already been created for this dir entry.
    // Prevents us from overriding previous mappings.
    var table: *Table = undefined;
    if (dir.tables[entry]) |tbl| {
        table = tbl;
    } else {
        // Create a table and put the physical address in the dir entry
        table = &(try allocator.alignedAlloc(Table, @truncate(u29, PAGE_SIZE_4KB), 1))[0];
        @memset(@ptrCast([*]u8, table), 0, @sizeOf(Table));
        const table_phys_addr = @ptrToInt(virtToPhys(table));
        dir_entry.* |= @intCast(u32, DENTRY_PAGE_ADDR & table_phys_addr);
        dir.tables[entry] = table;
    }

    // Map the table entries within the requested space
    var virt = virt_start;
    var phys = phys_start;
    var tentry = virtToTableEntryIdx(virt);
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4KB;
        phys += PAGE_SIZE_4KB;
        tentry += 1;
    }) {
        try mapTableEntry(&table.entries[tentry], phys);
    }
}

///
/// Map a table entry by setting its bits to the appropriate values.
/// Sets the entry to be present, writable, kernel access, write through, cache enabled, non-global and the page address bits.
///
/// Arguments:
///     OUT entry: *align(1) TableEntry - The entry to map. 1 byte aligned.
///     IN phys_addr: usize - The physical address to map the table entry to.
///
/// Error: PagingError
///     PagingError.UnalignedPhysAddresses - If the physical address isn't page size aligned.
///
fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize) PagingError!void {
    if (!std.mem.isAligned(phys_addr, PAGE_SIZE_4KB)) {
        return PagingError.UnalignedPhysAddresses;
    }
    entry.* |= TENTRY_PRESENT;
    entry.* |= TENTRY_WRITABLE;
    entry.* &= ~u32(TENTRY_USER);
    entry.* |= TENTRY_WRITE_THROUGH;
    entry.* &= ~u32(TENTRY_CACHE_DISABLED);
    entry.* &= ~u32(TENTRY_GLOBAL);
    entry.* |= TENTRY_PAGE_ADDR & @intCast(u32, phys_addr);
}

///
/// Map a page directory. The addresses passed must be page size aligned and be the same distance apart.
///
/// Arguments:
///     OUT dir: *Directory - The directory to map
///     IN phys_start: usize - The physical address at which to start mapping
///     IN phys_end: usize - The physical address at which to stop mapping
///     IN virt_start: usize - The virtual address at which to start mapping
///     IN virt_end: usize - The virtual address at which to stop mapping
///     IN allocator: *std.mem.Allocator - The allocator to use to map any tables needed
///
/// Error: std.mem.Allocator.Error || PagingError
///     * - See mapDirEntry.
///
fn mapDir(dir: *Directory, phys_start: usize, phys_end: usize, virt_start: usize, virt_end: usize, allocator: *std.mem.Allocator) (std.mem.Allocator.Error || PagingError)!void {
    var virt_addr = virt_start;
    var phys_addr = phys_start;
    var entry_idx = virt_addr / PAGE_SIZE_4MB;
    while (entry_idx < ENTRIES_PER_DIRECTORY and virt_addr < virt_end) : ({
        phys_addr += PAGE_SIZE_4MB;
        virt_addr += PAGE_SIZE_4MB;
        entry_idx += 1;
    }) {
        // Clamp each chunk's end so the final iteration doesn't map past the requested range
        try mapDirEntry(dir, virt_addr, std.math.min(virt_end, virt_addr + PAGE_SIZE_4MB), phys_addr, std.math.min(phys_end, phys_addr + PAGE_SIZE_4MB), allocator);
    }
}
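
// A worked example (hypothetical ranges): mapping virt 0..10MB onto phys 0..10MB issues
// three mapDirEntry calls covering [0MB,4MB), [4MB,8MB) and [8MB,10MB); the std.math.min
// clamp trims the final chunk so no more than the requested range is mapped.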

///
/// Called when a page fault occurs.
///
/// Arguments:
///     IN state: *arch.InterruptContext - The CPU's state when the fault occurred.
///
fn pageFault(state: *arch.InterruptContext) void {
    @panic("Page fault");
}

///
/// Initialise x86 paging, overwriting any previous paging set up.
///
/// Arguments:
///     IN mem_profile: *const MemProfile - The memory profile of the system and kernel
///     IN allocator: *std.mem.Allocator - The allocator to use
///
pub fn init(mem_profile: *const MemProfile, allocator: *std.mem.Allocator) void {
    ADDR_OFFSET = @ptrToInt(&KERNEL_ADDR_OFFSET);

    // Calculate the start and end of the mapping
    const v_start = std.mem.alignBackward(@ptrToInt(mem_profile.vaddr_start), PAGE_SIZE_4KB);
    const v_end = std.mem.alignForward(@ptrToInt(mem_profile.vaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);
    const p_start = std.mem.alignBackward(@ptrToInt(mem_profile.physaddr_start), PAGE_SIZE_4KB);
    const p_end = std.mem.alignForward(@ptrToInt(mem_profile.physaddr_end) + mem_profile.fixed_alloc_size, PAGE_SIZE_4KB);

    var tmp = allocator.alignedAlloc(Directory, @truncate(u29, PAGE_SIZE_4KB), 1) catch panic.panicFmt(@errorReturnTrace(), "Failed to allocate page directory");
    var kernel_directory = @ptrCast(*Directory, tmp.ptr);
    @memset(@ptrCast([*]u8, kernel_directory), 0, @sizeOf(Directory));

    // Map in the kernel
    mapDir(kernel_directory, p_start, p_end, v_start, v_end, allocator) catch panic.panicFmt(@errorReturnTrace(), "Failed to map kernel directory");

    const tty_addr = tty.getVideoBufferAddress();
    // If the previous mapping space didn't cover the tty buffer, do so now
    if (v_start > tty_addr or v_end <= tty_addr) {
        const tty_phys = virtToPhys(tty_addr);
        const tty_buff_size = 32 * 1024;
        mapDir(kernel_directory, tty_phys, tty_phys + tty_buff_size, tty_addr, tty_addr + tty_buff_size, allocator) catch panic.panicFmt(@errorReturnTrace(), "Failed to map vga buffer in kernel directory");
    }

    const dir_physaddr = @ptrToInt(virtToPhys(kernel_directory));
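    // Load the page directory's physical address into cr3 so the MMU walks the new tables.
    // This assumes paging itself (the PG bit in cr0) was enabled earlier during boot.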
    asm volatile ("mov %[addr], %%cr3"
        :
        : [addr] "{eax}" (dir_physaddr)
    );

    isr.registerIsr(14, pageFault) catch panic.panicFmt(@errorReturnTrace(), "Failed to register page fault ISR");
}

/// Check that a directory entry and its table have the expected bits set for a mapping
/// of the given virtual range to the given physical start address.
fn checkDirEntry(entry: DirectoryEntry, virt_start: usize, virt_end: usize, phys_start: usize, table: *Table) void {
    expect(entry & DENTRY_PRESENT != 0);
    expect(entry & DENTRY_WRITABLE != 0);
    expectEqual(entry & DENTRY_USER, 0);
    expect(entry & DENTRY_WRITE_THROUGH != 0);
    expectEqual(entry & DENTRY_CACHE_DISABLED, 0);
    expectEqual(entry & DENTRY_4MB_PAGES, 0);
    expectEqual(entry & DENTRY_ZERO, 0);

    var tentry_idx = virtToTableEntryIdx(virt_start);
    var tentry_idx_end = virtToTableEntryIdx(virt_end);
    var phys = phys_start;
    while (tentry_idx < tentry_idx_end) : ({
        tentry_idx += 1;
        phys += PAGE_SIZE_4KB;
    }) {
        const tentry = table.entries[tentry_idx];
        checkTableEntry(tentry, phys);
    }
}

/// Check that a table entry has the expected bits set and maps the given physical page.
fn checkTableEntry(entry: TableEntry, page_phys: usize) void {
    expect(entry & TENTRY_PRESENT != 0);
    expect(entry & TENTRY_WRITABLE != 0);
    expectEqual(entry & TENTRY_USER, 0);
    expect(entry & TENTRY_WRITE_THROUGH != 0);
    expectEqual(entry & TENTRY_CACHE_DISABLED, 0);
    expectEqual(entry & TENTRY_ZERO, 0);
    expectEqual(entry & TENTRY_GLOBAL, 0);
    expectEqual(entry & TENTRY_PAGE_ADDR, @intCast(u32, page_phys));
}

test "virtToPhys" {
    ADDR_OFFSET = 0xC0000000;
    const offset: usize = ADDR_OFFSET;
    expectEqual(virtToPhys(offset + 0), 0);
    expectEqual(virtToPhys(offset + 123), 123);
    expectEqual(virtToPhys(@intToPtr(*usize, offset + 123)), @intToPtr(*usize, 123));
}

test "virtToDirEntryIdx" {
    expectEqual(virtToDirEntryIdx(0), 0);
    expectEqual(virtToDirEntryIdx(123), 0);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB - 1), 0);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB), 1);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB + 1), 1);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * 2), 2);
    expectEqual(virtToDirEntryIdx(PAGE_SIZE_4MB * (ENTRIES_PER_DIRECTORY - 1)), ENTRIES_PER_DIRECTORY - 1);
}

test "virtToTableEntryIdx" {
    expectEqual(virtToTableEntryIdx(0), 0);
    expectEqual(virtToTableEntryIdx(123), 0);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB - 1), 0);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB), 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB + 1), 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * 2), 2);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE - 1)), ENTRIES_PER_TABLE - 1);
    expectEqual(virtToTableEntryIdx(PAGE_SIZE_4KB * (ENTRIES_PER_TABLE)), 0);
}

test "mapDirEntry" {
    var allocator = std.heap.direct_allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
    const phys: usize = 0 * PAGE_SIZE_4MB;
    const phys_end: usize = phys + PAGE_SIZE_4MB;
    const virt: usize = 1 * PAGE_SIZE_4MB;
    const virt_end: usize = virt + PAGE_SIZE_4MB;
    try mapDirEntry(&dir, virt, virt_end, phys, phys_end, allocator);

    const entry_idx = virtToDirEntryIdx(virt);
    const entry = dir.entries[entry_idx];
    const table = dir.tables[entry_idx] orelse unreachable;
    checkDirEntry(entry, virt, virt_end, phys, table);
}

test "mapDir" {
    var allocator = std.heap.direct_allocator;
    var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
    const phys_start: usize = PAGE_SIZE_4MB * 2;
    const virt_start: usize = PAGE_SIZE_4MB * 4;
    const phys_end: usize = PAGE_SIZE_4MB * 4;
    const virt_end: usize = PAGE_SIZE_4MB * 6;
    mapDir(&dir, phys_start, phys_end, virt_start, virt_end, allocator) catch unreachable;

    var virt = virt_start;
    var phys = phys_start;
    while (virt < virt_end) : ({
        virt += PAGE_SIZE_4MB;
        phys += PAGE_SIZE_4MB;
    }) {
        const entry_idx = virtToDirEntryIdx(virt);
        const entry = dir.entries[entry_idx];
        const table = dir.tables[entry_idx] orelse unreachable;
        checkDirEntry(entry, virt, virt + PAGE_SIZE_4MB, phys, table);
    }
}