Merge pull request #273 from ZystemOS/feature/exec-elf

Create a task from elf data
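In summary: the scheduler's user-mode runtime test now loads proper ELF files from the ramdisk instead of a raw flat binary, Task.createFromElf places each allocatable ELF section at its linked virtual address, and the bitmap/VMM allocators gain an optional fixed starting position to make that placement possible. A minimal sketch of the new flow, using only identifiers that appear in this diff (kernel context, declarations and surrounding setup are elided, so this is illustrative rather than compilable on its own):

    const file = fs.openFile("/user_program.elf", .NO_CREATION) catch |e| panic(@errorReturnTrace(), "Failed to open: {}\n", .{e});
    defer file.close();
    var code: [1024 * 9]u8 = undefined;
    const code_len = file.read(code[0..code.len]) catch |e| panic(@errorReturnTrace(), "Failed to read: {}\n", .{e});
    const program_elf = elf.Elf.init(code[0..code_len], builtin.arch, allocator) catch |e| panic(@errorReturnTrace(), "Failed to parse: {}\n", .{e});
    defer program_elf.deinit();
    var user_task = try task.Task.createFromElf(program_elf, false, task_vmm, allocator);
    try scheduleTask(user_task, allocator);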
Sam Tebbs, 2021-06-26 12:22:26 +01:00, committed by GitHub
commit 2484cb08d0
13 changed files with 505 additions and 200 deletions


@@ -39,10 +39,10 @@ pub fn build(b: *Builder) !void {
     const main_src = "src/kernel/kmain.zig";
     const arch_root = "src/kernel/arch";
     const linker_script_path = try fs.path.join(b.allocator, &[_][]const u8{ arch_root, arch, "link.ld" });
-    const output_iso = try fs.path.join(b.allocator, &[_][]const u8{ b.exe_dir, "pluto.iso" });
-    const iso_dir_path = try fs.path.join(b.allocator, &[_][]const u8{ b.exe_dir, "iso" });
-    const boot_path = try fs.path.join(b.allocator, &[_][]const u8{ b.exe_dir, "iso", "boot" });
-    const modules_path = try fs.path.join(b.allocator, &[_][]const u8{ b.exe_dir, "iso", "modules" });
+    const output_iso = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "pluto.iso" });
+    const iso_dir_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso" });
+    const boot_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso", "boot" });
+    const modules_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "iso", "modules" });
     const ramdisk_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "initrd.ramdisk" });
     const fat32_image_path = try fs.path.join(b.allocator, &[_][]const u8{ b.install_path, "fat32.img" });
     const test_fat32_image_path = try fs.path.join(b.allocator, &[_][]const u8{ "test", "fat32", "test_fat32.img" });
@@ -82,19 +82,19 @@ pub fn build(b: *Builder) !void {
         try ramdisk_files_al.append("test/ramdisk_test1.txt");
         try ramdisk_files_al.append("test/ramdisk_test2.txt");
     } else if (test_mode == .Scheduler) {
-        // Add some test files for the user mode runtime tests
-        const user_program = b.addAssemble("user_program", "test/user_program.s");
-        user_program.setOutputDir(b.install_path);
-        user_program.setTarget(target);
-        user_program.setBuildMode(build_mode);
-        user_program.strip = true;
-        const user_program_path = try std.mem.join(b.allocator, "/", &[_][]const u8{ b.install_path, "user_program" });
-        const user_program_obj_path = try std.mem.join(b.allocator, "/", &[_][]const u8{ b.install_path, "user_program.o" });
-        const copy_user_program = b.addSystemCommand(&[_][]const u8{ "objcopy", "-O", "binary", user_program_obj_path, user_program_path });
-        copy_user_program.step.dependOn(&user_program.step);
-        try ramdisk_files_al.append(user_program_path);
-        exec.step.dependOn(&copy_user_program.step);
+        inline for (&[_][]const u8{ "user_program_data", "user_program" }) |user_program| {
+            // Add some test files for the user mode runtime tests
+            const user_program_step = b.addExecutable(user_program ++ ".elf", null);
+            user_program_step.setLinkerScriptPath("test/user_program.ld");
+            user_program_step.addAssemblyFile("test/" ++ user_program ++ ".s");
+            user_program_step.setOutputDir(b.install_path);
+            user_program_step.setTarget(target);
+            user_program_step.setBuildMode(build_mode);
+            user_program_step.strip = true;
+            exec.step.dependOn(&user_program_step.step);
+            const user_program_path = try std.mem.join(b.allocator, "/", &[_][]const u8{ b.install_path, user_program ++ ".elf" });
+            try ramdisk_files_al.append(user_program_path);
+        }
     }
     const ramdisk_step = RamdiskStep.create(b, target, ramdisk_files_al.toOwnedSlice(), ramdisk_path);


@@ -526,6 +526,7 @@ pub fn initTask(task: *Task, entry_point: usize, allocator: *Allocator) Allocato
     // TODO Will need to add the exit point
     // Set up everything as a kernel task
+    task.vmm.payload = &paging.kernel_directory;
     stack.*[kernel_stack_bottom] = mem.virtToPhys(@ptrToInt(&paging.kernel_directory));
     stack.*[kernel_stack_bottom + 1] = data_offset; // gs
     stack.*[kernel_stack_bottom + 2] = data_offset; // fs


@@ -222,10 +222,8 @@ fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start:
         // Create a table and put the physical address in the dir entry
         table = &(try allocator.alignedAlloc(Table, @truncate(u29, PAGE_SIZE_4KB), 1))[0];
         @memset(@ptrCast([*]u8, table), 0, @sizeOf(Table));
-        const table_phys_addr = vmm.kernel_vmm.virtToPhys(@ptrToInt(table)) catch |e| blk: {
-            // When testing this will fail, but that's ok
-            if (!is_test) panic(@errorReturnTrace(), "Failed getting the physical address for a page table: {}\n", .{e});
-            break :blk 0;
+        const table_phys_addr = if (builtin.is_test) @ptrToInt(table) else vmm.kernel_vmm.virtToPhys(@ptrToInt(table)) catch |e| {
+            panic(@errorReturnTrace(), "Failed getting the physical address for a page table: {}\n", .{e});
         };
         dir_entry.* |= DENTRY_PAGE_ADDR & table_phys_addr;
         dir.tables[entry] = table;
@@ -262,7 +260,7 @@ fn mapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, phys_start:
         phys += PAGE_SIZE_4KB;
         tentry += 1;
     }) {
-        try mapTableEntry(&table.entries[tentry], phys, attrs);
+        try mapTableEntry(dir, &table.entries[tentry], virt, phys, attrs);
     }
 }
@@ -286,6 +284,13 @@ fn unmapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, allocator:
     var table_entry = &table.entries[virtToTableEntryIdx(addr)];
     if (table_entry.* & TENTRY_PRESENT != 0) {
         clearAttribute(table_entry, TENTRY_PRESENT);
+        if (dir == &kernel_directory) {
+            asm volatile ("invlpg (%[addr])"
+                :
+                : [addr] "r" (addr)
+                : "memory"
+            );
+        }
     } else {
         return vmm.MapperError.NotMapped;
     }
@@ -297,13 +302,17 @@ fn unmapDirEntry(dir: *Directory, virt_start: usize, virt_end: usize, allocator:
 /// Sets the entry to be present, writable, kernel access, write through, cache enabled, non-global and the page address bits.
 ///
 /// Arguments:
+///     IN dir: *const Directory - The directory that is being mapped within.
+///         The function checks if this is the kernel directory and if so invalidates the page being mapped so the TLB reloads it.
 ///     OUT entry: *align(1) TableEntry - The entry to map. 1 byte aligned.
+///     IN virt_addr: usize - The virtual address that this table entry is responsible for.
+///         Used to invalidate the page if mapping within the kernel page directory.
 ///     IN phys_addr: usize - The physical address to map the table entry to.
 ///
 /// Error: PagingError
 ///     PagingError.UnalignedPhysAddresses - If the physical address isn't page size aligned.
 ///
-fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize, attrs: vmm.Attributes) vmm.MapperError!void {
+fn mapTableEntry(dir: *const Directory, entry: *align(1) TableEntry, virt_addr: usize, phys_addr: usize, attrs: vmm.Attributes) vmm.MapperError!void {
     if (!std.mem.isAligned(phys_addr, PAGE_SIZE_4KB)) {
         return vmm.MapperError.MisalignedPhysicalAddress;
     }
@@ -318,18 +327,24 @@ fn mapTableEntry(entry: *align(1) TableEntry, phys_addr: usize, attrs: vmm.Attri
     } else {
         setAttribute(entry, TENTRY_USER);
     }
-    if (attrs.writable) {
-        setAttribute(entry, TENTRY_WRITE_THROUGH);
-    } else {
-        clearAttribute(entry, TENTRY_WRITE_THROUGH);
-    }
     if (attrs.cachable) {
+        clearAttribute(entry, TENTRY_WRITE_THROUGH);
         clearAttribute(entry, TENTRY_CACHE_DISABLED);
     } else {
+        setAttribute(entry, TENTRY_WRITE_THROUGH);
         setAttribute(entry, TENTRY_CACHE_DISABLED);
     }
     clearAttribute(entry, TENTRY_GLOBAL);
     setAttribute(entry, TENTRY_PAGE_ADDR & phys_addr);
+    if (dir == &kernel_directory) {
+        asm volatile ("invlpg (%[addr])"
+            :
+            : [addr] "r" (virt_addr)
+            : "memory"
+        );
+    }
 }

 ///
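A note on the new invlpg blocks above: the x86 TLB caches virtual-to-physical translations, so when an entry in the currently loaded (kernel) page directory is mapped or unmapped, the stale translation must be explicitly invalidated or the CPU may keep translating through the old entry. A hypothetical helper the two inlined asm blocks could be factored into (not part of this diff, shown only to make the pattern explicit):

    // Hypothetical helper: evict a single page's translation from the TLB.
    fn invalidatePage(virt_addr: usize) void {
        asm volatile ("invlpg (%[addr])"
            :
            : [addr] "r" (virt_addr)
            : "memory"
        );
    }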


@@ -89,11 +89,13 @@ pub fn ComptimeBitmap(comptime BitmapType: type) type {
        /// Arguments:
        ///     IN/OUT self: *Self - The bitmap to modify.
        ///     IN num: usize - The number of entries to set.
+       ///     IN from: ?IndexType - The entry number to start allocating from, or null if it can start anywhere
        ///
        /// Return: ?IndexType
        ///     The first entry set or null if there weren't enough contiguous entries.
+       ///     If `from` was not null and any entry between `from` and `from` + num is set then null is returned.
        ///
-       pub fn setContiguous(self: *Self, num: usize) ?IndexType {
+       pub fn setContiguous(self: *Self, num: usize, from: ?IndexType) ?IndexType {
            if (num > self.num_free_entries) {
                return null;
            }
@@ -101,7 +103,7 @@ pub fn ComptimeBitmap(comptime BitmapType: type) type {
            var count: usize = 0;
            var start: ?IndexType = null;
-           var bit: IndexType = 0;
+           var bit = from orelse 0;
            while (true) {
                const entry = bit;
                if (entry >= NUM_ENTRIES) {
@@ -111,6 +113,11 @@ pub fn ComptimeBitmap(comptime BitmapType: type) type {
                    // This is a one so clear the progress
                    count = 0;
                    start = null;
+                   // If the caller requested the allocation to start from
+                   // a specific entry and it failed then return null
+                   if (from) |_| {
+                       return null;
+                   }
                } else {
                    // It's a zero so increment the count
                    count += 1;
@@ -334,19 +341,26 @@ pub fn Bitmap(comptime BitmapType: type) type {
        /// Arguments:
        ///     IN/OUT self: *Self - The bitmap to modify.
        ///     IN num: usize - The number of entries to set.
+       ///     IN from: ?usize - The entry number to start allocating from, or null if it can start anywhere
        ///
        /// Return: ?usize
        ///     The first entry set or null if there weren't enough contiguous entries.
+       ///     If `from` was not null and any entry between `from` and `from` + num is set then null is returned.
        ///
-       pub fn setContiguous(self: *Self, num: usize) ?usize {
+       pub fn setContiguous(self: *Self, num: usize, from: ?usize) ?usize {
            if (num > self.num_free_entries) {
                return null;
            }

            var count: usize = 0;
-           var start: ?usize = null;
-           for (self.bitmaps) |bmp, i| {
-               var bit: IndexType = 0;
+           var start: ?usize = from;
+           var i: usize = if (from) |f| f / ENTRIES_PER_BITMAP else 0;
+           var bit: IndexType = if (from) |f| @truncate(IndexType, f % ENTRIES_PER_BITMAP) else 0;
+           while (i < self.bitmaps.len) : ({
+               i += 1;
+               bit = 0;
+           }) {
+               var bmp = self.bitmaps[i];
                while (true) {
                    const entry = bit + i * ENTRIES_PER_BITMAP;
                    if (entry >= self.num_entries) {
@@ -356,6 +370,11 @@ pub fn Bitmap(comptime BitmapType: type) type {
                        // This is a one so clear the progress
                        count = 0;
                        start = null;
+                       // If the caller requested the allocation to start from
+                       // a specific entry and it failed then return null
+                       if (from) |_| {
+                           return null;
+                       }
                    } else {
                        // It's a zero so increment the count
                        count += 1;
@@ -383,10 +402,10 @@ pub fn Bitmap(comptime BitmapType: type) type {
            if (count == num) {
                if (start) |start_entry| {
-                   var i: usize = 0;
-                   while (i < num) : (i += 1) {
+                   var j: usize = 0;
+                   while (j < num) : (j += 1) {
                        // Can't fail as the entry was found to be free
-                       self.setEntry(start_entry + i) catch unreachable;
+                       self.setEntry(start_entry + j) catch unreachable;
                    }
                    return start_entry;
                }
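The new `from` parameter turns setContiguous into both a first-fit allocator (`from == null`) and a fixed-placement allocator that fails instead of falling back to searching elsewhere. A small usage sketch based on the tests that follow:

    var bmp = ComptimeBitmap(u16).init();
    _ = bmp.setContiguous(3, null); // first fit anywhere: takes entries 0-2
    _ = bmp.setContiguous(2, 14);   // pinned at entry 14: takes entries 14-15
    _ = bmp.setContiguous(2, 13);   // entry 14 is already set, so this returns null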
@@ -547,24 +566,40 @@ test "Comptime indexToBit" {
 }

 test "Comptime setContiguous" {
-    var bmp = ComptimeBitmap(u15).init();
+    var bmp = ComptimeBitmap(u16).init();
     // Test trying to set more entries than the bitmap has
-    testing.expectEqual(bmp.setContiguous(ComptimeBitmap(u15).NUM_ENTRIES + 1), null);
+    testing.expectEqual(bmp.setContiguous(bmp.num_free_entries + 1, null), null);
+    testing.expectEqual(bmp.setContiguous(bmp.num_free_entries + 1, 1), null);
     // All entries should still be free
-    testing.expectEqual(bmp.num_free_entries, ComptimeBitmap(u15).NUM_ENTRIES);
-    testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 0);
-    testing.expectEqual(bmp.setContiguous(4) orelse unreachable, 3);
-    // 0b0000.0000.0111.1111
-    bmp.bitmap |= 0x200;
-    // 0b0000.0010.0111.1111
-    testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 10);
-    // 0b0001.1110.0111.1111
-    testing.expectEqual(bmp.setContiguous(5), null);
-    testing.expectEqual(bmp.setContiguous(2), 7);
-    // 0b001.1111.1111.1111
-    // Test trying to set beyond the end of the bitmaps
-    testing.expectEqual(bmp.setContiguous(3), null);
-    testing.expectEqual(bmp.setContiguous(2), 13);
+    testing.expectEqual(bmp.num_free_entries, 16);
+    testing.expectEqual(bmp.bitmap, 0b0000000000000000);
+
+    testing.expectEqual(bmp.setContiguous(3, 0) orelse unreachable, 0);
+    testing.expectEqual(bmp.bitmap, 0b0000000000000111);
+
+    // Test setting from the top
+    testing.expectEqual(bmp.setContiguous(2, 14) orelse unreachable, 14);
+    testing.expectEqual(bmp.bitmap, 0b1100000000000111);
+
+    testing.expectEqual(bmp.setContiguous(3, 12), null);
+    testing.expectEqual(bmp.bitmap, 0b1100000000000111);
+
+    testing.expectEqual(bmp.setContiguous(3, null) orelse unreachable, 3);
+    testing.expectEqual(bmp.bitmap, 0b1100000000111111);
+
+    // Test setting beyond what is available
+    testing.expectEqual(bmp.setContiguous(9, null), null);
+    testing.expectEqual(bmp.bitmap, 0b1100000000111111);
+    testing.expectEqual(bmp.setContiguous(8, null) orelse unreachable, 6);
+    testing.expectEqual(bmp.bitmap, 0b1111111111111111);
+
+    // No more are possible
+    testing.expectEqual(bmp.setContiguous(1, null), null);
+    testing.expectEqual(bmp.bitmap, 0b1111111111111111);
+    testing.expectEqual(bmp.setContiguous(1, 0), null);
+    testing.expectEqual(bmp.bitmap, 0b1111111111111111);
 }

 test "setEntry" {
@@ -723,25 +758,47 @@ test "indexToBit" {
     testing.expectEqual(bmp.indexToBit(9), 2);
 }

+fn testCheckBitmaps(bmp: Bitmap(u4), b1: u4, b2: u4, b3: u4, b4: u4) void {
+    testing.expectEqual(@as(u4, b1), bmp.bitmaps[0]);
+    testing.expectEqual(@as(u4, b2), bmp.bitmaps[1]);
+    testing.expectEqual(@as(u4, b3), bmp.bitmaps[2]);
+    testing.expectEqual(@as(u4, b4), bmp.bitmaps[3]);
+}
+
 test "setContiguous" {
-    var bmp = try Bitmap(u4).init(15, std.testing.allocator);
+    var bmp = try Bitmap(u4).init(16, std.testing.allocator);
     defer bmp.deinit();
     // Test trying to set more entries than the bitmap has
-    testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1), null);
+    testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1, null), null);
+    testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1, 1), null);
     // All entries should still be free
     testing.expectEqual(bmp.num_free_entries, bmp.num_entries);
-    testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 0);
-    testing.expectEqual(bmp.setContiguous(4) orelse unreachable, 3);
-    // 0b0000.0000.0111.1111
-    bmp.bitmaps[2] |= 2;
-    // 0b0000.0010.0111.1111
-    testing.expectEqual(bmp.setContiguous(3) orelse unreachable, 10);
-    // 0b0001.1110.0111.1111
-    testing.expectEqual(bmp.setContiguous(5), null);
-    testing.expectEqual(bmp.setContiguous(2), 7);
-    // 0b001.1111.1111.1111
-    // Test trying to set beyond the end of the bitmaps
-    testing.expectEqual(bmp.setContiguous(3), null);
-    testing.expectEqual(bmp.setContiguous(2), 13);
+    testCheckBitmaps(bmp, 0, 0, 0, 0);
+
+    testing.expectEqual(bmp.setContiguous(3, 0) orelse unreachable, 0);
+    testCheckBitmaps(bmp, 0b0111, 0, 0, 0);
+
+    // Test setting from the top
+    testing.expectEqual(bmp.setContiguous(2, 14) orelse unreachable, 14);
+    testCheckBitmaps(bmp, 0b0111, 0, 0, 0b1100);
+
+    testing.expectEqual(bmp.setContiguous(3, 12), null);
+    testCheckBitmaps(bmp, 0b0111, 0, 0, 0b1100);
+
+    testing.expectEqual(bmp.setContiguous(3, null) orelse unreachable, 3);
+    testCheckBitmaps(bmp, 0b1111, 0b0011, 0, 0b1100);
+
+    // Test setting beyond what is available
+    testing.expectEqual(bmp.setContiguous(9, null), null);
+    testCheckBitmaps(bmp, 0b1111, 0b0011, 0, 0b1100);
+    testing.expectEqual(bmp.setContiguous(8, null) orelse unreachable, 6);
+    testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111);
+
+    // No more are possible
+    testing.expectEqual(bmp.setContiguous(1, null), null);
+    testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111);
+    testing.expectEqual(bmp.setContiguous(1, 0), null);
+    testCheckBitmaps(bmp, 0b1111, 0b1111, 0b1111, 0b1111);
 }


@@ -278,18 +278,12 @@ pub const SectionType = enum(u32) {
     ///
     pub fn hasData(self: @This()) bool {
         return switch (self) {
-            .Unused, .ProgramData, .ProgramSpace, .Reserved => false,
+            .Unused, .ProgramSpace, .Reserved => false,
             else => true,
         };
     }
 };

-comptime {
-    std.debug.assert(@sizeOf(SectionHeader) == if (@bitSizeOf(usize) == 32) 0x28 else 0x40);
-    std.debug.assert(@sizeOf(Header) == if (@bitSizeOf(usize) == 32) 0x32 else 0x40);
-    std.debug.assert(@sizeOf(ProgramHeader) == if (@bitSizeOf(usize) == 32) 0x20 else 0x38);
-}
-
 /// The section is writable
 pub const SECTION_WRITABLE = 1;
 /// The section occupies memory during execution
@@ -461,21 +455,23 @@ pub const Error = error{
 };

 fn testSetHeader(data: []u8, header: Header) void {
-    std.mem.copy(u8, data[0..@sizeOf(Header)], @ptrCast([*]const u8, &header)[0..@sizeOf(Header)]);
+    std.mem.copy(u8, data, @ptrCast([*]const u8, &header)[0..@sizeOf(Header)]);
 }

 fn testSetSection(data: []u8, header: SectionHeader, idx: usize) void {
     const offset = @sizeOf(Header) + @sizeOf(SectionHeader) * idx;
-    std.mem.copy(u8, data[offset .. offset + @sizeOf(SectionHeader)], @ptrCast([*]const u8, &header)[0..@sizeOf(SectionHeader)]);
+    var dest = data[offset .. offset + @sizeOf(SectionHeader)];
+    std.mem.copy(u8, dest, @ptrCast([*]const u8, &header)[0..@sizeOf(SectionHeader)]);
 }

-fn testInitData(section_name: []const u8, string_section_name: []const u8, file_type: Type, entry_address: usize, flags: u32, section_flags: u32, strings_flags: u32, section_address: usize, strings_address: usize) []u8 {
+pub fn testInitData(allocator: *std.mem.Allocator, section_name: []const u8, string_section_name: []const u8, file_type: Type, entry_address: usize, flags: u32, section_flags: u32, strings_flags: u32, section_address: usize, strings_address: usize) ![]u8 {
     const is_32_bit = @bitSizeOf(usize) == 32;
     const header_size = if (is_32_bit) 0x34 else 0x40;
     const p_header_size = if (is_32_bit) 0x20 else 0x38;
     const s_header_size = if (is_32_bit) 0x28 else 0x40;
-    const data_size = header_size + s_header_size + s_header_size + section_name.len + 1 + string_section_name.len + 1;
-    var data = testing.allocator.alloc(u8, data_size) catch unreachable;
+    const section_size = 1024;
+    const data_size = header_size + s_header_size + s_header_size + section_name.len + 1 + string_section_name.len + 1 + section_size;
+    var data = try allocator.alloc(u8, data_size);

     var header = Header{
         .magic_number = 0x464C457F,
@@ -495,7 +491,11 @@ fn testInitData(section_name: []const u8, string_section_name: []const u8, file_
         .padding2 = 0,
         .padding3 = 0,
         .file_type = file_type,
-        .architecture = .AMD_64,
+        .architecture = switch (builtin.arch) {
+            .i386 => .x86,
+            .x86_64 => .AMD_64,
+            else => unreachable,
+        },
         .version2 = 1,
         .entry_address = entry_address,
         .program_header_offset = undefined,
@@ -517,8 +517,8 @@ fn testInitData(section_name: []const u8, string_section_name: []const u8, file_
         .section_type = .ProgramData,
         .flags = section_flags,
         .virtual_address = section_address,
-        .offset = 0,
-        .size = 0,
+        .offset = data_offset + s_header_size + s_header_size,
+        .size = section_size,
         .linked_section_idx = undefined,
         .info = undefined,
         .alignment = 1,
@@ -532,7 +532,7 @@ fn testInitData(section_name: []const u8, string_section_name: []const u8, file_
         .section_type = .StringTable,
         .flags = strings_flags,
         .virtual_address = strings_address,
-        .offset = data_offset + s_header_size,
+        .offset = data_offset + s_header_size + section_size,
         .size = section_name.len + 1 + string_section_name.len + 1,
         .linked_section_idx = undefined,
         .info = undefined,
@@ -542,6 +542,9 @@ fn testInitData(section_name: []const u8, string_section_name: []const u8, file_
     testSetSection(data, string_section_header, 1);
     data_offset += s_header_size;

+    std.mem.set(u8, data[data_offset .. data_offset + section_size], 0);
+    data_offset += section_size;
+
     std.mem.copy(u8, data[data_offset .. data_offset + section_name.len], section_name);
     data_offset += section_name.len;
     data[data_offset] = 0;
@@ -551,23 +554,27 @@ fn testInitData(section_name: []const u8, string_section_name: []const u8, file_
     data_offset += string_section_name.len;
     data[data_offset] = 0;
     data_offset += 1;

-    return data[0..data_size];
+    return data;
 }
 test "init" {
     const section_name = "some_section";
     const string_section_name = "strings";
     const is_32_bit = @bitSizeOf(usize) == 32;
-    var data = testInitData(section_name, string_section_name, .Executable, 0, undefined, 123, 789, 456, 012);
+    var data = try testInitData(testing.allocator, section_name, string_section_name, .Executable, 0, 0, 123, 789, 456, 012);
     defer testing.allocator.free(data);
     const elf = try Elf.init(data, builtin.arch, testing.allocator);
     defer elf.deinit();

     testing.expectEqual(elf.header.data_size, if (is_32_bit) .ThirtyTwoBit else .SixtyFourBit);
     testing.expectEqual(elf.header.file_type, .Executable);
-    testing.expectEqual(elf.header.architecture, .AMD_64);
+    testing.expectEqual(elf.header.architecture, switch (builtin.arch) {
+        .i386 => .x86,
+        .x86_64 => .AMD_64,
+        else => unreachable,
+    });
     testing.expectEqual(elf.header.entry_address, 0);
-    testing.expectEqual(elf.header.flags, undefined);
+    testing.expectEqual(elf.header.flags, 0);
     testing.expectEqual(elf.header.section_name_index, 1);
     testing.expectEqual(elf.program_headers.len, 0);
@@ -586,7 +593,7 @@ test "init" {
     testing.expectEqual(@as(usize, 012), section_two.virtual_address);
     testing.expectEqual(@as(usize, 2), elf.section_data.len);
-    testing.expectEqual(@as(?[]const u8, null), elf.section_data[0]);
+    testing.expectEqual(elf.section_headers[0].size, elf.section_data[0].?.len);
     for ("some_section" ++ [_]u8{0} ++ "strings" ++ [_]u8{0}) |char, i| {
         testing.expectEqual(char, elf.section_data[1].?[i]);
     }
@@ -639,7 +646,7 @@ test "getName" {
     // The entire ELF test data. The header, program header, two section headers and the section name (with the null terminator)
     var section_name = "some_section";
     var string_section_name = "strings";
-    const data = testInitData(section_name, string_section_name, .Executable, 0, undefined, undefined, undefined, undefined, undefined);
+    const data = try testInitData(testing.allocator, section_name, string_section_name, .Executable, 0, undefined, undefined, undefined, undefined, undefined);
     defer testing.allocator.free(data);
     const elf = try Elf.init(data, builtin.arch, testing.allocator);
     defer elf.deinit();
@@ -664,30 +671,36 @@ test "toArch" {
     inline for (@typeInfo(Architecture).Enum.fields) |field| {
         const architecture = @field(Architecture, field.name);
-        const is_known = for (known_architectures) |known_architecture, i| {
-            if (known_architecture == architecture) {
-                testing.expectEqual(architecture.toArch(), known_archs[i]);
-                break true;
-            }
-        } else false;
+        const is_known = switch (architecture) {
+            .Sparc, .x86, .MIPS, .PowerPC, .PowerPC_64, .ARM, .AMD_64, .Aarch64, .RISC_V => true,
+            else => false,
+        };

         if (!is_known) {
             testing.expectError(Error.UnknownArchitecture, architecture.toArch());
+        } else {
+            testing.expectEqual(architecture.toArch(), switch (architecture) {
+                .Sparc => .sparc,
+                .x86 => .i386,
+                .MIPS => .mips,
+                .PowerPC => .powerpc,
+                .PowerPC_64 => .powerpc64,
+                .ARM => .arm,
+                .AMD_64 => .x86_64,
+                .Aarch64 => .aarch64,
+                .RISC_V => .riscv32,
+                else => unreachable,
+            });
         }
     }
 }
 test "hasData" {
-    const no_data = [_]SectionType{ .Unused, .ProgramSpace, .Reserved, .ProgramData };
     inline for (@typeInfo(SectionType).Enum.fields) |field| {
         const sec_type = @field(SectionType, field.name);
-        const has_data = for (no_data) |no_data_type| {
-            if (sec_type == no_data_type) {
-                break false;
-            }
-        } else true;
-        testing.expectEqual(has_data, sec_type.hasData());
+        const should_not_have_data = sec_type == .Unused or sec_type == .ProgramSpace or sec_type == .Reserved;
+        testing.expectEqual(should_not_have_data, !sec_type.hasData());
     }
 }


@@ -604,7 +604,7 @@ pub const FreeListAllocator = struct {
     pub fn init(comptime vmm_payload: type, heap_vmm: *vmm.VirtualMemoryManager(vmm_payload), attributes: vmm.Attributes, heap_size: usize) (FreeListAllocator.Error || Allocator.Error)!FreeListAllocator {
         log.info("Init\n", .{});
         defer log.info("Done\n", .{});
-        var heap_start = (try heap_vmm.alloc(heap_size / vmm.BLOCK_SIZE, attributes)) orelse panic(null, "Not enough contiguous virtual memory blocks to allocate to kernel heap\n", .{});
+        var heap_start = (try heap_vmm.alloc(heap_size / vmm.BLOCK_SIZE, null, attributes)) orelse panic(null, "Not enough contiguous virtual memory blocks to allocate to kernel heap\n", .{});
         // This free call cannot error as it is guaranteed to have been allocated above
         errdefer heap_vmm.free(heap_start) catch unreachable;
         return try FreeListAllocator.init(heap_start, heap_size);


@@ -13,6 +13,8 @@ const task = @import("task.zig");
 const vmm = @import("vmm.zig");
 const mem = @import("mem.zig");
 const fs = @import("filesystem/vfs.zig");
+const elf = @import("elf.zig");
+const pmm = @import("pmm.zig");
 const Task = task.Task;
 const EntryPoint = task.EntryPoint;
 const Allocator = std.mem.Allocator;
@@ -304,8 +306,8 @@ fn rt_variable_preserved(allocator: *Allocator) void {
     defer allocator.destroy(is_set);
     is_set.* = true;

-    var test_task = Task.create(@ptrToInt(task_function), true, undefined, allocator) catch unreachable;
-    scheduleTask(test_task, allocator) catch unreachable;
+    var test_task = Task.create(@ptrToInt(task_function), true, &vmm.kernel_vmm, allocator) catch |e| panic(@errorReturnTrace(), "Failed to create task in rt_variable_preserved: {}\n", .{e});
+    scheduleTask(test_task, allocator) catch |e| panic(@errorReturnTrace(), "Failed to schedule a task in rt_variable_preserved: {}\n", .{e});
     // TODO: Need to add the ability to remove tasks

     var w: u32 = 0;
@@ -352,37 +354,53 @@ fn rt_variable_preserved(allocator: *Allocator) void {
 /// IN mem_profile: mem.MemProfile - The system's memory profile. Determines the end address of the user task's VMM.
 ///
 fn rt_user_task(allocator: *Allocator, mem_profile: *const mem.MemProfile) void {
-    // 1. Create user VMM
-    var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| {
-        panic(@errorReturnTrace(), "Failed to allocate user task VMM: {}\n", .{e});
-    };
-    task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch unreachable;
-    // 2. Create user task. The code will be loaded at address 0
-    var user_task = task.Task.create(0, false, task_vmm, allocator) catch |e| {
-        panic(@errorReturnTrace(), "Failed to create user task: {}\n", .{e});
-    };
-    // 3. Read the user program file from the filesystem
-    const user_program_file = fs.openFile("/user_program", .NO_CREATION) catch |e| {
-        panic(@errorReturnTrace(), "Failed to open /user_program: {}\n", .{e});
-    };
-    defer user_program_file.close();
-    var code: [1024]u8 = undefined;
-    const code_len = user_program_file.read(code[0..1024]) catch |e| {
-        panic(@errorReturnTrace(), "Failed to read user program file: {}\n", .{e});
-    };
-    // 4. Allocate space in the vmm for the user_program
-    const code_start = task_vmm.alloc(std.mem.alignForward(code_len, vmm.BLOCK_SIZE) / vmm.BLOCK_SIZE, .{ .kernel = false, .writable = true, .cachable = true }) catch |e| {
-        panic(@errorReturnTrace(), "Failed to allocate VMM memory for user program code: {}\n", .{e});
-    } orelse panic(null, "User task VMM didn't allocate space for the user program\n", .{});
-    if (code_start != 0) panic(null, "User program start address was {} instead of 0\n", .{code_start});
-    // 5. Copy user_program code over
-    vmm.kernel_vmm.copyData(task_vmm, code[0..code_len], code_start, true) catch |e| {
-        panic(@errorReturnTrace(), "Failed to copy user code: {}\n", .{e});
-    };
-    // 6. Schedule it
-    scheduleTask(user_task, allocator) catch |e| {
-        panic(@errorReturnTrace(), "Failed to schedule the user task: {}\n", .{e});
-    };
+    for (&[_][]const u8{ "/user_program_data.elf", "/user_program.elf" }) |user_program| {
+        // 1. Create user VMM
+        var task_vmm = allocator.create(vmm.VirtualMemoryManager(arch.VmmPayload)) catch |e| {
+            panic(@errorReturnTrace(), "Failed to allocate VMM for {s}: {}\n", .{ user_program, e });
+        };
+        task_vmm.* = vmm.VirtualMemoryManager(arch.VmmPayload).init(0, @ptrToInt(mem_profile.vaddr_start), allocator, arch.VMM_MAPPER, undefined) catch |e| panic(@errorReturnTrace(), "Failed to create the vmm for {s}: {}\n", .{ user_program, e });
+
+        const user_program_file = fs.openFile(user_program, .NO_CREATION) catch |e| {
+            panic(@errorReturnTrace(), "Failed to open {s}: {}\n", .{ user_program, e });
+        };
+        defer user_program_file.close();
+        var code: [1024 * 9]u8 = undefined;
+        const code_len = user_program_file.read(code[0..code.len]) catch |e| {
+            panic(@errorReturnTrace(), "Failed to read {s}: {}\n", .{ user_program, e });
+        };
+        const program_elf = elf.Elf.init(code[0..code_len], builtin.arch, allocator) catch |e| panic(@errorReturnTrace(), "Failed to load {s}: {}\n", .{ user_program, e });
+        defer program_elf.deinit();
+
+        const current_physical_blocks = pmm.blocksFree();
+
+        var user_task = task.Task.createFromElf(program_elf, false, task_vmm, allocator) catch |e| {
+            panic(@errorReturnTrace(), "Failed to create task for {s}: {}\n", .{ user_program, e });
+        };
+
+        scheduleTask(user_task, allocator) catch |e| {
+            panic(@errorReturnTrace(), "Failed to schedule the task for {s}: {}\n", .{ user_program, e });
+        };
+
+        var num_allocatable_sections: usize = 0;
+        var size_allocatable_sections: usize = 0;
+        for (program_elf.section_headers) |section| {
+            if (section.flags & elf.SECTION_ALLOCATABLE != 0) {
+                num_allocatable_sections += 1;
+                size_allocatable_sections += std.mem.alignForward(section.size, vmm.BLOCK_SIZE);
+            }
+        }
+        // Only a certain number of elf sections are expected to have been allocated in the vmm
+        if (task_vmm.allocations.count() != num_allocatable_sections) {
+            panic(@errorReturnTrace(), "VMM allocated wrong number of virtual regions for {s}. Expected {} but found {}\n", .{ user_program, num_allocatable_sections, task_vmm.allocations.count() });
+        }
+        const allocated_size = (task_vmm.bmp.num_entries - task_vmm.bmp.num_free_entries) * vmm.BLOCK_SIZE;
+        if (size_allocatable_sections != allocated_size) {
+            panic(@errorReturnTrace(), "VMM allocated wrong amount of memory for {s}. Expected {} but found {}\n", .{ user_program, size_allocatable_sections, allocated_size });
+        }
+    }
 }

 ///


@@ -7,9 +7,14 @@ const build_options = @import("build_options");
 const mock_path = build_options.mock_path;
 const arch = @import("arch.zig").internals;
 const panic = @import("panic.zig").panic;
-const ComptimeBitmap = @import("bitmap.zig").ComptimeBitmap;
 const vmm = @import("vmm.zig");
+const pmm = @import("pmm.zig");
+const mem = @import("mem.zig");
+const elf = @import("elf.zig");
+const bitmap = @import("bitmap.zig");
+const ComptimeBitmap = bitmap.ComptimeBitmap;
 const Allocator = std.mem.Allocator;
+const log = std.log.scoped(.task);

 /// The kernels main stack start as this is used to check for if the task being destroyed is this stack
 /// as we cannot deallocate this.
@@ -99,6 +104,38 @@ pub const Task = struct {
         return task;
     }

+    pub fn createFromElf(program_elf: elf.Elf, kernel: bool, task_vmm: *vmm.VirtualMemoryManager(arch.VmmPayload), allocator: *Allocator) (bitmap.Bitmap(usize).BitmapError || vmm.VmmError || Allocator.Error)!*Task {
+        const task = try create(program_elf.header.entry_address, kernel, task_vmm, allocator);
+        errdefer task.destroy(allocator);
+
+        // Iterate over sections
+        var i: usize = 0;
+        errdefer {
+            // Free the previously allocated addresses
+            for (program_elf.section_headers) |header, j| {
+                if (j >= i)
+                    break;
+                if ((header.flags & elf.SECTION_ALLOCATABLE) != 0)
+                    task_vmm.free(header.virtual_address) catch |e| panic(null, "VMM failed to clean up a previously-allocated address after an error: {}\n", .{e});
+            }
+        }
+
+        while (i < program_elf.section_headers.len) : (i += 1) {
+            const header = program_elf.section_headers[i];
+            if ((header.flags & elf.SECTION_ALLOCATABLE) == 0) {
+                continue;
+            }
+            // If it is loadable then allocate it at its virtual address
+            const attrs = vmm.Attributes{ .kernel = kernel, .writable = (header.flags & elf.SECTION_WRITABLE) != 0, .cachable = true };
+            const vmm_blocks = std.mem.alignForward(header.size, vmm.BLOCK_SIZE) / vmm.BLOCK_SIZE;
+            const vaddr_opt = try task_vmm.alloc(vmm_blocks, header.virtual_address, attrs);
+            const vaddr = vaddr_opt orelse return if (try task_vmm.isSet(header.virtual_address)) error.AlreadyAllocated else error.OutOfBounds;
+            errdefer task_vmm.free(vaddr) catch |e| panic(@errorReturnTrace(), "Failed to free VMM memory in createFromElf: {}\n", .{e});
+
+            // Copy it into memory
+            try vmm.kernel_vmm.copyData(task_vmm, true, program_elf.section_data[i].?, vaddr);
+        }
+        return task;
+    }
+
     ///
     /// Destroy the task. This will release the allocated PID and free the stack and self.
     ///
@@ -254,3 +291,62 @@ test "allocatePid and freePid" {
     expectEqual(all_pids.bitmap, 0);
 }

+test "createFromElf" {
+    var allocator = std.testing.allocator;
+    var master_vmm = try vmm.testInit(32);
+    defer vmm.testDeinit(&master_vmm);
+
+    const code_address = 0;
+    const elf_data = try elf.testInitData(allocator, "abc123", "strings", .Executable, code_address, 0, elf.SECTION_ALLOCATABLE, 0, code_address, 0);
+    defer allocator.free(elf_data);
+    var the_elf = try elf.Elf.init(elf_data, builtin.arch, std.testing.allocator);
+    defer the_elf.deinit();
+
+    var the_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(0, 10000, std.testing.allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
+    defer the_vmm.deinit();
+    const task = try Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator);
+    defer task.destroy(allocator);
+
+    std.testing.expectEqual(task.pid, 0);
+    std.testing.expectEqual(task.user_stack.len, 0);
+    std.testing.expectEqual(task.kernel_stack.len, STACK_SIZE);
+}
+
+test "createFromElf clean-up" {
+    var allocator = std.testing.allocator;
+    var master_vmm = try vmm.testInit(32);
+    defer vmm.testDeinit(&master_vmm);
+
+    const code_address = 0;
+    const elf_data = try elf.testInitData(allocator, "abc123", "strings", .Executable, code_address, 0, elf.SECTION_ALLOCATABLE, 0, code_address, 0);
+    defer allocator.free(elf_data);
+    var the_elf = try elf.Elf.init(elf_data, builtin.arch, std.testing.allocator);
+    defer the_elf.deinit();
+
+    var the_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(0, 10000, std.testing.allocator, arch.VMM_MAPPER, arch.KERNEL_VMM_PAYLOAD);
+    defer the_vmm.deinit();
+    const task = try Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator);
+    defer task.destroy(allocator);
+
+    // Test clean-up
+    // Test OutOfMemory
+    var allocator2 = &std.testing.FailingAllocator.init(allocator, 0).allocator;
+    std.testing.expectError(std.mem.Allocator.Error.OutOfMemory, Task.createFromElf(the_elf, true, &the_vmm, allocator2));
+    std.testing.expectEqual(all_pids.num_free_entries, PidBitmap.NUM_ENTRIES - 1);
+
+    // Test AlreadyAllocated
+    std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, allocator));
+
+    // Test OutOfBounds
+    the_elf.section_headers[0].virtual_address = the_vmm.end + 1;
+    std.testing.expectError(error.OutOfBounds, Task.createFromElf(the_elf, true, &the_vmm, allocator));
+
+    // Test errdefer clean-up by filling up all but one block in the VMM so allocating the last section fails
+    // The allocation for the first section should be cleaned up in case of an error
+    const available_address = (try the_vmm.alloc(1, null, .{ .writable = false, .kernel = false, .cachable = false })) orelse unreachable;
+    the_elf.section_headers[0].virtual_address = available_address;
+    _ = try the_vmm.alloc(the_vmm.bmp.num_free_entries, null, .{ .kernel = false, .writable = false, .cachable = false });
+    try the_vmm.free(available_address);
+
+    // Make the strings section allocatable so createFromElf tries to allocate more than one
+    the_elf.section_headers[1].flags |= elf.SECTION_ALLOCATABLE;
+    std.testing.expectError(error.AlreadyAllocated, Task.createFromElf(the_elf, true, &the_vmm, std.testing.allocator));
+}
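Worth noting about createFromElf's error contract: when a fixed-address allocation returns null, it distinguishes a collision from an out-of-range section. A hypothetical call site (only the function name and error values come from this diff; everything else is illustrative):

    const new_task = Task.createFromElf(program_elf, false, task_vmm, allocator) catch |e| switch (e) {
        // A section's virtual address range is already mapped in this VMM.
        error.AlreadyAllocated => @panic("section collides with an existing mapping"),
        // A section's virtual address lies outside the VMM's managed range.
        error.OutOfBounds => @panic("section address outside the task VMM"),
        else => |err| return err,
    };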


@@ -382,6 +382,7 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
        /// Arguments:
        ///     IN/OUT self: *Self - The manager to allocate for
        ///     IN num: usize - The number of blocks to allocate
+       ///     IN virtual_addr: ?usize - The virtual address to allocate at, or null if any address is acceptable
        ///     IN attrs: Attributes - The attributes to apply to the mapped memory
        ///
        /// Return: ?usize
@@ -390,14 +391,15 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
        /// Error: Allocator.Error
        ///     error.OutOfMemory: The required amount of memory couldn't be allocated
        ///
-       pub fn alloc(self: *Self, num: usize, attrs: Attributes) Allocator.Error!?usize {
+       pub fn alloc(self: *Self, num: usize, virtual_addr: ?usize, attrs: Attributes) Allocator.Error!?usize {
            if (num == 0) {
                return null;
            }
            // Ensure that there is both enough physical and virtual address space free
            if (pmm.blocksFree() >= num and self.bmp.num_free_entries >= num) {
                // The virtual address space must be contiguous
-               if (self.bmp.setContiguous(num)) |entry| {
+               // Allocate from a specific entry if the caller requested it
+               if (self.bmp.setContiguous(num, if (virtual_addr) |a| (a - self.start) / BLOCK_SIZE else null)) |entry| {
                    var block_list = std.ArrayList(usize).init(self.allocator);
                    try block_list.ensureCapacity(num);
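With the extra argument, one call site covers both behaviours: passing null keeps the old first-fit search, while passing an address pins the search to the corresponding bitmap entry, (virtual_addr - self.start) / BLOCK_SIZE. A short sketch (the VMM instance name is illustrative; the attributes mirror those used elsewhere in this diff):

    const attrs = .{ .kernel = true, .writable = true, .cachable = true };
    const anywhere = try some_vmm.alloc(4, null, attrs); // first fit anywhere
    const pinned = try some_vmm.alloc(4, some_vmm.start + 2 * BLOCK_SIZE, attrs); // fixed placement; null if occupied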
@@ -427,9 +429,9 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
        /// Arguments:
        ///     IN self: *Self - One of the VMMs to copy between. This should be the currently active VMM
        ///     IN other: *Self - The second of the VMMs to copy between
-       ///     IN data: []u8 - The data being copied from or written to (depending on `from`). Must be mapped within the VMM being copied from/to
+       ///     IN from: bool - Whether the data should be copied from `self` to `other`, or the other way around
+       ///     IN data: if (from) []const u8 else []u8 - The data being copied from or written to (depending on `from`). Must be mapped within the VMM being copied from/to
        ///     IN address: usize - The address within `other` that is to be copied from or to
-       ///     IN from: bool - Whether the data should be copied from `self` to `other`, or the other way around
        ///
        /// Error: VmmError || pmm.PmmError || Allocator.Error
        ///     VmmError.NotAllocated - Some or all of the destination isn't mapped
@@ -437,7 +439,7 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
        ///     Bitmap(u32).Error.OutOfBounds - The address given is outside of the memory managed
        ///     Allocator.Error.OutOfMemory - There wasn't enough memory available to fulfill the request
        ///
-       pub fn copyData(self: *Self, other: *const Self, data: []u8, address: usize, from: bool) (bitmap.Bitmap(usize).BitmapError || VmmError || Allocator.Error)!void {
+       pub fn copyData(self: *Self, other: *const Self, comptime from: bool, data: if (from) []const u8 else []u8, address: usize) (bitmap.Bitmap(usize).BitmapError || VmmError || Allocator.Error)!void {
            if (data.len == 0) {
                return;
            }
@@ -460,35 +462,27 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
                }
            }
            // Make sure the address is actually mapped in the destination VMM
-           if (blocks.items.len == 0) {
+           if (blocks.items.len != std.mem.alignForward(data.len, BLOCK_SIZE) / BLOCK_SIZE) {
                return VmmError.NotAllocated;
            }

            // Map them into self for some vaddr so they can be accessed from this VMM
-           if (self.bmp.setContiguous(blocks.items.len)) |entry| {
+           if (self.bmp.setContiguous(blocks.items.len, null)) |entry| {
                const v_start = entry * BLOCK_SIZE + self.start;
-               defer {
-                   // Unmap virtual blocks from self so they can be used in the future
-                   var v = v_start;
-                   while (v < v_start + blocks.items.len * BLOCK_SIZE) : (v += BLOCK_SIZE) {
-                       // Cannot be out of bounds as it has been set above
-                       self.bmp.clearEntry((v - self.start) / BLOCK_SIZE) catch unreachable;
-                   }
-               }
                for (blocks.items) |block, i| {
                    const v = v_start + i * BLOCK_SIZE;
                    const v_end = v + BLOCK_SIZE;
                    const p = block;
                    const p_end = p + BLOCK_SIZE;
-                   self.mapper.mapFn(v, v_end, p, p_end, .{ .kernel = true, .writable = true, .cachable = true }, self.allocator, self.payload) catch |e| {
+                   self.mapper.mapFn(v, v_end, p, p_end, .{ .kernel = true, .writable = true, .cachable = false }, self.allocator, self.payload) catch |e| {
                        // If we fail to map one of the blocks then attempt to free all previously mapped
                        if (i > 0) {
                            self.mapper.unmapFn(v_start, v_end, self.allocator, self.payload) catch |e2| {
                                // If we can't unmap then just panic
-                               panic(@errorReturnTrace(), "Failed to unmap region 0x{X} -> 0x{X}: {}\n", .{ v_start, v_end, e2 });
+                               panic(@errorReturnTrace(), "Failed to unmap virtual region 0x{X} -> 0x{X}: {}\n", .{ v_start, v_end, e2 });
                            };
                        }
-                       panic(@errorReturnTrace(), "Failed to map vrutal region 0x{X} -> 0x{X} to 0x{X} -> 0x{X}: {}\n", .{ v, v_end, p, p_end, e });
+                       panic(@errorReturnTrace(), "Failed to map virtual region 0x{X} -> 0x{X} to 0x{X} -> 0x{X}: {}\n", .{ v, v_end, p, p_end, e });
                    };
                }
                // Copy to vaddr from above
@@ -499,6 +493,7 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
            } else {
                std.mem.copy(u8, data, data_copy);
            }
+           // TODO Unmap and free virtual blocks from self so they can be used in the future
        } else {
            return VmmError.OutOfMemory;
        }
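Making `from` comptime lets the type of `data` depend on the direction of the copy: a read-only []const u8 source when writing into `other`, and a mutable []u8 destination when reading back. A usage sketch mirroring the tests further down (the second VMM name and the address are illustrative):

    var buff = [4]u8{ 10, 11, 12, 13 };
    // Write buff into other_vmm at addr; data may be const:
    try kernel_vmm.copyData(&other_vmm, true, buff[0..], addr);
    // Read from other_vmm at addr back into buff; data must be mutable:
    try kernel_vmm.copyData(&other_vmm, false, buff[0..], addr);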
@@ -645,7 +640,7 @@ test "alloc and free" {
        // Test allocating various numbers of blocks all at once
        // Rather than using a random number generator, just set the number of blocks to allocate based on how many entries have been done so far
        var num_to_alloc: u32 = if (entry > 400) @as(u32, 8) else if (entry > 320) @as(u32, 14) else if (entry > 270) @as(u32, 9) else if (entry > 150) @as(u32, 26) else @as(u32, 1);
-       const result = try vmm.alloc(num_to_alloc, .{ .kernel = true, .writable = true, .cachable = true });
+       const result = try vmm.alloc(num_to_alloc, null, .{ .kernel = true, .writable = true, .cachable = true });

        var should_be_set = true;
        if (entry + num_to_alloc > num_entries) {
@@ -714,6 +709,33 @@ test "alloc and free" {
     }
 }

+test "alloc at a specific address" {
+    const num_entries = 100;
+    var vmm = try testInit(num_entries);
+    defer testDeinit(&vmm);
+
+    const attrs = Attributes{ .writable = true, .cachable = true, .kernel = true };
+    // Try allocating at the start
+    std.testing.expectEqual(vmm.alloc(10, vmm.start, attrs), vmm.start);
+    // Try that again
+    std.testing.expectEqual(vmm.alloc(5, vmm.start, attrs), null);
+
+    const middle = vmm.start + (vmm.end - vmm.start) / 2;
+    // Try allocating at the middle
+    std.testing.expectEqual(vmm.alloc(num_entries / 2, middle, attrs), middle);
+
+    // Allocating after the start and colliding with the middle should be impossible
+    std.testing.expectEqual(vmm.alloc(num_entries / 2, vmm.start + 10 * BLOCK_SIZE, attrs), null);
+    // Allocating within the last half should be impossible
+    std.testing.expectEqual(vmm.alloc(num_entries / 4, middle + BLOCK_SIZE, attrs), null);
+    // It should still be possible to allocate between the start and middle
+    std.testing.expectEqual(vmm.alloc(num_entries / 2 - 10, vmm.start + 10 * BLOCK_SIZE, attrs), vmm.start + 10 * BLOCK_SIZE);
+    // It should now be full
+    std.testing.expectEqual(vmm.bmp.num_free_entries, 0);
+
+    // Allocating at the end and before the start should fail
+    std.testing.expectEqual(vmm.alloc(1, vmm.end, attrs), null);
+    std.testing.expectEqual(vmm.alloc(1, vmm.start - BLOCK_SIZE, attrs), null);
+}
 test "set" {
     const num_entries = 512;
     var vmm = try testInit(num_entries);
@@ -750,7 +772,7 @@ test "copy" {
     defer testDeinit(&vmm);

     const attrs = .{ .kernel = true, .cachable = true, .writable = true };
-    const alloc0 = (try vmm.alloc(24, attrs)).?;
+    const alloc0 = (try vmm.alloc(24, null, attrs)).?;

     var mirrored = try vmm.copy();
     defer mirrored.deinit();
@@ -768,60 +790,74 @@ test "copy" {
     std.testing.expectEqual(vmm.payload, mirrored.payload);

     // Allocating in the new VMM shouldn't allocate in the mirrored one
-    const alloc1 = (try mirrored.alloc(3, attrs)).?;
+    const alloc1 = (try mirrored.alloc(3, null, attrs)).?;
     std.testing.expectEqual(vmm.allocations.count() + 1, mirrored.allocations.count());
     std.testing.expectEqual(vmm.bmp.num_free_entries - 3, mirrored.bmp.num_free_entries);
     std.testing.expectError(VmmError.NotAllocated, vmm.virtToPhys(alloc1));

     // And vice-versa
-    const alloc2 = (try vmm.alloc(3, attrs)).?;
-    const alloc3 = (try vmm.alloc(1, attrs)).?;
-    const alloc4 = (try vmm.alloc(1, attrs)).?;
+    const alloc2 = (try vmm.alloc(3, null, attrs)).?;
+    const alloc3 = (try vmm.alloc(1, null, attrs)).?;
+    const alloc4 = (try vmm.alloc(1, null, attrs)).?;
     std.testing.expectEqual(vmm.allocations.count() - 2, mirrored.allocations.count());
     std.testing.expectEqual(vmm.bmp.num_free_entries + 2, mirrored.bmp.num_free_entries);
     std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc3));
     std.testing.expectError(VmmError.NotAllocated, mirrored.virtToPhys(alloc4));
 }
test "copyData" { test "copyData from" {
var vmm = try testInit(100); var vmm = try testInit(100);
defer testDeinit(&vmm); defer testDeinit(&vmm);
const alloc1_blocks = 1; const alloc1_blocks = 1;
const alloc = (try vmm.alloc(alloc1_blocks, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable; const alloc = (try vmm.alloc(alloc1_blocks, null, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable;
var vmm2 = try VirtualMemoryManager(u8).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, 39); var vmm2 = try VirtualMemoryManager(arch.VmmPayload).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD);
defer vmm2.deinit(); defer vmm2.deinit();
var vmm_free_entries = vmm.bmp.num_free_entries; var vmm_free_entries = vmm.bmp.num_free_entries;
var vmm2_free_entries = vmm2.bmp.num_free_entries; var vmm2_free_entries = vmm2.bmp.num_free_entries;
var buff: [4]u8 = [4]u8{ 10, 11, 12, 13 }; var buff: [4]u8 = [4]u8{ 10, 11, 12, 13 };
try vmm2.copyData(&vmm, buff[0..buff.len], alloc, true); try vmm2.copyData(&vmm, true, buff[0..buff.len], alloc);
// Make sure they are the same // Make sure they are the same
var buff2 = @intToPtr([*]u8, alloc)[0..buff.len]; var buff2 = @intToPtr([*]u8, alloc)[0..buff.len];
std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2); std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2);
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries); // TODO Remove the subtraction by one once we are able to free the temp space in copyData
std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries);
try vmm2.copyData(&vmm, buff2, alloc, false);
std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2);
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries);
// Test NotAllocated // Test NotAllocated
std.testing.expectError(VmmError.NotAllocated, vmm2.copyData(&vmm, buff[0..buff.len], alloc + alloc1_blocks * BLOCK_SIZE, true)); std.testing.expectError(VmmError.NotAllocated, vmm2.copyData(&vmm, true, buff[0..buff.len], alloc + alloc1_blocks * BLOCK_SIZE));
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries); std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries);
// Test Bitmap.Error.OutOfBounds // Test Bitmap.Error.OutOfBounds
std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm2.copyData(&vmm, buff[0..buff.len], vmm.end, true)); std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm2.copyData(&vmm, true, buff[0..buff.len], vmm.end));
std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm.copyData(&vmm2, buff[0..buff.len], vmm2.end, true)); std.testing.expectError(bitmap.Bitmap(usize).BitmapError.OutOfBounds, vmm.copyData(&vmm2, true, buff[0..buff.len], vmm2.end));
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries); std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries, vmm2.bmp.num_free_entries); std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries);
}
test "copyDaya to" {
var vmm = try testInit(100);
defer testDeinit(&vmm);
const alloc1_blocks = 1;
const alloc = (try vmm.alloc(alloc1_blocks, null, .{ .kernel = true, .writable = true, .cachable = true })) orelse unreachable;
var vmm2 = try VirtualMemoryManager(arch.VmmPayload).init(vmm.start, vmm.end, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD);
defer vmm2.deinit();
var vmm_free_entries = vmm.bmp.num_free_entries;
var vmm2_free_entries = vmm2.bmp.num_free_entries;
var buff: [4]u8 = [4]u8{ 10, 11, 12, 13 };
var buff2 = @intToPtr([*]u8, alloc)[0..buff.len];
try vmm2.copyData(&vmm, false, buff[0..], alloc);
std.testing.expectEqualSlices(u8, buff[0..buff.len], buff2);
std.testing.expectEqual(vmm_free_entries, vmm.bmp.num_free_entries);
std.testing.expectEqual(vmm2_free_entries - 1, vmm2.bmp.num_free_entries);
} }
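
Aside: the two tests above pin down the reworked copyData call shape — the comptime bool now sits before the buffer and selects the copy direction, and the `- 1` expectations exist because the temporary mapping copyData creates cannot be freed yet (see the TODO). A minimal, self-contained sketch of the direction-flag idea, with a toy ToySpace type standing in for the real VMM (all names below are invented for illustration):

const std = @import("std");

// Toy stand-in for an address space: a buffer plus a virtual base address.
const ToySpace = struct {
    base: usize,
    mem: [64]u8,

    // Resolve a virtual address in this space to its backing memory,
    // much like virtToPhys plus a temporary mapping would.
    fn window(self: *ToySpace, vaddr: usize, len: usize) []u8 {
        return self.mem[vaddr - self.base ..][0..len];
    }

    // Mirrors the new copyData shape: the comptime `from` flag selects the
    // direction and the type of `data` (read-only when writing into `other`).
    fn copyData(self: *ToySpace, other: *ToySpace, comptime from: bool, data: if (from) []const u8 else []u8, address: usize) void {
        const dest = other.window(address, data.len);
        if (from) {
            std.mem.copy(u8, dest, data);
        } else {
            std.mem.copy(u8, data, dest);
        }
    }
};

test "direction flag picks the copy direction" {
    var a = ToySpace{ .base = 0x1000, .mem = [_]u8{0} ** 64 };
    var b = ToySpace{ .base = 0x2000, .mem = [_]u8{0} ** 64 };
    var buff = [_]u8{ 10, 11, 12, 13 };
    a.copyData(&b, true, buff[0..], 0x2004);
    std.testing.expectEqualSlices(u8, buff[0..], b.window(0x2004, buff.len));
}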
 var test_allocations: ?*bitmap.Bitmap(u64) = null;
-var test_mapper = Mapper(u8){ .mapFn = testMap, .unmapFn = testUnmap };
-var test_vmm: VirtualMemoryManager(u8) = undefined;
+var test_mapper = Mapper(arch.VmmPayload){ .mapFn = testMap, .unmapFn = testUnmap };
 ///
 /// Initialise a virtual memory manager used for testing
@@ -835,7 +871,7 @@ var test_vmm: VirtualMemoryManager(u8) = undefined;
 /// Error: Allocator.Error
 ///     OutOfMemory: The allocator couldn't allocate the structures needed
 ///
-fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
+pub fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(arch.VmmPayload) {
     if (test_allocations == null) {
         test_allocations = try std.testing.allocator.create(bitmap.Bitmap(u64));
         test_allocations.?.* = try bitmap.Bitmap(u64).init(num_entries, std.testing.allocator);
@@ -859,11 +895,11 @@ fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
     };
     pmm.init(&mem_profile, std.testing.allocator);
     const test_vaddr_start = @ptrToInt(&(try std.testing.allocator.alloc(u8, num_entries * BLOCK_SIZE))[0]);
-    test_vmm = try VirtualMemoryManager(u8).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, 39);
-    return test_vmm;
+    kernel_vmm = try VirtualMemoryManager(arch.VmmPayload).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, arch.KERNEL_VMM_PAYLOAD);
+    return kernel_vmm;
 }
-fn testDeinit(vmm: *VirtualMemoryManager(u8)) void {
+pub fn testDeinit(vmm: *VirtualMemoryManager(arch.VmmPayload)) void {
     vmm.deinit();
     const space = @intToPtr([*]u8, vmm.start)[0 .. vmm.end - vmm.start];
     vmm.allocator.free(space);
@@ -885,14 +921,14 @@ fn testDeinit(vmm: *VirtualMemoryManager(u8)) void {
 ///     IN pend: usize - The end of the physical region to map
 ///     IN attrs: Attributes - The attributes to map with
 ///     IN/OUT allocator: *Allocator - The allocator to use. Ignored
-///     IN payload: u8 - The payload value. Expected to be 39
+///     IN payload: arch.VmmPayload - The payload value. Expected to be arch.KERNEL_VMM_PAYLOAD
 ///
-fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: *Allocator, payload: u8) (Allocator.Error || MapperError)!void {
-    std.testing.expectEqual(@as(u8, 39), payload);
+fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: *Allocator, payload: arch.VmmPayload) (Allocator.Error || MapperError)!void {
+    std.testing.expectEqual(arch.KERNEL_VMM_PAYLOAD, payload);
     var vaddr = vstart;
     var allocations = test_allocations.?;
     while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
-        allocations.setEntry((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable;
+        allocations.setEntry((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable;
     }
 }
@@ -902,15 +938,15 @@ fn testMap(vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attrib
 /// Arguments:
 ///     IN vstart: usize - The start of the virtual region to unmap
 ///     IN vend: usize - The end of the virtual region to unmap
-///     IN payload: u8 - The payload value. Expected to be 39
+///     IN payload: arch.VmmPayload - The payload value. Expected to be arch.KERNEL_VMM_PAYLOAD
 ///
-fn testUnmap(vstart: usize, vend: usize, allocator: *Allocator, payload: u8) MapperError!void {
-    std.testing.expectEqual(@as(u8, 39), payload);
+fn testUnmap(vstart: usize, vend: usize, allocator: *Allocator, payload: arch.VmmPayload) MapperError!void {
+    std.testing.expectEqual(arch.KERNEL_VMM_PAYLOAD, payload);
     var vaddr = vstart;
     var allocations = test_allocations.?;
     while (vaddr < vend) : (vaddr += BLOCK_SIZE) {
-        if (allocations.isSet((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable) {
-            allocations.clearEntry((vaddr - test_vmm.start) / BLOCK_SIZE) catch unreachable;
+        if (allocations.isSet((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable) {
+            allocations.clearEntry((vaddr - kernel_vmm.start) / BLOCK_SIZE) catch unreachable;
         } else {
             return MapperError.NotMapped;
         }
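
Aside: the Mapper(Payload) interface these two hooks satisfy can be read off their signatures; a hedged reconstruction follows (the trimmed Attributes and MapperError stubs stand in for the real definitions in src/kernel/vmm.zig):

const std = @import("std");
const Allocator = std.mem.Allocator;

// Trimmed stand-ins for the real definitions in src/kernel/vmm.zig.
const Attributes = struct { kernel: bool, writable: bool, cachable: bool };
const MapperError = error{NotMapped};

// Generic over the per-arch payload, so testMap/testUnmap (taking
// arch.VmmPayload) and the x86 paging functions can share one interface.
pub fn Mapper(comptime Payload: type) type {
    return struct {
        mapFn: fn (vstart: usize, vend: usize, pstart: usize, pend: usize, attrs: Attributes, allocator: *Allocator, payload: Payload) (Allocator.Error || MapperError)!void,
        unmapFn: fn (vstart: usize, vend: usize, allocator: *Allocator, payload: Payload) MapperError!void,
    };
}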
@@ -981,14 +1017,14 @@ fn rt_correctMapping(comptime Payload: type, vmm: *VirtualMemoryManager(Payload)
 ///     IN vmm: *VirtualMemoryManager() - The active VMM to test
 ///
 fn rt_copyData(vmm: *VirtualMemoryManager(arch.VmmPayload)) void {
-    const expected_free_entries = vmm.bmp.num_free_entries;
+    const expected_free_entries = vmm.bmp.num_free_entries - 1;
     // Mirror the VMM
     var vmm2 = vmm.copy() catch |e| {
         panic(@errorReturnTrace(), "Failed to mirror VMM: {}\n", .{e});
     };
     // Allocate within secondary VMM
-    const addr = vmm2.alloc(1, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| {
+    const addr = vmm2.alloc(1, null, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| {
         panic(@errorReturnTrace(), "Failed to allocate within the secondary VMM in rt_copyData: {}\n", .{e});
     } orelse panic(@errorReturnTrace(), "Failed to get an allocation within the secondary VMM in rt_copyData\n", .{});
     defer vmm2.free(addr) catch |e| {
@@ -999,7 +1035,7 @@ fn rt_copyData(vmm: *VirtualMemoryManager(arch.VmmPayload)) void {
     const expected_free_pmm_entries = pmm.blocksFree();
     // Try copying to vmm2
     var buff: [6]u8 = [_]u8{ 4, 5, 9, 123, 90, 67 };
-    vmm.copyData(&vmm2, buff[0..buff.len], addr, true) catch |e| {
+    vmm.copyData(&vmm2, true, buff[0..buff.len], addr) catch |e| {
         panic(@errorReturnTrace(), "Failed to copy data to secondary VMM in rt_copyData: {}\n", .{e});
     };
     // Make sure the function cleaned up
@@ -1024,10 +1060,41 @@ fn rt_copyData(vmm: *VirtualMemoryManager(arch.VmmPayload)) void {
     var buff2 = vmm.allocator.alloc(u8, buff.len) catch |e| {
         panic(@errorReturnTrace(), "Failed to allocate a test buffer in rt_copyData: {}\n", .{e});
    };
-    vmm.copyData(&vmm2, buff2, addr, false) catch |e| {
+    vmm.copyData(&vmm2, false, buff2, addr) catch |e| {
         panic(@errorReturnTrace(), "Failed to copy data from secondary VMM in rt_copyData: {}\n", .{e});
     };
     if (!std.mem.eql(u8, buff[0..buff.len], buff2)) {
         panic(@errorReturnTrace(), "Data copied from vmm2 doesn't have the expected values\n", .{});
     }
+    // Make sure that a second copy will succeed
+    const addr2 = vmm2.alloc(1, null, .{ .kernel = true, .cachable = true, .writable = true }) catch |e| {
+        panic(@errorReturnTrace(), "Failed to allocate within the secondary VMM in rt_copyData: {}\n", .{e});
+    } orelse panic(@errorReturnTrace(), "Failed to get an allocation within the secondary VMM in rt_copyData\n", .{});
+    defer vmm2.free(addr2) catch |e| {
+        panic(@errorReturnTrace(), "Failed to free the allocation in secondary VMM: {}\n", .{e});
+    };
+    const expected_free_entries3 = vmm2.bmp.num_free_entries;
+    const expected_free_pmm_entries3 = pmm.blocksFree();
+    // Try copying to vmm2
+    var buff3: [6]u8 = [_]u8{ 3, 9, 0, 12, 50, 7 };
+    vmm.copyData(&vmm2, true, buff3[0..buff3.len], addr2) catch |e| {
+        panic(@errorReturnTrace(), "Failed to copy third lot of data to secondary VMM in rt_copyData: {}\n", .{e});
+    };
+    // Make sure the function cleaned up. Each copy into vmm2 still leaks one
+    // temp block in vmm until copyData can free its temporary space, hence - 2
+    if (vmm.bmp.num_free_entries != expected_free_entries - 2) {
+        panic(@errorReturnTrace(), "Expected {} free entries in VMM after third copy, but there were {}\n", .{ expected_free_entries - 2, vmm.bmp.num_free_entries });
+    }
+    if (vmm2.bmp.num_free_entries != expected_free_entries3) {
+        panic(@errorReturnTrace(), "Expected {} free entries in the secondary VMM after third copy, but there were {}\n", .{ expected_free_entries3, vmm2.bmp.num_free_entries });
+    }
+    if (pmm.blocksFree() != expected_free_pmm_entries3) {
+        panic(@errorReturnTrace(), "Expected {} free entries in PMM after third copy, but there were {}\n", .{ expected_free_pmm_entries3, pmm.blocksFree() });
+    }
+    // Make sure that the data at the allocated address is correct
+    // Since vmm2 is a mirror of vmm, this address should be mapped by the CPU's MMU
+    const dest_buff2 = @intToPtr([*]u8, addr2)[0..buff3.len];
+    if (!std.mem.eql(u8, buff3[0..buff3.len], dest_buff2)) {
+        panic(@errorReturnTrace(), "Third lot of data copied doesn't have the expected values\n", .{});
+    }
 }

test/mock/kernel/arch_mock.zig

@@ -1,4 +1,5 @@
 const std = @import("std");
+const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const mem = @import("../../../src/kernel/mem.zig");
 const MemProfile = mem.MemProfile;
@@ -11,6 +12,7 @@ const Serial = @import("../../../src/kernel/serial.zig").Serial;
 const TTY = @import("../../../src/kernel/tty.zig").TTY;
 const Keyboard = @import("../../../src/kernel/keyboard.zig").Keyboard;
 const task = @import("../../../src/kernel/task.zig");
+const x86_paging = @import("../../../src/kernel/arch/x86/paging.zig");
 pub const Device = pci.PciDeviceInfo;
 pub const DateTime = struct {
@@ -54,11 +56,18 @@ pub const CpuState = struct {
     user_ss: u32,
 };
-pub const VmmPayload = u8;
-pub const KERNEL_VMM_PAYLOAD: usize = 0;
+pub const VmmPayload = switch (builtin.arch) {
+    .i386 => *x86_paging.Directory,
+    else => unreachable,
+};
+pub const KERNEL_VMM_PAYLOAD: VmmPayload = switch (builtin.arch) {
+    .i386 => &x86_paging.kernel_directory,
+    else => unreachable,
+};
 pub const MEMORY_BLOCK_SIZE: u32 = paging.PAGE_SIZE_4KB;
 pub const STACK_SIZE: u32 = MEMORY_BLOCK_SIZE / @sizeOf(u32);
-pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = undefined;
+pub const VMM_MAPPER: vmm.Mapper(VmmPayload) = .{ .mapFn = map, .unmapFn = unmap };
 pub const BootPayload = u8;
 pub const Task = task.Task;
@@ -69,6 +78,9 @@ var KERNEL_VADDR_START: u32 = 0xC0100000;
 var KERNEL_VADDR_END: u32 = 0xC1100000;
 var KERNEL_ADDR_OFFSET: u32 = 0xC0000000;
+pub fn map(start: usize, end: usize, p_start: usize, p_end: usize, attrs: vmm.Attributes, allocator: *Allocator, payload: VmmPayload) !void {}
+pub fn unmap(start: usize, end: usize, allocator: *Allocator, payload: VmmPayload) !void {}
 pub fn out(port: u16, data: anytype) void {
     return mock_framework.performAction("out", void, .{ port, data });
 }
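
Aside: the switch-typed payload above is the crux of this change — vmm.zig stays arch-agnostic while i386 threads its page directory (*x86_paging.Directory) through every map/unmap call. A small illustration of the pattern, under invented names (Vmm, Dir):

const std = @import("std");

// A generic container that carries an arch-chosen payload, as the real
// VirtualMemoryManager(arch.VmmPayload) does.
fn Vmm(comptime Payload: type) type {
    return struct {
        payload: Payload,

        fn init(payload: Payload) @This() {
            return .{ .payload = payload };
        }
    };
}

test "the payload reaches the VMM untouched" {
    const Dir = struct { entries: [4]u32 };
    var dir = Dir{ .entries = [_]u32{0} ** 4 };
    const v = Vmm(*Dir).init(&dir);
    std.testing.expectEqual(&dir, v.payload);
}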

test/user_program.ld (new file)

@@ -0,0 +1,13 @@
+ENTRY(entry)
+SECTIONS {
+    .text ALIGN(4K) : {
+        *(.text)
+    }
+    .data ALIGN(4K) : {
+        *(.data)
+    }
+}
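
The ALIGN(4K) directives are load-bearing: the ELF loader this PR adds presumably maps user memory in MEMORY_BLOCK_SIZE (PAGE_SIZE_4KB) chunks, so each loadable section has to start on a page boundary. A one-line illustration of the invariant the script guarantees (PAGE_SIZE here mirrors paging.PAGE_SIZE_4KB):

const std = @import("std");

const PAGE_SIZE: usize = 4096; // mirrors paging.PAGE_SIZE_4KB

test "ALIGN(4K) section addresses are page-aligned" {
    const text_vaddr: usize = 0x2000; // any address ALIGN(4K) can produce
    std.testing.expect(std.mem.isAligned(text_vaddr, PAGE_SIZE));
}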

test/user_program.s

@@ -1,3 +1,5 @@
+.section .text
+.globl entry
 entry:
     mov $0xCAFE, %eax
     mov $0xBEEF, %ebx

test/user_program_data.s (new file)

@@ -0,0 +1,11 @@
+.section .text
+.globl entry
+entry:
+    mov item1, %eax
+    mov item2, %ebx
+loop:
+    jmp loop
+.section .data
+item1: .long 0xCAFE
+item2: .long 0xBEEF
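
Note: this variant loads the same magic values as user_program.s, but from .data rather than from immediates, so it only passes if the loader actually maps the .data section the linker script lays out. A hedged sketch of the register check the Scheduler runtime test presumably performs on the task's saved state (TestState is a trimmed stand-in for the CpuState struct above):

const std = @import("std");

// Trimmed stand-in for the eax/ebx part of CpuState.
const TestState = struct { eax: u32, ebx: u32 };

// Both user programs should leave the magic values in eax/ebx, whether they
// came from immediates (user_program.s) or .data loads (user_program_data.s).
fn hasMagicValues(state: TestState) bool {
    return state.eax == 0xCAFE and state.ebx == 0xBEEF;
}

test "magic register values" {
    std.testing.expect(hasMagicValues(.{ .eax = 0xCAFE, .ebx = 0xBEEF }));
}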