Check for memory leaks in all tests that allocate memory

Sam Tebbs 2020-10-28 17:36:40 +00:00
parent 374e95f322
commit 238e050c8c
7 changed files with 105 additions and 31 deletions
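Every file in this change follows the same pattern: tests allocate through std.testing.allocator instead of std.heap.page_allocator, the owning struct keeps a reference to its allocator and gains a deinit(), and each test defers that deinit() so the testing allocator can report anything left unfreed when the test returns. A minimal sketch of that pattern, using a hypothetical Thing type that is not part of this repository:

const std = @import("std");

// Hypothetical resource type, only to illustrate the pattern used by this
// commit: the struct keeps the allocator it was given so deinit() can free
// everything it allocated.
const Thing = struct {
    data: []u8,
    allocator: *std.mem.Allocator,

    pub fn init(allocator: *std.mem.Allocator) !Thing {
        return Thing{ .data = try allocator.alloc(u8, 32), .allocator = allocator };
    }

    pub fn deinit(self: *Thing) void {
        self.allocator.free(self.data);
    }
};

test "thing does not leak" {
    var thing = try Thing.init(std.testing.allocator);
    // Without this defer, std.testing.allocator reports the 32 bytes as leaked
    // when the test finishes.
    defer thing.deinit();
}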

View file

@@ -25,6 +25,23 @@ pub const Directory = packed struct {
/// The tables allocated for the directory. This is ignored by the CPU.
tables: [ENTRIES_PER_DIRECTORY]?*Table,
/// The allocator used to allocate and free tables
allocator: *std.mem.Allocator,
///
/// Free the state occupied by the directory. It will be unusable afterwards.
///
/// Arguments:
/// IN self: *Self - The directory to deinitialise.
///
pub fn deinit(self: *@This()) void {
for (self.tables) |table| {
if (table) |t| {
self.allocator.destroy(t);
}
}
}
};
/// An array of table entries. Forms the second level of paging and covers a 4MB memory space.
@@ -109,7 +126,7 @@ pub const PAGE_SIZE_4MB: usize = 0x400000;
pub const PAGE_SIZE_4KB: usize = PAGE_SIZE_4MB / 1024;
/// The kernel's page directory. Should only be used to map kernel-owned code and data
pub var kernel_directory: Directory align(@truncate(u29, PAGE_SIZE_4KB)) = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
pub var kernel_directory: Directory align(@truncate(u29, PAGE_SIZE_4KB)) = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY, .allocator = &mem.fixed_buffer_allocator.allocator };
///
/// Convert a virtual address to an index within an array of directory entries.
@@ -500,8 +517,9 @@ test "virtToTableEntryIdx" {
}
test "mapDirEntry" {
var allocator = std.heap.page_allocator;
var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
var allocator = std.testing.allocator;
var dir: Directory = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY, .allocator = allocator };
defer dir.deinit();
const attrs = vmm.Attributes{ .kernel = false, .writable = false, .cachable = false };
vmm.kernel_vmm = try vmm.VirtualMemoryManager(arch.VmmPayload).init(PAGE_SIZE_4MB, 0xFFFFFFFF, allocator, arch.VMM_MAPPER, undefined);
{
@@ -533,8 +551,8 @@ test "mapDirEntry" {
}
test "mapDirEntry returns errors correctly" {
var allocator = std.heap.page_allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined };
var allocator = std.testing.allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = undefined, .allocator = allocator };
const attrs = vmm.Attributes{ .kernel = true, .writable = true, .cachable = true };
testing.expectError(vmm.MapperError.MisalignedVirtualAddress, mapDirEntry(&dir, 1, PAGE_SIZE_4KB + 1, 0, PAGE_SIZE_4KB, attrs, allocator));
testing.expectError(vmm.MapperError.MisalignedPhysicalAddress, mapDirEntry(&dir, 0, PAGE_SIZE_4KB, 1, PAGE_SIZE_4KB + 1, attrs, allocator));
@@ -544,8 +562,9 @@ test "mapDirEntry returns errors correctly" {
}
test "map and unmap" {
var allocator = std.heap.page_allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY };
var allocator = std.testing.allocator;
var dir = Directory{ .entries = [_]DirectoryEntry{0} ** ENTRIES_PER_DIRECTORY, .tables = [_]?*Table{null} ** ENTRIES_PER_DIRECTORY, .allocator = allocator };
defer dir.deinit();
const phys_start: usize = PAGE_SIZE_4MB * 2;
const virt_start: usize = PAGE_SIZE_4MB * 4;
const phys_end: usize = PAGE_SIZE_4MB * 4;

View file

@@ -208,6 +208,7 @@ pub fn Bitmap(comptime BitmapType: type) type {
num_entries: usize,
bitmaps: []BitmapType,
num_free_entries: usize,
allocator: *std.mem.Allocator,
///
/// Create an instance of this bitmap type.
@@ -230,6 +231,7 @@ pub fn Bitmap(comptime BitmapType: type) type {
.num_entries = num_entries,
.bitmaps = try allocator.alloc(BitmapType, num),
.num_free_entries = num_entries,
.allocator = allocator,
};
for (self.bitmaps) |*bmp| {
bmp.* = 0;
@@ -237,6 +239,16 @@ pub fn Bitmap(comptime BitmapType: type) type {
return self;
}
///
/// Free the memory occupied by this bitmap's internal state. It will become unusable afterwards.
///
/// Arguments:
/// IN self: *Self - The bitmap that should be deinitialised
///
pub fn deinit(self: *Self) void {
self.allocator.free(self.bitmaps);
}
///
/// Set an entry within a bitmap as occupied.
///
@@ -533,7 +545,8 @@ test "Comptime setContiguous" {
}
test "setEntry" {
var bmp = try Bitmap(u32).init(31, std.heap.page_allocator);
var bmp = try Bitmap(u32).init(31, std.testing.allocator);
defer bmp.deinit();
testing.expectEqual(@as(u32, 31), bmp.num_free_entries);
try bmp.setEntry(0);
@@ -554,7 +567,8 @@ test "setEntry" {
}
test "clearEntry" {
var bmp = try Bitmap(u32).init(32, std.heap.page_allocator);
var bmp = try Bitmap(u32).init(32, std.testing.allocator);
defer bmp.deinit();
testing.expectEqual(@as(u32, 32), bmp.num_free_entries);
try bmp.setEntry(0);
@@ -580,7 +594,8 @@ test "clearEntry" {
}
test "setFirstFree multiple bitmaps" {
var bmp = try Bitmap(u8).init(9, std.heap.page_allocator);
var bmp = try Bitmap(u8).init(9, std.testing.allocator);
defer bmp.deinit();
// Allocate the first entry
testing.expectEqual(bmp.setFirstFree() orelse unreachable, 0);
@@ -616,7 +631,8 @@ test "setFirstFree multiple bitmaps" {
}
test "setFirstFree" {
var bmp = try Bitmap(u32).init(32, std.heap.page_allocator);
var bmp = try Bitmap(u32).init(32, std.testing.allocator);
defer bmp.deinit();
// Allocate the first entry
testing.expectEqual(bmp.setFirstFree() orelse unreachable, 0);
@@ -637,7 +653,8 @@ test "setFirstFree" {
}
test "isSet" {
var bmp = try Bitmap(u32).init(32, std.heap.page_allocator);
var bmp = try Bitmap(u32).init(32, std.testing.allocator);
defer bmp.deinit();
bmp.bitmaps[0] = 1;
// Make sure that only the set entry is considered set
@@ -669,7 +686,8 @@ test "isSet" {
}
test "indexToBit" {
var bmp = try Bitmap(u8).init(10, std.heap.page_allocator);
var bmp = try Bitmap(u8).init(10, std.testing.allocator);
defer bmp.deinit();
testing.expectEqual(bmp.indexToBit(0), 1);
testing.expectEqual(bmp.indexToBit(1), 2);
testing.expectEqual(bmp.indexToBit(2), 4);
@@ -683,7 +701,8 @@ test "indexToBit" {
}
test "setContiguous" {
var bmp = try Bitmap(u4).init(15, std.heap.page_allocator);
var bmp = try Bitmap(u4).init(15, std.testing.allocator);
defer bmp.deinit();
// Test trying to set more entries than the bitmap has
testing.expectEqual(bmp.setContiguous(bmp.num_entries + 1), null);
// All entries should still be free

View file

@@ -336,18 +336,13 @@ const init_allocations: usize = 10;
test "init with files cleans memory if OutOfMemory" {
var i: usize = 0;
while (i < init_allocations) : (i += 1) {
{
var fa = std.testing.FailingAllocator.init(std.testing.allocator, i);
var ramdisk_bytes = try createInitrd(std.testing.allocator);
defer std.testing.allocator.free(ramdisk_bytes);
var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes);
expectError(error.OutOfMemory, InitrdFS.init(&initrd_stream, &fa.allocator));
}
// Ensure we have freed any memory allocated
std.testing.expectEqual(false, std.testing.allocator_instance.detectLeaks());
var fa = std.testing.FailingAllocator.init(std.testing.allocator, i);
var ramdisk_bytes = try createInitrd(std.testing.allocator);
defer std.testing.allocator.free(ramdisk_bytes);
var initrd_stream = std.io.fixedBufferStream(ramdisk_bytes);
expectError(error.OutOfMemory, InitrdFS.init(&initrd_stream, &fa.allocator));
}
}
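The test above loops over failure indices, wrapping std.testing.allocator in a FailingAllocator that fails on the i-th allocation; InitrdFS.init must therefore release everything it allocated before the failure, or the leak check trips. A self-contained sketch of the same technique, with a hypothetical Widget type standing in for InitrdFS:

const std = @import("std");
const expectError = std.testing.expectError;

// Hypothetical type with two allocations in init(); the errdefer releases the
// first allocation if the second one fails, which is exactly what the
// FailingAllocator loop verifies.
const Widget = struct {
    a: []u8,
    b: []u8,

    pub fn init(allocator: *std.mem.Allocator) !Widget {
        const a = try allocator.alloc(u8, 8);
        errdefer allocator.free(a);
        const b = try allocator.alloc(u8, 8);
        return Widget{ .a = a, .b = b };
    }
};

test "init frees partial allocations on OutOfMemory" {
    var i: usize = 0;
    // Fail the first allocation, then the second, so both failure paths run.
    while (i < 2) : (i += 1) {
        var fa = std.testing.FailingAllocator.init(std.testing.allocator, i);
        expectError(error.OutOfMemory, Widget.init(&fa.allocator));
    }
}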

View file

@@ -484,8 +484,9 @@ test "parseMapEntry fails without a name" {
}
test "SymbolMap" {
var allocator = std.heap.page_allocator;
var allocator = std.testing.allocator;
var map = SymbolMap.init(allocator);
defer map.deinit();
try map.add("abc"[0..], 123);
try map.addEntry(MapEntry{ .func_name = "def"[0..], .addr = 456 });
try map.add("ghi"[0..], 789);

View file

@@ -129,8 +129,16 @@ pub fn init(mem_profile: *const MemProfile, allocator: *Allocator) void {
}
}
///
/// Free the internal state of the PMM. It is unusable afterwards unless re-initialised
///
pub fn deinit() void {
bitmap.deinit();
}
test "alloc" {
bitmap = try Bitmap(u32).init(32, std.heap.page_allocator);
bitmap = try Bitmap(u32).init(32, testing.allocator);
defer bitmap.deinit();
comptime var addr = 0;
comptime var i = 0;
// Allocate all entries, making sure they succeed and return the correct addresses
@@ -148,7 +156,8 @@ test "alloc" {
}
test "free" {
bitmap = try Bitmap(u32).init(32, std.heap.page_allocator);
bitmap = try Bitmap(u32).init(32, testing.allocator);
defer bitmap.deinit();
comptime var i = 0;
// Allocate and free all entries
inline while (i < 32) : (i += 1) {
@@ -165,7 +174,8 @@ test "free" {
test "setAddr and isSet" {
const num_entries: u32 = 32;
bitmap = try Bitmap(u32).init(num_entries, std.heap.page_allocator);
bitmap = try Bitmap(u32).init(num_entries, testing.allocator);
defer bitmap.deinit();
var addr: u32 = 0;
var i: u32 = 0;
while (i < num_entries) : ({

View file

@@ -180,6 +180,21 @@ pub fn VirtualMemoryManager(comptime Payload: type) type {
};
}
///
/// Free the internal state of the VMM. It is unusable afterwards
///
/// Arguments:
/// IN self: *Self - The VMM to deinitialise
///
pub fn deinit(self: *Self) void {
self.bmp.deinit();
var it = self.allocations.iterator();
while (it.next()) |entry| {
entry.value.physical.deinit();
}
self.allocations.deinit();
}
///
/// Find the physical address that a given virtual address is mapped to.
///
@@ -458,6 +473,7 @@ pub fn init(mem_profile: *const mem.MemProfile, allocator: *Allocator) Allocator
test "virtToPhys" {
const num_entries = 512;
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const vstart = test_vaddr_start + BLOCK_SIZE;
const vend = vstart + BLOCK_SIZE * 3;
@@ -480,6 +496,7 @@ test "virtToPhys" {
test "physToVirt" {
const num_entries = 512;
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const vstart = test_vaddr_start + BLOCK_SIZE;
const vend = vstart + BLOCK_SIZE * 3;
@@ -502,6 +519,7 @@ test "physToVirt" {
test "alloc and free" {
const num_entries = 512;
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
var allocations = test_allocations.?;
var virtual_allocations = std.ArrayList(usize).init(std.testing.allocator);
defer virtual_allocations.deinit();
@@ -583,6 +601,7 @@ test "alloc and free" {
test "set" {
const num_entries = 512;
var vmm = try testInit(num_entries);
defer testDeinit(&vmm);
const vstart = vmm.start + BLOCK_SIZE * 37;
const vend = vmm.start + BLOCK_SIZE * 46;
@@ -627,7 +646,7 @@ const test_vaddr_start: usize = 0xC0000000;
///
fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
if (test_allocations == null) {
test_allocations = try bitmap.Bitmap(u64).init(num_entries, std.heap.page_allocator);
test_allocations = try bitmap.Bitmap(u64).init(num_entries, std.testing.allocator);
} else |allocations| {
var entry: u32 = 0;
while (entry < allocations.num_entries) : (entry += 1) {
@@ -646,8 +665,17 @@ fn testInit(num_entries: u32) Allocator.Error!VirtualMemoryManager(u8) {
.physical_reserved = &[_]mem.Range{},
.modules = &[_]mem.Module{},
};
pmm.init(&mem_profile, std.heap.page_allocator);
return VirtualMemoryManager(u8).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.heap.page_allocator, test_mapper, 39);
pmm.init(&mem_profile, std.testing.allocator);
return VirtualMemoryManager(u8).init(test_vaddr_start, test_vaddr_start + num_entries * BLOCK_SIZE, std.testing.allocator, test_mapper, 39);
}
fn testDeinit(vmm: *VirtualMemoryManager(u8)) void {
defer vmm.deinit();
defer {
test_allocations.?.deinit();
test_allocations = null;
}
defer pmm.deinit();
}
///

View file

@@ -28,6 +28,8 @@ pub const MemProfile = struct {
fixed_allocator: std.heap.FixedBufferAllocator,
};
pub var fixed_buffer_allocator: std.heap.FixedBufferAllocator = undefined;
const FIXED_ALLOC_SIZE = 1024 * 1024;
const ADDR_OFFSET: usize = 100;
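The new allocator field on Directory means kernel_directory needs an allocator in its global initialiser, which the paging change satisfies with &mem.fixed_buffer_allocator.allocator. These hunks do not show how that FixedBufferAllocator gets its backing memory; one plausible wiring, where the fixed_buffer array and the initFixedBuffer helper are assumptions rather than the repository's actual code, is:

const std = @import("std");

const FIXED_ALLOC_SIZE = 1024 * 1024;

// Assumed backing storage: a statically reserved buffer that the global
// FixedBufferAllocator hands out from, so &fixed_buffer_allocator.allocator is
// usable before any real heap exists.
var fixed_buffer: [FIXED_ALLOC_SIZE]u8 = undefined;
pub var fixed_buffer_allocator: std.heap.FixedBufferAllocator = undefined;

// Hypothetical helper; the kernel would call something like this during its
// early memory initialisation.
pub fn initFixedBuffer() void {
    fixed_buffer_allocator = std.heap.FixedBufferAllocator.init(fixed_buffer[0..]);
}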