#ifndef SLAB_H
#define SLAB_H

/*
 * To understand this code, some prerequisite knowledge of memory and allocators is
 * required. You should probably know what a free-list is, and maybe how to implement a bump allocator.
 *
 * See James Shackleford's excellent presentation on the Linux slab allocator:
 * https://www.youtube.com/watch?v=pFi-JKgoX-I
 *
 * In the Linux source tree, the slub implementation resides in:
 * mm/slub.c
 *
 * To see the slab caches on a Linux system:
 * $ sudo cat /proc/slabinfo
 * In order of columns:
 * name, active objs, total objs, obj size, objs per slab, pages per slab
 * For more info about how to decode this, see:
 * $ man 5 slabinfo
 *
 * The way kernel memory allocation works in modern Unixes is essentially outlined by:
 * kmalloc() -> slub/slab/slob -> binary-buddy
 *
 * kmalloc redirects the request to one of its appropriately sized slab caches, which in
 * turn checks for a free slot in an existing slab; if none is available, a new slab is
 * allocated from the binary-buddy allocator, which only hands out fixed-size blocks
 * (usually pages).
 */

#include <stddef.h>
#include <stdint.h>

/* Boundaries of the two canonical address ranges. The values below are illustrative,
 * assuming x86-64 with 4-level paging: user space tops out at 0x00007fffffffffff and
 * kernel space starts at 0xffff800000000000. */
#define KERN_TOP_ADDR 0xffff800000000000UL /* lowest kernel-space address */
#define USER_TOP_ADDR 0x00007fffffffffffUL /* highest user-space address */

/* Poison bytes, written into objects so that stray reads and writes can be detected
 * later by comparing against these known values */
#define POISON_FREE 0x6b   /* fills the body of a freed object (use-after-free detection) */
#define POISON_END 0x6a    /* marks the last byte of the poisoned area */
#define POISON_IN_USE 0x5a /* fills a newly allocated object (use-of-uninitialized detection) */

/* Red-zone bytes, placed just past the object payload to catch out-of-bounds writes */
#define RED_INACTIVE 0xbb /* red-zone value while the object is free */
#define RED_ACTIVE 0xcc   /* red-zone value while the object is allocated */

/* On x86-64 the gap between the top of user space and the bottom of kernel space is
 * unmapped; addresses inside that hole are non-canonical. A canonical address is any
 * address outside the hole (i.e. it belongs to either the user or the kernel half). */
#define IS_CANONICAL_ADDRESS(ptr) \
    (!((uintptr_t)(ptr) < KERN_TOP_ADDR && (uintptr_t)(ptr) > USER_TOP_ADDR))

/* Maps an allocation size to the index of the kmalloc cache ('bin') whose object size
 * ('order') is the smallest one that fits it. Stubbed out here; see the sketch below. */
#define SIZE_TO_KMALLOC_CACHE_INDEX(size) (0)
struct kmem_cache *kmalloc_caches[10];
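
/* A minimal sketch of how the size-to-index mapping could work, assuming kmalloc_caches[0]
 * holds 8-byte (2^3) objects and each following cache doubles the object size. The caller
 * is expected to fall back to the page allocator if the result is out of range.
 * kmalloc_cache_index() is a hypothetical helper, not part of the real kernel API. */
static inline size_t kmalloc_cache_index(size_t size)
{
    size_t index = 0;
    size_t obj_size = 8; /* smallest cache holds 8-byte objects */

    /* Walk up the power-of-two object sizes until the request fits */
    while (obj_size < size) {
        obj_size <<= 1;
        index++;
    }
    return index;
}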

/* GetFreePages_t: The type used for flags in various allocation related functions */
typedef uint32_t gfp_t;

/* kmalloc manages an internal array of slab caches, with object sizes from 8 bytes (2^3)
 * up to 2^12 bytes, one cache per power of two */
void *kzalloc(size_t size); // Same as kmalloc() but zeroes the memory
void *kmalloc(size_t size) {
    /* If the order (size) is too large to fit inside one of the slab caches, go directly
     * to the frame (page, buddy) allocator */
    struct kmem_cache *suitable_cache = kmalloc_caches[SIZE_TO_KMALLOC_CACHE_INDEX(size)];
    /* Do operations on the kmem cache */
    (void)suitable_cache;
    return NULL;
}
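
/* A minimal sketch of how kzalloc() could be built on top of kmalloc(): allocate, then
 * zero every byte by hand. This assumes kmalloc() eventually returns usable memory (the
 * stub above still returns NULL); a real implementation would typically pass a zeroing
 * flag down to the allocator instead. */
void *kzalloc(size_t size) {
    unsigned char *p = kmalloc(size);

    if (p != NULL) {
        for (size_t i = 0; i < size; i++)
            p[i] = 0;
    }
    return p;
}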

/* For big allocations; the result is virtually contiguous but not physically contiguous */
void *vmalloc(size_t size);

// Cache creation flags:
// SLAB_HWCACHE_ALIGN - align objects to hardware cache lines
// SLAB_CACHE_DMA     - allocate the slabs from DMA-capable memory
// SLAB_PANIC         - panic if cache creation fails

/* This struct exists only conceptually, as the poison is variable in length */
/* The struct laid out inside each free slot of a slab */
struct kmem_free_obj {
    /* FreePointer: pointer to the next free object in the slab, essentially forming a
     * linked list; NULL means this is the last free object */
    /* Poison: written into the free area so that stray reads or writes can be detected
     * by comparing against the known poison values above */
    /* Pad: to keep things aligned */
};
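
/* A minimal sketch of how poisoning could be verified at allocation time, assuming the
 * convention above: every byte of the free area except the last holds POISON_FREE and the
 * last byte holds POISON_END. Returns 1 if the object is untouched, 0 if someone scribbled
 * over it while it was free. check_poison() is illustrative, not a real kernel function. */
static inline int check_poison(const unsigned char *obj, size_t size)
{
    for (size_t i = 0; i + 1 < size; i++) {
        if (obj[i] != POISON_FREE)
            return 0; /* corruption: a byte of the freed object was modified */
    }
    return size == 0 || obj[size - 1] == POISON_END;
}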

/* This struct exists only conceptually */
struct kmem_allocated_obj {
    char payload[128];  /* Sized for demonstration */
    char red_zone[16];  /* Sized for demonstration */
};
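
/* A minimal sketch of a red-zone check, assuming the layout of kmem_allocated_obj above:
 * while an object is handed out, every red-zone byte should still read RED_ACTIVE; any
 * other value means the owner wrote past the end of the payload. check_red_zone() is
 * illustrative, not a real kernel function. */
static inline int check_red_zone(const struct kmem_allocated_obj *obj)
{
    for (size_t i = 0; i < sizeof(obj->red_zone); i++) {
        if ((unsigned char)obj->red_zone[i] != RED_ACTIVE)
            return 0; /* overflow detected: the red zone was overwritten */
    }
    return 1;
}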

/* Per-CPU slab cache, so the allocation fast path does not have to worry about locking */
struct kmem_cache_cpu {
    /* FreeList: points to the first free object; equal to the Page pointer if the page is
     * completely empty */
    /* Page: the page allocated by the buddy allocator */
    /* Page_amt: there may be multiple pages per slab */
};
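
/* A minimal sketch of the per-CPU fast path: popping the first object off a freelist in
 * which each free object stores a pointer to the next free object in its first bytes (the
 * kmem_free_obj convention above). The structs in this header have no members yet, so the
 * sketch works on a bare head pointer rather than on a kmem_cache_cpu. */
static inline void *freelist_pop(void **freelist_head)
{
    void *obj = *freelist_head;

    if (obj != NULL) {
        /* The first word of a free object is its FreePointer; follow it to advance the
         * head of the list */
        *freelist_head = *(void **)obj;
    }
    return obj;
}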

/* Essentially a list head for pages */
struct kmem_page {
    /* Page */
    /* FirstFree */
    /* Slabsize? */
    /* Next */
    /* Prev? */
};

/* Global (as in cross-CPU) pointers to pages in various states */
/* When kmem_cache_cpu->page gets full, it gets swapped onto the full linked list */
/* Operations on this struct require locking */
struct kmem_cache_node {
    /* Partial: linked list of kmem_page; pages are moved here from full once something
     * inside a full page is freed */
    /* Full: linked list of kmem_page */
};
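
/* A minimal sketch of moving a page from the full list to the partial list when one of
 * its objects is freed. Since kmem_page has no members yet, the sketch uses a hypothetical
 * singly linked demo_page node; in the real allocator this happens under the node lock. */
struct demo_page {
    struct demo_page *next;
};

static inline void demo_move_to_partial(struct demo_page **full, struct demo_page **partial,
                                         struct demo_page *page)
{
    /* Unlink 'page' from the full list */
    for (struct demo_page **pp = full; *pp != NULL; pp = &(*pp)->next) {
        if (*pp == page) {
            *pp = page->next;
            break;
        }
    }
    /* Push it onto the front of the partial list */
    page->next = *partial;
    *partial = page;
}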

struct kmem_cache {
    const char *name;
    struct kmem_cache_cpu *cpu_slab;
    struct kmem_cache_node *node;
};

// KMEM_CACHE(struct, flags) convenience macro
// kmem_cache_create(name, size, alignment, flags, constructor)
// kmem_cache_destroy(*kmem_cache)
// kmem_cache_alloc(*kmem_cache, gfp flags (GFP_KERNEL, GFP_ATOMIC, __GFP_ZERO))
// kmem_cache_free(*kmem_cache, *obj) <- Optionally checks the red zone
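
/* Example of how the cache API above is typically used (sketched from the Linux-style
 * API; none of these functions are actually declared in this header):
 *
 *   struct kmem_cache *task_cache =
 *       kmem_cache_create("task_struct", sizeof(struct task_struct), 0,
 *                         SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 *
 *   struct task_struct *t = kmem_cache_alloc(task_cache, GFP_KERNEL);
 *   ...
 *   kmem_cache_free(task_cache, t);
 *   kmem_cache_destroy(task_cache);
 */
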
#endif // SLAB_H