Compare commits


3 commits

Author   SHA1         Message            Date
Imbus    78e4ad9885   Slab outline       2025-09-04 17:57:59 +02:00
Imbus    129b29eaf3   Likely/unlikely    2025-09-04 05:00:09 +02:00
Imbus    4ee23cd145   stddef.h: size_t   2025-09-04 05:00:02 +02:00
3 changed files with 141 additions and 0 deletions

stddef.h (5 additions)

@@ -1,8 +1,13 @@
 #ifndef STDDEF_H
 #define STDDEF_H
 
+#include <stdint.h>
+
 #ifndef NULL
 #define NULL ((void *)0)
 #endif
 
+#ifndef size_t
+#define size_t uint64_t
+#endif
+
 #endif // STDDEF_H

kern/libkern/util.h (new file, 15 additions)

@@ -0,0 +1,15 @@
#ifndef UTIL_H
#define UTIL_H

/*
* Give hints to the compiler for branch prediction optimization.
*/
#if defined(__clang__) || (defined(__GNUC__) && (__GNUC__ > 2))
#define likely(c) (__builtin_expect(!!(c), 1))
#define unlikely(c) (__builtin_expect(!!(c), 0))
#else
#define likely(c) (c)
#define unlikely(c) (c)
#endif

#endif // UTIL_H
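
As a quick illustration of how these hints are meant to be used (a sketch; the helper function below and its include path are assumptions, not part of the commit):

#include "util.h"

/* Hypothetical helper: tell the compiler that the error path is rarely taken. */
int read_block(const unsigned char *buf, unsigned long len)
{
        if (unlikely(buf == 0 || len == 0))
                return -1; /* cold path */
        /* ... hot path: process the block ... */
        return 0;
}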

kern/slab.h (new file, 121 additions)

@@ -0,0 +1,121 @@
#ifndef SLAB_H
#define SLAB_H
/*
* To understand this code, some prerequisite knowledge of memory and allocators is
* required. You should probably know what a free-list is, and maybe how to implement a bump allocator.
*
* See James Shackleford's excellent presentation on the Linux slab allocator:
* https://www.youtube.com/watch?v=pFi-JKgoX-I
*
* In the Linux source tree, the slub implementation resides in:
* mm/slub.c
*
* To see the slab caches on a Linux system:
* $ sudo cat /proc/slabinfo
* The columns are, in order:
* name, active objects, total objects, object size, objects per slab, pages per slab
* For more info about how to decode this, see:
* $ man 5 slabinfo
*
* The way kernel memory allocation works in modern Unixes is essentially outlined by:
* kmalloc() -> slub/slab/slob -> binary-buddy
*
* That is, kmalloc() redirects the request to one of its appropriately sized slab caches, which
* in turn checks for a free object; if none is available, a new slab is allocated from the
* binary-buddy allocator, which only hands out fixed-size blocks (usually pages).
*/
#include <stddef.h>
#include <stdint.h>
#define KERN_TOP_ADDR /* TODO: kernel-space boundary, depends on the memory layout */
#define USER_TOP_ADDR /* TODO: top of user space, depends on the memory layout */
#define POISON_FREE 0x6b
#define POISON_END 0x6a
#define POISON_IN_USE 0x5a
#define RED_INACTIVE 0xbb
#define RED_ACTIVE 0xcc
/* Canonical addresses map to user or kernel space; pointers in the unmapped hole
* between the two are non-canonical */
#define IS_CANONICAL_ADDRESS(ptr) (!((ptr) < KERN_TOP_ADDR && (ptr) > USER_TOP_ADDR))
/* Map an allocation size to a kmalloc cache index (a 'bin size' or 'order'); placeholder for now */
#define SIZE_TO_KMALLOC_CACHE_INDEX(size) (0)
struct kmem_cache *kmalloc_caches[10];
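/*
* Sketch, not part of the original outline: one way SIZE_TO_KMALLOC_CACHE_INDEX
* could be implemented, assuming kmalloc_caches[i] holds objects of 2^(i+3) bytes
* (8, 16, ..., doubling per slot).
*/
static inline unsigned int kmalloc_cache_index(size_t size)
{
        unsigned int index = 0;
        size_t bin = 8; /* smallest bin: 2^3 bytes */

        while (bin < size && index < 9) { /* 10 caches -> indices 0..9 */
                bin <<= 1;
                index++;
        }
        return index;
}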
/* gfp_t ("get free pages" type): the type used for flags in various allocation-related functions */
typedef uint32_t gfp_t;
/* kmalloc() manages an internal array of slab caches, sizes 8 bytes to 2^13 bytes */
void *kzalloc(size_t size); // Same as kmalloc(), but zeroes the memory
void *kmalloc(size_t size) {
/* If the order (size) is too large to fit inside one of the slab caches, go directly to the frame
* (page/buddy) allocator */
struct kmem_cache *suitable_cache = kmalloc_caches[SIZE_TO_KMALLOC_CACHE_INDEX(size)];
/* Do operations on kmem cache */
(void)suitable_cache;
return NULL;
}
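/*
* Sketch, not part of the original outline: kzalloc() could simply wrap kmalloc()
* and clear the returned memory before handing it out.
*/
void *kzalloc(size_t size)
{
        uint8_t *p = kmalloc(size);

        if (p != NULL) {
                for (size_t i = 0; i < size; i++)
                        p[i] = 0;
        }
        return p;
}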
/* For big allocations, not physically contiguous */
void *vmalloc(size_t size);
// SLAB_HWCACHE_ALIGN
// SLAB_CACHE_DMA
// SLAB_PANIC
/* This struct exists only conceptually, as the poison is variable in length */
/* The struct within each slab */
struct kmem_free_obj {
/* FreePointer: pointer to the next free object in the slab (NULL when this is the last one);
* essentially a linked list */
/* Poison: a known byte pattern written into the free area, checked by comparison to detect
* stray reads or writes */
/* Pad: to keep things aligned */
};
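/*
* Sketch, not part of the original outline: a concrete free-object layout for a
* 128-byte object, to make the conceptual fields above tangible. In reality the
* poison length varies with the object size, which is why the struct above is
* only conceptual.
*/
struct kmem_free_obj_128 {
        struct kmem_free_obj_128 *next_free;      /* FreePointer: next free object, or NULL */
        uint8_t poison[128 - sizeof(void *) - 1]; /* filled with POISON_FREE */
        uint8_t poison_end;                       /* POISON_END marker */
};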
/* This struct exists only conceptually */
struct kmem_allocated_obj {
char payload[128]; /* Sized for demonstration */
char red_zone[16]; /* Sized for demonstration */
};
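/*
* Sketch, not part of the original outline: how an (optional) red-zone check on
* free might look for the demonstration layout above.
*/
static inline int redzone_intact(const struct kmem_allocated_obj *obj)
{
        for (size_t i = 0; i < sizeof(obj->red_zone); i++)
                if ((uint8_t)obj->red_zone[i] != RED_ACTIVE)
                        return 0; /* someone wrote past the end of the payload */
        return 1;
}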
/* Per-CPU slab cache, so the fast path does not have to worry about locking */
struct kmem_cache_cpu {
/* FreeList, points to the _first free object_, which will be equal to the Page pointer if the page is empty */
/* Page, the page allocated by buddy */
/* Page_amt, there may be multiple pages */
};
/* Essentially a list head for pages */
struct kmem_page {
/* Page */
/* FirstFree */
/* Slabsize? */
/* Next */
/* Prev? */
};
/* Global (as in cross CPU) pointers to pages in various states */
/* When kmem_cache_cpu->page gets full, it gets swapped into the full linked list */
/* Operations on this struct require locking */
struct kmem_cache_node {
/* Partial, linked list of kmem_page; a page moves here from Full once one of its objects is freed */
/* Full, linked list of kmem_page */
};
struct kmem_cache {
const char *name;
struct kmem_cache_cpu *cpu_slab;
struct kmem_cache_node *node;
};
// KMEM_CACHE macro
// kmem_cache_create(name, size, alignment, flags, constructor)
// kmem_cache_destroy(*kmem_cache)
// kmem_cache_alloc(*kmem_cache, flags (GFP_KERNEL|GFP_ATOMIC|GFP_ZERO))
// kmem_cache_free(*kmem_cache, *obj) <- Optionally checks redzone
#endif // SLAB_H
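
The kmem_cache_* calls listed at the end of kern/slab.h exist only as comments in this commit. Below is a rough sketch of how a slab-cache API in that style (modelled on the Linux interface) is typically used; the prototypes, the flag value, and struct task are assumptions, not part of the commit:

#include "kern/slab.h" /* assuming the repository root is on the include path */

/* Assumed prototypes, following the comments at the end of kern/slab.h */
struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align,
                                     uint32_t flags, void (*ctor)(void *));
void *kmem_cache_alloc(struct kmem_cache *cache, gfp_t flags);
void kmem_cache_free(struct kmem_cache *cache, void *obj);
void kmem_cache_destroy(struct kmem_cache *cache);

struct task {
        int pid;
        /* ... */
};

void example(void)
{
        /* One cache per object type; freed objects are recycled within the cache */
        struct kmem_cache *task_cache =
                kmem_cache_create("task", sizeof(struct task), 8, 0, NULL);

        struct task *t = kmem_cache_alloc(task_cache, 0 /* e.g. GFP_KERNEL */);
        /* ... use t ... */
        kmem_cache_free(task_cache, t); /* may verify the red zone here */

        kmem_cache_destroy(task_cache);
}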