Compare commits

...

8 commits

Author  SHA1        Message                             Date
Imbus   0d8a1af1ff  Use panic macro where suitable      2025-09-02 00:17:58 +02:00
Imbus   6c21ac7669  Better panic, with PANIC macro      2025-09-02 00:17:44 +02:00
Imbus   a6ae43f583  Convert from uart_puts to kprintf   2025-09-02 00:17:34 +02:00
Imbus   7018424278  stdio kprintf working               2025-09-02 00:17:12 +02:00
Imbus   a1f592c880  Stddef NULL                         2025-09-02 00:15:58 +02:00
Imbus   0df09d5086  Spinlock cleaning                   2025-09-01 23:41:11 +02:00
Imbus   eec052bafa  Typedef some common structs         2025-09-01 23:41:00 +02:00
Imbus   52f88785c4  Pin to gnu99                        2025-09-01 23:09:13 +02:00
14 changed files with 160 additions and 177 deletions

View file

@@ -21,6 +21,7 @@ CFLAGS += -ffreestanding
 CFLAGS += -fno-common
 CFLAGS += -nostdlib
 CFLAGS += -mno-relax
+CFLAGS += -std=gnu99
 CFLAGS += -fno-stack-protector # Prevents code that needs libc / runtime support
 CFLAGS += -MD # Generate header dependency files (.d)
@@ -44,7 +45,8 @@ KERNEL_OBJ := \
 	kern/libkern/panic.o \
 	kern/libkern/memory.o \
 	kern/libkern/spinlock.o \
-	kern/libkern/mini-printf.o
+	kern/libkern/mini-printf.o \
+	kern/libkern/stdio.o

 kern/kernel.elf: $(KERNEL_OBJ)
 	@echo LD $@

View file

@@ -44,7 +44,7 @@ void kfree(void *pa) {
     // Assert that the page is aligned to a page boundary and that it's
     // correctly sized
     if (((u64)pa % PGSIZE) != 0 || (char *)pa < kernel_end || (u64)pa >= PHYSTOP)
-        panic("kfree");
+        PANIC("kfree");

     // Fill with junk to catch dangling refs.
     memset(pa, 1, PGSIZE);

View file

@@ -1,8 +1,18 @@
+#include "stdbool.h"
+#include <mini-printf.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
 #include <uart.h>

-volatile int panicked;
+volatile int panicked = false;

-void panic(char *s) {
-    panicked = 1;
-    uart_puts(s);
-    while (1);
+__attribute__((visibility("hidden")))
+void __panic(const char *restrict fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    (void)mini_vpprintf(stdout_puts, NULL, fmt, ap);
+    va_end(ap);
+
+    panicked = true;
+
+    while (true) asm volatile("wfi");
 }

View file

@@ -1,6 +1,8 @@
 #ifndef KERNEL_PANIC_H
 #define KERNEL_PANIC_H

-void panic(char *s);
+#define PANIC(fmt, ...) __panic("[%s:%d %s] \n" fmt, __FILE__, __LINE__, __func__)
+
+void __panic(const char *restrict fmt, ...);

 #endif
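With this header, a call site such as the one in kfree expands to __panic("[%s:%d %s] \n" "kfree", __FILE__, __LINE__, __func__), so the file, line, and function are printed before the message. Note that the macro as committed does not forward its variadic arguments to __panic, so only plain format strings reach it. A sketch of a forwarding variant, relying on the GNU ##__VA_ARGS__ extension that the new -std=gnu99 flag permits:

    /* Sketch only: forwards extra arguments and puts the newline after the
     * message; the committed macro drops __VA_ARGS__ and prints "\n" first. */
    #define PANIC(fmt, ...) \
        __panic("[%s:%d %s] " fmt "\n", __FILE__, __LINE__, __func__, ##__VA_ARGS__)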

View file

@@ -1,6 +1,6 @@
 #include <proc.h>

-struct Cpu cpus[NCPU];
+Cpu cpus[NCPU];

 /**
  * Must be called with interrupts disabled, to prevent race with process being
@@ -14,8 +14,8 @@ int cpuid() {
 /**
  * Return this CPU's cpu struct. Interrupts must be disabled.
  */
-struct Cpu *mycpu(void) {
+Cpu *mycpu(void) {
     int id = cpuid();
-    struct Cpu *c = &cpus[id];
+    Cpu *c = &cpus[id];
     return c;
 }

View file

@@ -3,8 +3,6 @@
  * (Not mutexes as these are spinning locks).
  */

-// #include <lib/stdio.h>
-#include "string.h"
 #include <panic.h>
 #include <proc.h>
 #include <riscv.h>
@@ -41,86 +39,42 @@
  * On RISC-V, this emits a fence instruction.
  */

-/** Initialize Spinlock */
-void initlock(struct Spinlock *lk, char *name) {
-    lk->name = name;
-    lk->locked = 0;
-    lk->cpu = 0;
-}
-
-/**
- * Acquire the lock.
- * Loops (spins) until the lock is acquired.
- * Panics if the lock is already held by this cpu.
+/*
+ * These are from the original xv6 implementation, with only slight modifications on their return type.
+ *
+ * push_off/pop_off are like intr_off()/intr_on() except that they are matched:
+ * it takes two pop_off()s to undo two push_off()s. Also, if interrupts
+ * are initially off, then push_off, pop_off leaves them off.
  */
-void acquire(struct Spinlock *lk) {
-    push_off(); // disable interrupts to avoid deadlock.
-
-    if (holding(lk)) // If the lock is already held, panic.
-        panic("acquire");
-
-    // Spin until aquired. See file header for details
-    while (__sync_lock_test_and_set(&lk->locked, 1) != 0);
-    __sync_synchronize(); // No loads/stores after this point
-
-    // Record info about lock acquisition for holding() and debugging.
-    lk->cpu = mycpu();
-}
-
-/**
- * Release the lock.
- * Panics if the lock is not held.
- */
-void release(struct Spinlock *lk) {
-    if (!holding(lk)) // If the lock is not held, panic.
-        panic("release");
-
-    lk->cpu = 0; // 0 means unheld
-
-    __sync_synchronize(); // No loads/stores after this point
-    __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
-
-    pop_off();
-}
-
-// Check whether this cpu is holding the lock.
-// Interrupts must be off.
-int holding(struct Spinlock *lk) {
-    int r;
-    r = (lk->locked && lk->cpu == mycpu());
-    return r;
-}
-
-// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
-// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
-// are initially off, then push_off, pop_off leaves them off.
-
-void push_off(void) {
-    int old = intr_get();

+uint32_t push_off(void) {
+    int old = intr_get();
+    Cpu *cpu = mycpu();
+
     intr_off();
-    if (mycpu()->noff == 0)
-        mycpu()->intena = old;
-    mycpu()->noff += 1;
+
+    if (cpu->noff == 0)
+        cpu->intena = old;
+    cpu->noff += 1;
+    return cpu->noff;
 }

-void pop_off(void) {
-    struct Cpu *c = mycpu();
+uint32_t pop_off(void) {
+    Cpu *cpu = mycpu();

     if (intr_get())
-        panic("pop_off - interruptible");
+        PANIC("pop_off - interruptible");

-    if (c->noff < 1) {
-        {
-            // TODO: Remove this block when fixed
-            char amt[100];
-            itoa(c->noff, amt, 10);
-            uart_puts(amt);
-        }
-        panic("pop_off");
-    }
-
-    c->noff -= 1;
-    if (c->noff == 0 && c->intena)
+    if (cpu->noff < 1)
+        PANIC("pop_off");
+
+    cpu->noff -= 1;
+
+    if (cpu->noff == 0 && cpu->intena)
         intr_on();
+
+    return cpu->noff;
 }

 void spinlock_init(spinlock_t *l) {
@@ -135,34 +89,47 @@ __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
 }

 void spin_unlock(spinlock_t *l) {
-    // if (!spin_is_holding(l))
-    //     panic("spin_unlock");
+    l->cpu = 0;

     // Release: store 0 with .rl ordering.
     uint32_t dummy;
     __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
-
-    // __sync_synchronize(); // No loads/stores after this point
-    // __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
-    // pop_off();
 }

-// Optional: tiny pause/backoff (works even if Zihintpause isn't present).
-// See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc
-static inline void cpu_relax(void) {
-#if defined(__riscv_zihintpause)
-    __asm__ volatile("pause");
-#else
-    __asm__ volatile("nop");
-#endif
-}
-
-// Test-and-test-and-set acquire with polite spinning + exponential backoff.
+/**
+ * Test-and-test-and-set acquire with polite spinning + exponential backoff.
+ */
 void spin_lock(spinlock_t *l) {
-    unsigned backoff = 1;
+    uint32_t backoff = 1;
     for (;;) {
         if (spin_trylock(l))
             return;

-        // Contended: spin on plain loads (no AMO) until it looks free.
         while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
-            for (unsigned i = 0; i < backoff; ++i) cpu_relax();
+            for (uint32_t i = 0; i < backoff; ++i)
+                __asm__ volatile("nop"); /* NOTE: Pause can be used here if supported */
             if (backoff < 1u << 12)
                 backoff <<= 1;
         }
-        // Try again; loop.
     }
+    l->cpu = mycpu();
+}
+
+/**
+ * Check whether this cpu is holding the lock.
+ * Interrupts must be off.
+ */
+bool spin_is_holding(spinlock_t *l) {
+    int r;
+    r = (l->v && l->cpu == mycpu());
+    return r;
 }
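Since spin_lock()/spin_unlock() no longer touch the interrupt state themselves (unlike the removed xv6-style acquire()/release()), callers that can race with interrupt handlers are presumably expected to bracket the critical section with push_off()/pop_off(). A minimal usage sketch, with a hypothetical shared counter and lock that are not part of this diff:

    #include <spinlock.h>

    static spinlock_t counter_lock; /* hypothetical; spinlock_init(&counter_lock) at boot */
    static int counter;             /* hypothetical shared state */

    void counter_bump(void) {
        push_off();                 /* matched nesting; returns the new depth */
        spin_lock(&counter_lock);   /* TTAS acquire with exponential nop backoff */

        counter += 1;               /* critical section */

        spin_unlock(&counter_lock); /* amoswap.w.rl store of 0 releases the lock */
        pop_off();                  /* interrupts return at the outermost pop_off */
    }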

View file

@@ -1,60 +1,22 @@
 #ifndef KERNEL_Spinlock_H
 #define KERNEL_Spinlock_H

+#include <proc.h>
+#include <stdbool.h>
 #include <stdint.h>

-/** Mutual exclusion spin lock */
-struct Spinlock {
-    u32 locked; // Is the lock held?
-
-    // NOTE: Perhaps feature gate this?
-    // For debugging:
-    char *name;      // Name of lock.
-    struct Cpu *cpu; // The cpu holding the lock.
-};
-
-/**
- * Acquire the lock.
- * Loops (spins) until the lock is acquired.
- * Panics if the lock is already held by this cpu.
- */
-void acquire(struct Spinlock *);
-
-/**
- * Check whether this cpu is holding the lock.
- * Interrupts must be off.
- */
-int holding(struct Spinlock *);
-
-/**
- * Initialize Spinlock
- */
-void initlock(struct Spinlock *, char *);
-
-/**
- * Release the lock.
- * Panics if the lock is not held.
- */
-void release(struct Spinlock *);
-
-/**
- * @brief push_off/pop_off are like intr_off()/intr_on() except that they are
- * matched: it takes two pop_off()s to undo two push_off()s. Also, if
- * interrupts are initially off, then push_off, pop_off leaves them off.
- */
-void push_off(void);
-
-/** @copydoc pop_off */
-void pop_off(void);
-
 typedef struct {
     volatile uint32_t v; // 0 = unlocked, 1 = locked
+    Cpu *cpu;
 } spinlock_t;

+uint32_t push_off(void);
+uint32_t pop_off(void);
+
 void spinlock_init(spinlock_t *l);
 bool spin_trylock(spinlock_t *l);
 void spin_unlock(spinlock_t *l);
+bool spin_is_holding(spinlock_t *l);
 void spin_lock(spinlock_t *l);

 #endif

kern/libkern/stddef.h (new file, +9)
View file

@@ -0,0 +1,9 @@
+#ifndef STDDEF_H
+#define STDDEF_H
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+#endif // STDDEF_H

kern/libkern/stdio.c (new file, +20)
View file

@@ -0,0 +1,20 @@
+#include <uart.h>
+#include <mini-printf.h>
+#include <stddef.h>
+
+int stdout_puts(char *s, int len, void *unused) {
+    (void)unused;
+    // Example: UART write loop
+    for (int i = 0; i < len; i++) {
+        uart_putc(s[i]); // <-- your low-level "put char" routine
+    }
+    return len;
+}
+
+int kprintf(const char *restrict fmt, ...) {
+    va_list ap;
+    va_start(ap, fmt);
+    int ret = mini_vpprintf(stdout_puts, NULL, fmt, ap);
+    va_end(ap);
+    return ret;
+}
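mini_vpprintf() emits output by repeatedly invoking the supplied puts-style callback, so stdout_puts() is the single choke point between kprintf() and uart_putc(). A usage sketch, assuming the usual mini-printf conversion subset (%c, %s, %d, %u, %x) and an illustrative hart_id variable not taken from this diff:

    void boot_banner(int hart_id) {
        /* Each conversion below ends up as one or more stdout_puts(buf, len, NULL)
         * calls, which loop over uart_putc(). */
        kprintf("hart %d: %s\n", hart_id, "kprintf online");
        kprintf("page size in hex: %x\n", 4096u);
    }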

kern/libkern/stdio.h (new file, +18)
View file

@@ -0,0 +1,18 @@
+#ifndef STDIO_H
+#define STDIO_H
+
+int stdout_puts(char *s, int len, void *unused);
+
+int kprintf(const char *restrict format, ...);
+// int fprintf(FILE *restrict stream, const char *restrict format, ...);
+// int dprintf(int fd, const char *restrict format, ...);
+// int sprintf(char *restrict str, const char *restrict format, ...);
+// int snprintf(char str[restrict .size], size_t size, const char *restrict format, ...);
+// int vprintf(const char *restrict format, va_list ap);
+// int vfprintf(FILE *restrict stream, const char *restrict format, va_list ap);
+// int vdprintf(int fd, const char *restrict format, va_list ap);
+// int vsprintf(char *restrict str, const char *restrict format, va_list ap);
+// int vsnprintf(char str[restrict .size], size_t size, const char *restrict format, va_list ap);
+
+#endif // STDIO_H

View file

@@ -4,7 +4,3 @@
 void uart_putc(char c) {
     *UART_BASE = c;
 }
-
-void uart_puts(const char *s) {
-    while (*s) uart_putc(*s++);
-}

View file

@@ -4,7 +4,4 @@
 /** Send a single character to the UART device */
 void uart_putc(char c);

-/** Send a **NULL TERMINATED** string to the UART device */
-void uart_puts(const char *s);
-
 #endif

View file

@@ -1,6 +1,8 @@
+#ifndef PROC_H
+#define PROC_H
+
 #include <config.h>
 #include <riscv.h>
-#include <spinlock.h>
 #include <stdint.h>

 typedef enum {
@@ -13,7 +15,7 @@ typedef enum {
 } ProcessState;

 /** Saved registers for kernel context switches. */
-struct Context {
+typedef struct Context {
     uint64_t ra;
     uint64_t sp;
@@ -30,18 +32,18 @@ struct Context {
     uint64_t s9;
     uint64_t s10;
     uint64_t s11;
-};
+} Context;

 /** Per-CPU state. */
-struct Cpu {
+typedef struct cpu_t {
     struct Process *proc;   // The process running on this cpu, or null.
     struct Context context; // swtch() here to enter scheduler().
     int noff;               // Depth of push_off() nesting.
     int intena;             // Were interrupts enabled before push_off()?
-};
+} Cpu;

 /** Saved registers for kernel context switches. */
-typedef struct {
+typedef struct TrapFrame_t {
     /* 0 */ uint64_t kernel_satp;  // kernel page table
     /* 8 */ uint64_t kernel_sp;    // top of process's kernel stack
     /* 16 */ uint64_t kernel_trap; // usertrap()
@@ -78,11 +80,13 @@ typedef struct {
     /* 264 */ uint64_t t4;
     /* 272 */ uint64_t t5;
     /* 280 */ uint64_t t6;
-} TrapFrame_t;
+} TrapFrame;

-struct Cpu *mycpu(void);
+Cpu *mycpu(void);

-extern struct Cpu cpus[NCPU];
+extern Cpu cpus[NCPU];

 /** Per-process state */
 struct Proc {};
+
+#endif

View file

@@ -1,10 +1,12 @@
 #include <config.h>
 #include <kalloc.h>
 #include <memory.h>
+#include <panic.h>
 #include <proc.h>
 #include <riscv.h>
 #include <spinlock.h>
 #include <stdint.h>
+#include <stdio.h>
 #include <uart.h>

 /**
@@ -37,30 +39,24 @@ void start() {
     if (id == 0) {
         /* Here we will do a bunch of initialization steps */
         kalloc_init();
-        uart_puts("Hello Neptune!\n");
         spinlock_init(&sl);
+
+        kprintf("Hello Neptune!\n");
+
+        __sync_synchronize();
         hold = 0;
     } else {
         while (hold);
     }

-    // spin_lock(&sl);
-    //
-    // uart_puts("Hart number: ");
-    // uart_putc(id + '0');
-    // uart_putc('\n');
-    //
-    // spin_unlock(&sl);
-
     if (id == 0) {
         spin_lock(&sl);
-        uart_puts("Core count: ");
-        uart_putc(max_hart + '0');
-        uart_putc('\n');
-        if (max_hart == NCPU) {
-            uart_puts("All cores up!");
-            uart_putc('\n');
-        }
+        kprintf("Core count: %d\n", max_hart);
+
+        if (max_hart == NCPU)
+            kprintf("All cores up!\n");
+        else
+            PANIC("Some cores seem to have been enumerated incorrectly!\n");
+
         spin_unlock(&sl);
     }