Imbus 2025-09-01 21:50:05 +02:00
parent 0562c2fe5a
commit d078e7fb93
7 changed files with 0 additions and 326 deletions

@@ -1,92 +0,0 @@
#ifndef ENDIAN_KERNEL_H
#define ENDIAN_KERNEL_H
#include <types.h>
/** Swap byte order of 16-bit value */
static inline u16 swap16(u16 x) { return (x >> 8) | (x << 8); }
/** Swap byte order of 32-bit value */
static inline u32 swap32(u32 x) {
return ((x >> 24) & 0x000000ff) | ((x >> 8) & 0x0000ff00) |
((x << 8) & 0x00ff0000) | ((x << 24) & 0xff000000);
}
/** Swap byte order of 64-bit value */
static inline u64 swap64(u64 x) {
return ((x >> 56) & 0x00000000000000ffULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 56) & 0xff00000000000000ULL);
}
#if defined(__LITTLE_ENDIAN__) || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
/** Convert 16-bit value to little-endian */
static inline u16 to_le16(u16 x) { return x; }
/** Convert 16-bit little-endian value to host */
static inline u16 from_le16(u16 x) { return x; }
/** Convert 32-bit value to little-endian */
static inline u32 to_le32(u32 x) { return x; }
/** Convert 32-bit little-endian value to host */
static inline u32 from_le32(u32 x) { return x; }
/** Convert 64-bit value to little-endian */
static inline u64 to_le64(u64 x) { return x; }
/** Convert 64-bit little-endian value to host */
static inline u64 from_le64(u64 x) { return x; }
/** Convert 16-bit value to big-endian */
static inline u16 to_be16(u16 x) { return swap16(x); }
/** Convert 16-bit big-endian value to host */
static inline u16 from_be16(u16 x) { return swap16(x); }
/** Convert 32-bit value to big-endian */
static inline u32 to_be32(u32 x) { return swap32(x); }
/** Convert 32-bit big-endian value to host */
static inline u32 from_be32(u32 x) { return swap32(x); }
/** Convert 64-bit value to big-endian */
static inline u64 to_be64(u64 x) { return swap64(x); }
/** Convert 64-bit big-endian value to host */
static inline u64 from_be64(u64 x) { return swap64(x); }
#else // Big-endian
/** Convert 16-bit value to little-endian */
static inline u16 to_le16(u16 x) { return swap16(x); }
/** Convert 16-bit little-endian value to host */
static inline u16 from_le16(u16 x) { return swap16(x); }
/** Convert 32-bit value to little-endian */
static inline u32 to_le32(u32 x) { return swap32(x); }
/** Convert 32-bit little-endian value to host */
static inline u32 from_le32(u32 x) { return swap32(x); }
/** Convert 64-bit value to little-endian */
static inline u64 to_le64(u64 x) { return swap64(x); }
/** Convert 64-bit little-endian value to host */
static inline u64 from_le64(u64 x) { return swap64(x); }
/** Convert 16-bit value to big-endian */
static inline u16 to_be16(u16 x) { return x; }
/** Convert 16-bit big-endian value to host */
static inline u16 from_be16(u16 x) { return x; }
/** Convert 32-bit value to big-endian */
static inline u32 to_be32(u32 x) { return x; }
/** Convert 32-bit big-endian value to host */
static inline u32 from_be32(u32 x) { return x; }
/** Convert 64-bit value to big-endian */
static inline u64 to_be64(u64 x) { return x; }
/** Convert 64-bit big-endian value to host */
static inline u64 from_be64(u64 x) { return x; }
#endif // little-endian check
#endif // ENDIAN_KERNEL_H
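These helpers keep byte-order conversion explicit at every load and store. A minimal usage sketch, assuming the header is reachable as <endian.h>; the struct and function names here are hypothetical, invented for illustration:

    #include <endian.h> // assumed include path for this header

    /* Hypothetical on-disk superblock whose fields are stored little-endian. */
    struct disk_superblock {
        u16 version;
        u32 block_count;
    };

    static u32 superblock_blocks(const struct disk_superblock *sb) {
        /* from_le32() compiles to a no-op on little-endian hosts and to
         * swap32() on big-endian hosts, so this caller stays portable. */
        return from_le32(sb->block_count);
    }

As a sanity check, swap32(0x11223344) yields 0x44332211 regardless of host byte order.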

@@ -1,29 +0,0 @@
#include <memory.h>
#include <string.h>
#include <uart.h>
#define MAX_PROBE_SIZE (256 * 1024 * 1024) // Probe at most 256 MiB
#define PROBE_STEP 0x1000 // Probe every 4 KiB page
size_t probe_memory(void) {
volatile u32 *addr;
u32 test_pattern = 0xA5A5A5A5;
size_t detected = 0;
// Start 64 KiB in, presumably to skip pages holding the kernel's own image
for (size_t offset = 4096 * 16; offset < MAX_PROBE_SIZE;
offset += PROBE_STEP) {
addr = (volatile u32 *)(KERNBASE + offset);
u32 old = *addr;
*addr = test_pattern;
if (*addr != test_pattern) {
break; // Memory not readable/writable here, stop probing
}
*addr = old; // restore original data
detected = offset + PROBE_STEP;
}
return detected;
}
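A sketch of how boot code might report the result, assuming the uart_puts() and itoa() helpers that appear elsewhere in this commit; report_memory() itself is hypothetical:

    #include <memory.h>
    #include <string.h> // for itoa(), as used in spinlock.c
    #include <uart.h>

    void report_memory(void) {
        char buf[32];
        size_t ram = probe_memory(); // bytes of writable RAM above KERNBASE
        itoa((int)(ram / (1024 * 1024)), buf, 10);
        uart_puts("RAM detected (MiB): ");
        uart_puts(buf);
        uart_puts("\n");
    }

Note that the probe tests a single word per 4 KiB page, so a page with a faulty word elsewhere would still pass.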

@@ -1,16 +0,0 @@
#ifndef MEMORY_KERNEL_H
#define MEMORY_KERNEL_H
#include <types.h>
/* These are hardcoded for now */
#define KERNBASE 0x80000000L // Start of RAM on the QEMU virt machine
#define PHYSTOP (KERNBASE + 128 * 1024 * 1024) // Assumed top of RAM (128 MiB)
/**
* Returns the size in bytes of detected RAM. Under QEMU this requires a trap
* handler to catch the access fault raised when the probe touches
* unavailable memory.
*/
size_t probe_memory(void);
#endif

@@ -1,8 +0,0 @@
#include <uart.h>
volatile int panicked; // Nonzero once the kernel has panicked
void panic(char *s) {
panicked = 1; // Let other code observe that a panic is in progress
uart_puts(s);
while (1); // Spin forever; there is no recovery from a kernel panic
}
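A common pattern is to wrap panic() in an assertion macro. This KASSERT is a hypothetical sketch, not part of this commit:

    /* Halt with a descriptive message when a kernel invariant is violated. */
    #define KASSERT(cond)                              \
        do {                                           \
            if (!(cond))                               \
                panic("assertion failed: " #cond);     \
        } while (0)

    /* Usage: KASSERT(p != 0); */

The do/while(0) wrapper makes the macro behave as a single statement after if/else.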

@@ -1,6 +0,0 @@
#ifndef KERNEL_PANIC_H
#define KERNEL_PANIC_H
/** Print s over the UART and halt the kernel by spinning forever. */
void panic(char *s);
#endif

@@ -1,124 +0,0 @@
/**
* Mutual exclusion spin locks.
* (Not mutexes: waiters spin instead of sleeping.)
*/
// #include <lib/stdio.h>
#include "string.h"
#include <panic.h>
#include <proc.h>
#include <riscv.h>
#include <spinlock.h>
#include <uart.h>
/**
* The acquire() and release() functions control ownership of the lock.
* To perform these operations, modern CPUs provide atomic instructions
* that prevent the cores from stepping on each other's toes (which would
* otherwise be a race condition).
*
* GCC provides a set of built-in functions that allow you to use atomic
* instructions in an architecture-independent way. These functions are
* defined in the GCC manual:
*
* See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
* See: https://en.wikipedia.org/wiki/Memory_barrier
*
* On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
* a5 = 1
* s1 = &lk->locked
* amoswap.w.aq a5, a5, (s1)
*
* On RISC-V, __sync_lock_release turns into an atomic swap:
* s1 = &lk->locked
* amoswap.w zero, zero, (s1)
*
* __sync_synchronize();
*
* This function tells the C compiler and the processor not to move loads or
* stores past this point, ensuring that the critical section's memory
* references happen strictly after the lock is acquired and strictly
* before it is released.
* On RISC-V, this emits a fence instruction.
*/
/** Initialize Spinlock */
void initlock(struct Spinlock *lk, char *name) {
lk->name = name;
lk->locked = 0;
lk->cpu = 0;
}
/**
* Acquire the lock.
* Loops (spins) until the lock is acquired.
* Panics if the lock is already held by this cpu.
*/
void acquire(struct Spinlock *lk) {
push_off(); // disable interrupts to avoid deadlock.
if (holding(lk)) // If the lock is already held, panic.
panic("acquire");
// Spin until acquired. See file header for details
while (__sync_lock_test_and_set(&lk->locked, 1) != 0);
__sync_synchronize(); // Barrier: critical-section accesses stay after this point
// Record info about lock acquisition for holding() and debugging.
lk->cpu = mycpu();
}
/**
* Release the lock.
* Panics if the lock is not held.
*/
void release(struct Spinlock *lk) {
if (!holding(lk)) // If the lock is not held, panic.
panic("release");
lk->cpu = 0; // 0 means unheld
__sync_synchronize(); // Barrier: critical-section accesses stay before this point
__sync_lock_release(&lk->locked); // Essentially lk->locked = 0
pop_off();
}
// Check whether this cpu is holding the lock.
// Interrupts must be off.
int holding(struct Spinlock *lk) {
return lk->locked && lk->cpu == mycpu();
}
// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
// are initially off, then push_off, pop_off leaves them off.
void push_off(void) {
int old = intr_get();
intr_off();
if (mycpu()->noff == 0)
mycpu()->intena = old;
mycpu()->noff += 1;
}
void pop_off(void) {
struct Cpu *c = mycpu();
if (intr_get())
panic("pop_off - interruptible");
if (c->noff < 1) {
{
// TODO: Remove this block when fixed
char amt[100];
itoa(c->noff, amt, 10);
uart_puts(amt);
}
panic("pop_off");
}
c->noff -= 1;
if (c->noff == 0 && c->intena)
intr_on();
}
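A usage sketch tying the pieces together; the tick counter is a hypothetical example in the style of xv6, not part of this commit:

    #include <spinlock.h>
    #include <types.h>

    static struct Spinlock tickslock;
    static u32 ticks;

    void clock_init(void) {
        initlock(&tickslock, "time");
    }

    /* May run concurrently on several harts. */
    void clock_tick(void) {
        acquire(&tickslock); // push_off() inside disables interrupts
        ticks += 1;          // the critical section
        release(&tickslock); // pop_off() may re-enable interrupts
    }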

@@ -1,51 +0,0 @@
#ifndef KERNEL_SPINLOCK_H
#define KERNEL_SPINLOCK_H
#include "types.h"
/** Mutual exclusion spin lock */
struct Spinlock {
u32 locked; // Is the lock held?
// NOTE: Perhaps feature gate this?
// For debugging:
char *name; // Name of lock.
struct Cpu *cpu; // The cpu holding the lock.
};
/**
* Acquire the lock.
* Loops (spins) until the lock is acquired.
* Panics if the lock is already held by this cpu.
*/
void acquire(struct Spinlock *);
/**
* Check whether this cpu is holding the lock.
* Interrupts must be off.
*/
int holding(struct Spinlock *);
/**
* Initialize Spinlock
*/
void initlock(struct Spinlock *, char *);
/**
* Release the lock.
* Panics if the lock is not held.
*/
void release(struct Spinlock *);
/**
* @brief push_off/pop_off are like intr_off()/intr_on() except that they are
* matched: it takes two pop_off()s to undo two push_off()s. Also, if
* interrupts are initially off, then push_off, pop_off leaves them off.
*/
void push_off(void);
/** @copydoc push_off */
void pop_off(void);
#endif
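The matched push_off()/pop_off() semantics can be traced through a nested sequence, assuming interrupts start enabled:

    push_off(); // noff 0 -> 1; intena records that interrupts were on
    push_off(); // noff 1 -> 2; interrupts remain off
    /* ... code that must not be interrupted ... */
    pop_off();  // noff 2 -> 1; interrupts stay off
    pop_off();  // noff 1 -> 0; intena was set, so intr_on() runs

This is why acquire()/release() pairs may nest freely: each acquire() pushes and each release() pops, and interrupts come back only when the outermost lock is released.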