Compare commits

master...v0.0.1

No commits in common. "master" and "v0.0.1" have entirely different histories.

25 changed files with 38 additions and 819 deletions

.clang-format

@@ -6,11 +6,3 @@ ColumnLimit: 80 # Wrap lines after 80 characters
AllowShortLoopsOnASingleLine: true
AlwaysBreakTemplateDeclarations: true
BreakConstructorInitializers: BeforeComma
AlignConsecutiveDeclarations:
Enabled: true
AcrossEmptyLines: false
AcrossComments: false
AlignCompound: false
AlignFunctionPointers: false
PadOperators: false
AlignConsecutiveMacros: true

.clangd

@@ -1,18 +0,0 @@
CompileFlags:
Add:
- --target=riscv64-unknown-elf
- -mcmodel=medany
- -march=rv64gc
- -mabi=lp64
- -ffreestanding
- -fno-common
- -nostdlib
- -mno-relax
- -I.
- -Ilib
- -fno-stack-protector
- -fno-pie
- -no-pie
- -ggdb
- -gdwarf-2
- -fno-omit-frame-pointer

Makefile

@@ -8,7 +8,7 @@ OBJDUMP = $(TOOLPREFIX)-objdump
ASFLAGS = -march=rv64gc -mabi=lp64
LDFLAGS = -Tkernel.ld
LDFLAGS = -Tlink.ld
LDFLAGS += -m elf64lriscv
CFLAGS = -Wall -Werror -O
@@ -17,8 +17,6 @@ CFLAGS += -march=rv64gc -mabi=lp64
CFLAGS += -ffreestanding -fno-common -nostdlib -mno-relax
CFLAGS += -I.
CFLAGS += -Ilib
CFLAGS += -Ikern
CFLAGS += -fno-stack-protector # Prevents code that needs libc / runtime support
CFLAGS += -MD # Generate header dependency files (.d)
@@ -28,7 +26,7 @@ CFLAGS += -fno-omit-frame-pointer # More reliable backtraces in GDB
all: kernel.elf
kernel.elf: entry.o start.o lib/string.o lib/proc.o lib/spinlock.o lib/proc.o lib/uart.o lib/panic.o kern/kalloc.o lib/memory.o
kernel.elf: entry.o start.o
@echo LD $@
@$(LD) $(LDFLAGS) -o $@ $^
@@ -41,10 +39,10 @@ kernel.elf: entry.o start.o lib/string.o lib/proc.o lib/spinlock.o lib/proc.o li
@$(AS) $(ASFLAGS) -o $@ $<
qemu: kernel.elf
@echo QEMU $<
@qemu-system-riscv64 -machine virt -bios none -nographic -m 128M -smp 4 -kernel kernel.elf
@echo QEMU $@
@qemu-system-riscv64 -machine virt -bios none -nographic -kernel kernel.elf
clean:
rm -f *.o *.elf *.d lib/*.o lib/*.d
rm -f *.o *.elf *.d
-include *.d

README.md

@@ -1,34 +1,8 @@
# Neptune Kernel
Inspired by xv6
For a quick reference on RISC-V assembly:
- https://risc-v.guru/instructions/
Toolchains:
- https://github.com/xpack-dev-tools/riscv-none-elf-gcc-xpack
- https://github.com/xpack-dev-tools/qemu-riscv-xpack/
---
> A word on terminology: Although the official x86 term is exception, xv6 uses the
> term trap, largely because it was the term used by the PDP11/40 and therefore is the
> conventional Unix term.
| Register | Name | Privilege Level | Description |
|-------------|----------------------------|------------------|-----------------------------------------------------------------------------|
| `mstatus` | Machine Status Register | Machine | Holds global interrupt enable, previous privilege mode, etc. |
| `mtvec` | Machine Trap-Vector Base | Machine | Holds the base address of the trap handler (exception/interrupt entry). |
| `mepc` | Machine Exception PC | Machine | Stores the program counter at the time of the last trap. |
| `mcause` | Machine Cause Register | Machine | Indicates the cause of the last trap (interrupt or exception). |
| `satp` | Supervisor Address Translation and Protection | Supervisor | Controls page table base address and mode (e.g., Sv39, Sv48). |
| `sstatus` | Supervisor Status Register | Supervisor | Like `mstatus`, but accessible from supervisor mode. |
| `stvec` | Supervisor Trap-Vector Base| Supervisor | Like `mtvec`, but for supervisor mode traps. |
| `sepc` | Supervisor Exception PC | Supervisor | Like `mepc`, but for supervisor mode. |
| `scause` | Supervisor Cause Register | Supervisor | Like `mcause`, but for supervisor mode traps. |
| `sscratch` | Supervisor Scratch | Supervisor | Can be used to store temporary state across traps in supervisor mode. |
| `mscratch` | Machine Scratch | Machine | Like `sscratch`, but in machine mode. |
| `mcycle` | Machine Cycle Counter | Machine | Counts the number of cycles executed. |
| `mtime` | Machine Timer Register | Machine (via memory-mapped) | Used for timing and scheduling (not a CSR, but a memory-mapped register). |
| `mip` | Machine Interrupt Pending | Machine | Indicates pending interrupts. |
| `mie` | Machine Interrupt Enable | Machine | Controls which interrupts are enabled. |
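
The supervisor-level CSRs in this table are read with the csrr instruction. As an illustration, an accessor for scause in the same style riscv.h (later in this diff) uses for mhartid and sstatus — a sketch, not code from the repository:

    /** Read scause: the cause of the most recent supervisor-mode trap. */
    static inline u64 r_scause() {
      u64 x;
      asm volatile("csrr %0, scause" : "=r"(x));
      return x;
    }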

config.h

@@ -1,8 +0,0 @@
/*
* Number of CPUs. For now, this is hard-coded here. It will likely be
* dynamically discovered in the future.
*/
#define NCPU 3
/* Maximum number of files open */
#define NOFILE 10

entry.S

@@ -6,6 +6,20 @@ _entry:
call _clear
continue:
li t0, 0x10000000 # UART base address
li t1, 'E' # Character to print
sb t1, 0(t0)
li t1, 'n'
sb t1, 0(t0)
li t1, 't'
sb t1, 0(t0)
li t1, 'r'
sb t1, 0(t0)
li t1, 'y'
sb t1, 0(t0)
li t1, '\n'
sb t1, 0(t0)
# Set up a stack for C.
la sp, stack0
li a0, 1024*4 # a0 = 4096

kern/kalloc.c

@@ -1,75 +0,0 @@
#include <kalloc.h>
#include <memory.h>
#include <panic.h>
#include <riscv.h>
#include <spinlock.h>
#include <string.h>
#include <types.h>
// Physical memory allocator, for user processes,
// kernel stacks, page-table pages,
// and pipe buffers. Allocates whole 4096-byte pages.
/** Add the pages in [physaddr_start, physaddr_end) to the free list. */
void freerange(void *physaddr_start, void *physaddr_end);
/** First address after kernel. Provided by kernel.ld */
extern char kernel_end[];
/** A run is a node in the free list. */
struct Run {
struct Run *next;
};
/** Kernel memory allocator. */
struct {
struct Spinlock lock;
struct Run *freelist;
} kmem;
void kalloc_init() {
initlock(&kmem.lock, "kmem");
freerange(kernel_end, (void *)PHYSTOP);
}
void freerange(void *physaddr_start, void *physaddr_end) {
char *p;
p = (char *)PGROUNDUP((u64)physaddr_start);
for (; p + PGSIZE <= (char *)physaddr_end; p += PGSIZE) kfree(p);
}
void kfree(void *pa) {
struct Run *r;
// Assert that the page is aligned to a page boundary and lies within
// the managed range [kernel_end, PHYSTOP)
if (((u64)pa % PGSIZE) != 0 || (char *)pa < kernel_end ||
(u64)pa >= PHYSTOP)
panic("kfree");
// Fill with junk to catch dangling refs.
memset(pa, 1, PGSIZE);
r = (struct Run *)pa;
acquire(&kmem.lock);
r->next = kmem.freelist;
kmem.freelist = r;
release(&kmem.lock);
}
void *kalloc(void) {
struct Run *r;
acquire(&kmem.lock);
r = kmem.freelist;
if (r)
kmem.freelist = r->next;
release(&kmem.lock);
if (r)
memset((char *)r, 5, PGSIZE); // fill with junk
return (void *)r;
}
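
A minimal usage sketch of this allocator: the caller below is hypothetical, but it uses only kalloc(), kfree(), and panic() as declared elsewhere in this diff:

    void use_a_page(void) {
      void *page = kalloc();     /* one 4096-byte page, junk-filled (5s) */
      if (page == 0)
        panic("out of memory");  /* free list exhausted */
      /* ... use the page ... */
      kfree(page);               /* back onto kmem.freelist, junk-filled (1s) */
    }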

kern/kalloc.h

@@ -1,33 +0,0 @@
#ifndef KALLOC_KERNEL_H
#define KALLOC_KERNEL_H
/**
* Kernel memory allocator
*
* Allocate one 4096-byte page of physical memory.
* Returns a pointer that the kernel can use.
* Returns 0 if the memory cannot be allocated.
* See: kalloc.c
*/
void *kalloc(void);
/**
* Kernel memory allocator
*
* Free the page of physical memory pointed at by pa,
* which normally should have been returned by a
* call to kalloc(). (The exception is when
* initializing the allocator; see kalloc_init.)
* See: kalloc.c
*/
void kfree(void *);
/**
* Initialize kernel memory allocator
*
* Called by start() on the way to the kernel's main loop.
* See: kalloc.c
*/
void kalloc_init(void);
#endif

lib/endian.h

@@ -1,92 +0,0 @@
#ifndef ENDIAN_KERNEL_H
#define ENDIAN_KERNEL_H
#include <types.h>
/** Swap byte order of 16-bit value */
static inline u16 swap16(u16 x) { return (x >> 8) | (x << 8); }
/** Swap byte order of 32-bit value */
static inline u32 swap32(u32 x) {
return ((x >> 24) & 0x000000ff) | ((x >> 8) & 0x0000ff00) |
((x << 8) & 0x00ff0000) | ((x << 24) & 0xff000000);
}
/** Swap byte order of 64-bit value */
static inline u64 swap64(u64 x) {
return ((x >> 56) & 0x00000000000000ffULL) |
((x >> 40) & 0x000000000000ff00ULL) |
((x >> 24) & 0x0000000000ff0000ULL) |
((x >> 8) & 0x00000000ff000000ULL) |
((x << 8) & 0x000000ff00000000ULL) |
((x << 24) & 0x0000ff0000000000ULL) |
((x << 40) & 0x00ff000000000000ULL) |
((x << 56) & 0xff00000000000000ULL);
}
#ifdef __LITTLE_ENDIAN__
/** Convert 16-bit value to little-endian */
static inline u16 to_le16(u16 x) { return x; }
/** Convert 16-bit little-endian value to host */
static inline u16 from_le16(u16 x) { return x; }
/** Convert 32-bit value to little-endian */
static inline u32 to_le32(u32 x) { return x; }
/** Convert 32-bit little-endian value to host */
static inline u32 from_le32(u32 x) { return x; }
/** Convert 64-bit value to little-endian */
static inline u64 to_le64(u64 x) { return x; }
/** Convert 64-bit little-endian value to host */
static inline u64 from_le64(u64 x) { return x; }
/** Convert 16-bit value to big-endian */
static inline u16 to_be16(u16 x) { return swap16(x); }
/** Convert 16-bit big-endian value to host */
static inline u16 from_be16(u16 x) { return swap16(x); }
/** Convert 32-bit value to big-endian */
static inline u32 to_be32(u32 x) { return swap32(x); }
/** Convert 32-bit big-endian value to host */
static inline u32 from_be32(u32 x) { return swap32(x); }
/** Convert 64-bit value to big-endian */
static inline u64 to_be64(u64 x) { return swap64(x); }
/** Convert 64-bit big-endian value to host */
static inline u64 from_be64(u64 x) { return swap64(x); }
#else // Big-endian
/** Convert 16-bit value to little-endian */
static inline u16 to_le16(u16 x) { return swap16(x); }
/** Convert 16-bit little-endian value to host */
static inline u16 from_le16(u16 x) { return swap16(x); }
/** Convert 32-bit value to little-endian */
static inline u32 to_le32(u32 x) { return swap32(x); }
/** Convert 32-bit little-endian value to host */
static inline u32 from_le32(u32 x) { return swap32(x); }
/** Convert 64-bit value to little-endian */
static inline u64 to_le64(u64 x) { return swap64(x); }
/** Convert 64-bit little-endian value to host */
static inline u64 from_le64(u64 x) { return swap64(x); }
/** Convert 16-bit value to big-endian */
static inline u16 to_be16(u16 x) { return x; }
/** Convert 16-bit big-endian value to host */
static inline u16 from_be16(u16 x) { return x; }
/** Convert 32-bit value to big-endian */
static inline u32 to_be32(u32 x) { return x; }
/** Convert 32-bit big-endian value to host */
static inline u32 from_be32(u32 x) { return x; }
/** Convert 64-bit value to big-endian */
static inline u64 to_be64(u64 x) { return x; }
/** Convert 64-bit big-endian value to host */
static inline u64 from_be64(u64 x) { return x; }
#endif // __LITTLE_ENDIAN__
#endif // ENDIAN_KERNEL_H
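
A small illustration of the round-trip behavior on the little-endian RISC-V target this repository builds for (the values are arbitrary examples):

    u32 wire = to_be32(0x11223344); /* stored bytes: 11 22 33 44 on any host */
    u32 host = from_be32(wire);     /* round-trips back to 0x11223344 */
    u16 same = to_le16(0x1234);     /* compiles to a no-op on this target */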

lib/memory.c

@@ -1,29 +0,0 @@
#include <memory.h>
#include <string.h>
#include <uart.h>
#define MAX_PROBE_SIZE (256 * 1024 * 1024) // Probe up to 256 MiB max
#define PROBE_STEP 0x1000 // Probe every 4 KiB page
size_t probe_memory(void) {
volatile u32 *addr;
u32 test_pattern = 0xA5A5A5A5;
size_t detected = 0;
for (size_t offset = 4096 * 16; offset < MAX_PROBE_SIZE;
offset += PROBE_STEP) {
addr = (volatile u32 *)(KERNBASE + offset);
u32 old = *addr;
*addr = test_pattern;
if (*addr != test_pattern) {
break; // Memory not readable/writable here, stop probing
}
*addr = old; // restore original data
detected = offset + PROBE_STEP;
}
return detected;
}
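
A hypothetical boot-time report built on this probe, using only itoa() and uart_puts() from elsewhere in this diff:

    char buf[16];
    size_t ram = probe_memory();     /* bytes of writable RAM found */
    itoa((int)(ram >> 20), buf, 10); /* at most 256 MiB, so it fits an int */
    uart_puts(buf);
    uart_puts(" MiB RAM\n");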

lib/memory.h

@@ -1,16 +0,0 @@
#ifndef MEMORY_KERNEL_H
#define MEMORY_KERNEL_H
#include <types.h>
/* These are hardcoded for now */
#define KERNBASE 0x80000000L
#define PHYSTOP (KERNBASE + 128 * 1024 * 1024)
/**
* Returns the size in bytes of detected RAM. In QEMU, accessing unavailable
* memory traps, so a trap handler is needed for the probe to survive.
*/
size_t probe_memory(void);
#endif

lib/panic.c

@@ -1,8 +0,0 @@
#include <uart.h>
volatile int panicked;
void panic(char *s) {
panicked = 1;
uart_puts(s);
while (1);
}

lib/panic.h

@@ -1,6 +0,0 @@
#ifndef KERNEL_PANIC_H
#define KERNEL_PANIC_H
void panic(char *s);
#endif

lib/proc.c

@@ -1,21 +0,0 @@
#include <proc.h>
struct Cpu cpus[NCPU];
/**
* Must be called with interrupts disabled, to prevent race with process being
* moved to a different CPU.
*/
int cpuid() {
int id = read_tp();
return id;
}
/**
* Return this CPU's cpu struct. Interrupts must be disabled.
*/
struct Cpu *mycpu(void) {
int id = cpuid();
struct Cpu *c = &cpus[id];
return c;
}
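
Because both functions require interrupts to be off, a caller would typically bracket them with the matched push_off()/pop_off() pair from lib/spinlock — a sketch, not repository code:

    push_off();               /* interrupts off: no migration to another hart */
    struct Cpu *c = mycpu();  /* now safe to hold a pointer to this CPU's state */
    /* ... read or update per-CPU state ... */
    pop_off();                /* restore the previous interrupt state */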

lib/proc.h

@@ -1,22 +0,0 @@
#include <config.h>
#include <lib/spinlock.h>
#include <riscv.h>
#include <types.h>
struct Cpu *mycpu(void);
/** Saved registers for kernel context switches. */
struct Context {};
/** Per-CPU state. */
struct Cpu {
struct Proc *proc; // The process running on this cpu, or null.
struct Context context; // swtch() here to enter scheduler().
int noff; // Depth of push_off() nesting.
int intena; // Were interrupts enabled before push_off()?
};
extern struct Cpu cpus[NCPU];
/** Per-process state */
struct Proc {};

lib/spinlock.c

@@ -1,124 +0,0 @@
/**
* Mutual exclusion spin locks.
* (Not mutexes as these are spinning locks).
*/
// #include <lib/stdio.h>
#include "string.h"
#include <panic.h>
#include <proc.h>
#include <riscv.h>
#include <spinlock.h>
#include <uart.h>
/**
 * The acquire() and release() functions control ownership of the lock.
 * To implement these operations, modern CPUs provide atomic instructions
 * that prevent the cores from stepping on each other's toes (a data race).
 *
 * GCC provides a set of built-in functions that allow you to use atomic
 * instructions in an architecture-independent way. These functions are
 * defined in the GCC manual:
 *
 * See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
 * See: https://en.wikipedia.org/wiki/Memory_barrier
 *
 * On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
 *   a5 = 1
 *   s1 = &lk->locked
 *   amoswap.w.aq a5, a5, (s1)
 *
 * On RISC-V, __sync_lock_release turns into an atomic swap:
 *   s1 = &lk->locked
 *   amoswap.w zero, zero, (s1)
 *
 * __sync_synchronize();
 *
 * This function tells the C compiler and the processor to not move loads or
 * stores past this point, to ensure that the critical section's memory
 * references happen strictly after the lock is acquired/locked.
 * On RISC-V, this emits a fence instruction.
 */
/** Initialize Spinlock */
void initlock(struct Spinlock *lk, char *name) {
lk->name = name;
lk->locked = 0;
lk->cpu = 0;
}
/**
* Acquire the lock.
* Loops (spins) until the lock is acquired.
* Panics if the lock is already held by this cpu.
*/
void acquire(struct Spinlock *lk) {
push_off(); // disable interrupts to avoid deadlock.
if (holding(lk)) // If the lock is already held, panic.
panic("acquire");
// Spin until acquired. See file header for details
while (__sync_lock_test_and_set(&lk->locked, 1) != 0);
__sync_synchronize(); // No loads/stores after this point
// Record info about lock acquisition for holding() and debugging.
lk->cpu = mycpu();
}
/**
* Release the lock.
* Panics if the lock is not held.
*/
void release(struct Spinlock *lk) {
if (!holding(lk)) // If the lock is not held, panic.
panic("release");
lk->cpu = 0; // 0 means unheld
__sync_synchronize(); // No loads/stores after this point
__sync_lock_release(&lk->locked); // Essentially lk->locked = 0
pop_off();
}
// Check whether this cpu is holding the lock.
// Interrupts must be off.
int holding(struct Spinlock *lk) {
int r;
r = (lk->locked && lk->cpu == mycpu());
return r;
}
// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
// are initially off, then push_off, pop_off leaves them off.
void push_off(void) {
int old = intr_get();
intr_off();
if (mycpu()->noff == 0)
mycpu()->intena = old;
mycpu()->noff += 1;
}
void pop_off(void) {
struct Cpu *c = mycpu();
if (intr_get())
panic("pop_off - interruptible");
if (c->noff < 1) {
{
// TODO: Remove this block when fixed
char amt[100];
itoa(c->noff, amt, 10);
uart_puts(amt);
}
panic("pop_off");
}
c->noff -= 1;
if (c->noff == 0 && c->intena)
intr_on();
}
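
The matched semantics are easiest to see in a straight-line sketch (hypothetical, not repository code):

    push_off();  /* noff 0 -> 1; saves whether interrupts were on in intena */
    push_off();  /* noff 1 -> 2; interrupts stay off */
    pop_off();   /* noff 2 -> 1; interrupts still off */
    pop_off();   /* noff 1 -> 0; re-enables only if intena was set */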

lib/spinlock.h

@@ -1,51 +0,0 @@
#ifndef KERNEL_Spinlock_H
#define KERNEL_Spinlock_H
#include "types.h"
/** Mutual exclusion spin lock */
struct Spinlock {
u32 locked; // Is the lock held?
// NOTE: Perhaps feature gate this?
// For debugging:
char *name; // Name of lock.
struct Cpu *cpu; // The cpu holding the lock.
};
/**
* Acquire the lock.
* Loops (spins) until the lock is acquired.
* Panics if the lock is already held by this cpu.
*/
void acquire(struct Spinlock *);
/**
* Check whether this cpu is holding the lock.
* Interrupts must be off.
*/
int holding(struct Spinlock *);
/**
* Initialize Spinlock
*/
void initlock(struct Spinlock *, char *);
/**
* Release the lock.
* Panics if the lock is not held.
*/
void release(struct Spinlock *);
/**
* @brief push_off/pop_off are like intr_off()/intr_on() except that they are
* matched: it takes two pop_off()s to undo two push_off()s. Also, if
* interrupts are initially off, then push_off, pop_off leaves them off.
*/
void push_off(void);
/** @copydoc push_off */
void pop_off(void);
#endif

lib/string.c

@@ -1,99 +0,0 @@
#include <string.h>
char *itoa(int value, char *str, int base) {
char *p = str;
char *p1, *p2;
unsigned int uvalue = value;
int negative = 0;
if (base < 2 || base > 36) {
*str = '\0';
return str;
}
if (value < 0 && base == 10) {
negative = 1;
uvalue = -value;
}
// Convert to string
do {
int digit = uvalue % base;
*p++ = (digit < 10) ? '0' + digit : 'a' + (digit - 10);
uvalue /= base;
} while (uvalue);
if (negative)
*p++ = '-';
*p = '\0';
// Reverse string
p1 = str;
p2 = p - 1;
while (p1 < p2) {
char tmp = *p1;
*p1++ = *p2;
*p2-- = tmp;
}
return str;
}
void *memset(void *dst, int c, size_t length) {
u8 *ptr = (u8 *)dst;
const u8 value = (u8)c;
while (length--) *(ptr++) = value;
return dst;
}
void *memcpy(void *dst, const void *src, size_t len) {
u8 *d = (u8 *)dst;
const u8 *s = (const u8 *)src;
for (size_t i = 0; i < len; i++) {
d[i] = s[i];
}
return dst;
}
void *memmove(void *dst, const void *src, size_t len) {
u8 *d = (u8 *)dst;
const u8 *s = (const u8 *)src;
if (d < s) {
for (size_t i = 0; i < len; i++) {
d[i] = s[i];
}
} else if (d > s) {
for (size_t i = len; i > 0; i--) {
d[i - 1] = s[i - 1];
}
}
return dst;
}
int memcmp(const void *s1, const void *s2, size_t len) {
const u8 *a = (const u8 *)s1;
const u8 *b = (const u8 *)s2;
for (size_t i = 0; i < len; i++) {
if (a[i] != b[i]) {
return (int)a[i] - (int)b[i];
}
}
return 0;
}
size_t strlen(const char *s) {
const char *p = s;
while (*p) ++p;
return (size_t)(p - s);
}
size_t strnlen(const char *s, size_t maxlen) {
size_t len = 0;
while (len < maxlen && s[len] != '\0') {
len++;
}
return len;
}
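
One detail worth noting: memmove() picks its copy direction from the pointer order, which is what makes overlapping moves safe. A tiny check (hypothetical):

    char buf[8] = "abcdef";
    memmove(buf + 1, buf, 6); /* d > s, so it copies backward: "aabcdef" */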

lib/string.h

@@ -1,40 +0,0 @@
#ifndef KERNEL_STRING_H
#define KERNEL_STRING_H
#include <types.h>
/** Integer to ascii */
char *itoa(int value, char *str, int base);
/** Fill memory with constant byte */
void *memset(void *dst, int c, size_t len);
/** Copy `len` bytes from `src` to `dst`. Undefined if regions overlap. */
void *memcpy(void *dst, const void *src, size_t len);
/** Copy `len` bytes from `src` to `dst`, safe for overlapping regions. */
void *memmove(void *dst, const void *src, size_t len);
/** Compare `len` bytes of `s1` and `s2`.
* Returns 0 if equal, <0 if s1 < s2, >0 if s1 > s2. */
int memcmp(const void *s1, const void *s2, size_t len);
/** Returns the length of a null-terminated string */
size_t strlen(const char *s);
/** Return length of string `s`, up to a max of `maxlen` bytes */
size_t strnlen(const char *s, size_t maxlen);
// TODO: These:
/*
int strcmp(const char *s1, const char *s2);
int strncmp(const char *s1, const char *s2, size_t n);
char *strcpy(char *dst, const char *src);
char *strncpy(char *dst, const char *src, size_t n);
char *strchr(const char *s, int c);
char *strrchr(const char *s, int c);
*/
#endif

lib/uart.c

@@ -1,8 +0,0 @@
/* QEMU memory maps a UART device here. */
#define UART_BASE ((volatile char *)0x10000000)
void uart_putc(char c) { *UART_BASE = c; }
void uart_puts(const char *s) {
while (*s) uart_putc(*s++);
}

lib/uart.h

@@ -1,10 +0,0 @@
#ifndef UART_KERNEL_H
#define UART_KERNEL_H
/** Send a single character to the UART device */
void uart_putc(char c);
/** Send a **NULL TERMINATED** string to the UART device */
void uart_puts(const char *s);
#endif

kernel.ld → link.ld

@@ -74,7 +74,7 @@ SECTIONS
}
/* Define symbol end as current location, note that this is not aligned, see vm.c */
PROVIDE(kernel_end = .);
PROVIDE(end = .);
}
PHDRS {

riscv.h

@@ -1,74 +0,0 @@
#ifndef RISCV_KERNEL_H
#define RISCV_KERNEL_H
#include <types.h>
/** Page Size */
#define PGSIZE 4096 // bytes per page
/** Page Shift, bits of offset within a page */
#define PGSHIFT 12
#define PGROUNDUP(sz) (((sz) + PGSIZE - 1) & ~(PGSIZE - 1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE - 1))
// Supervisor Status Register, sstatus
#define SSTATUS_SPP (1L << 8) /** Supervisor Previous Privilege 1=S, 0=U */
#define SSTATUS_SPIE (1L << 5) /** Supervisor Previous Interrupt Enable */
#define SSTATUS_UPIE (1L << 4) /** User Previous Interrupt Enable */
#define SSTATUS_SIE (1L << 1) /** Supervisor Interrupt Enable */
#define SSTATUS_UIE (1L << 0) /** User Interrupt Enable */
/** Page Table Entry Type */
typedef u64 pte_t;
/** Page Table Type */
typedef u64 *pagetable_t; // 512 PTEs
/** Returns the current hart id */
static inline u64 read_mhartid() {
u64 x;
asm volatile("csrr %0, mhartid" : "=r"(x));
return x;
}
/** Read thread pointer */
static inline u64 read_tp() {
u64 x;
asm volatile("mv %0, tp" : "=r"(x));
return x;
}
/** Write thread pointer */
static inline void write_tp(u64 x) { asm volatile("mv tp, %0" : : "r"(x)); }
/**
* Read the value of the sstatus register.
* (Supervisor Status Register)
*/
static inline u64 r_sstatus() {
u64 x;
asm volatile("csrr %0, sstatus" : "=r"(x));
return x;
}
/**
* Write a value to the sstatus register.
* (Supervisor Status Register)
*/
static inline void w_sstatus(u64 x) {
asm volatile("csrw sstatus, %0" : : "r"(x));
}
/** Enable device interrupts */
static inline void intr_on() { w_sstatus(r_sstatus() | SSTATUS_SIE); }
/** Disable device interrupts */
static inline void intr_off() { w_sstatus(r_sstatus() & ~SSTATUS_SIE); }
/** Are device interrupts enabled? */
static inline int intr_get() {
u64 x = r_sstatus();
return (x & SSTATUS_SIE) != 0;
}
#endif
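
A quick worked example of the page-rounding macros (PGSIZE - 1 == 0xfff):

    PGROUNDUP(4097);   /* (4097 + 4095) & ~4095 == 8192 */
    PGROUNDUP(4096);   /* already page-aligned: stays 4096 */
    PGROUNDDOWN(4097); /* 4097 & ~4095 == 4096 */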

start.c

@@ -1,11 +1,19 @@
#include <config.h>
#include <kalloc.h>
#include <memory.h>
#include <proc.h>
#include <riscv.h>
#include <spinlock.h>
#include <types.h>
#include <uart.h>
/*
* Number of CPUs. For now, this is hard-coded here. It will likely be in a
* header, or dynamically discovered in the future.
*/
#define NCPU 3
/* QEMU memory maps a UART device here. */
#define UART_BASE ((char *)0x10000000)
/** Send a single character to the UART device */
void uart_putc(char c) { *UART_BASE = c; }
/** Send a **NULL TERMINATED** string to the UART device */
void uart_puts(const char *s) {
while (*s) uart_putc(*s++);
}
/**
* Allocate one stack per CPU (hart).
@@ -15,37 +23,11 @@
*/
char stack0[4096 * NCPU] __attribute__((aligned(16)));
/* Keep this here and sync on it until we have synchronized printf */
struct Spinlock sl = {0};
volatile int greeted = 0;
/* This is where entry.S drops us off. All cores land here. */
void start() {
u64 id = read_mhartid();
// Keep each CPU's hartid in its tp (thread pointer) register, for cpuid().
// This can then be retrieved with read_tp() or cpuid(). It is used to index the
// cpus[] array in mycpu(), which in turn holds state for each individual
// cpu (struct Cpu).
write_tp(id);
acquire(&sl);
if (!greeted) {
uart_puts("Hello Neptune!\n");
greeted = 1;
}
uart_puts("Hart number: ");
uart_putc(id + '0');
uart_putc('\n');
release(&sl);
if (id == 0) {
/* Here we will do a bunch of initialization steps */
kalloc_init();
}
// We should not arrive here, but if we do, hang in a loop on wfi.
while (1) __asm__ volatile("wfi"); // (Wait For Interrupt)

types.h

@@ -1,7 +0,0 @@
#pragma once
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef unsigned long u64;
typedef u64 size_t;