Merge branch 'riscv' of g.csail.mit.edu:xv6-dev into riscv

Frans Kaashoek 2019-07-17 05:53:47 -04:00
commit b924e44f06
13 changed files with 190 additions and 124 deletions

View file

@@ -55,8 +55,9 @@ kfree(void *pa)
// Fill with junk to catch dangling refs.
memset(pa, 1, PGSIZE);
acquire(&kmem.lock);
r = (struct run*)pa;
acquire(&kmem.lock);
r->next = kmem.freelist;
kmem.freelist = r;
release(&kmem.lock);
@@ -75,6 +76,7 @@ kalloc(void)
if(r)
kmem.freelist = r->next;
release(&kmem.lock);
if(r)
memset((char*)r, 5, PGSIZE); // fill with junk
return (void*)r;
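
For context, a hedged sketch of how the whole of kfree() plausibly reads once this hunk is applied; the argument checks at the top of the function and the kmem / struct run definitions are assumed from the unchanged parts of kalloc.c and are not shown in this diff.

void
kfree(void *pa)
{
  struct run *r;

  // Fill with junk to catch dangling refs.
  memset(pa, 1, PGSIZE);

  r = (struct run*)pa;

  // Take the lock only around the freelist update itself.
  acquire(&kmem.lock);
  r->next = kmem.freelist;
  kmem.freelist = r;
  release(&kmem.lock);
}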

View file

@@ -6,20 +6,16 @@
#include "proc.h"
#include "defs.h"
struct proc proc[NPROC];
struct cpu cpus[NCPU];
struct proc proc[NPROC];
struct proc *initproc;
struct spinlock pid_lock;
int nextpid = 1;
struct spinlock pid_lock;
extern void forkret(void);
// for returning out of the kernel
extern void sysexit(void);
static void wakeup1(struct proc *chan);
extern char trampout[]; // trampoline.S
@@ -68,8 +64,10 @@ allocpid() {
int pid;
acquire(&pid_lock);
pid = nextpid++;
pid = nextpid;
nextpid = nextpid + 1;
release(&pid_lock);
return pid;
}
@@ -77,7 +75,7 @@ allocpid() {
// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel,
// and return with p->lock held.
// Otherwise return 0.
// If there are no free procs, return 0.
static struct proc*
allocproc(void)
{
@@ -98,6 +96,7 @@ found:
// Allocate a page for the kernel stack.
if((p->kstack = kalloc()) == 0){
release(&p->lock);
return 0;
}
@@ -105,6 +104,7 @@ found:
if((p->tf = (struct trapframe *)kalloc()) == 0){
kfree(p->kstack);
p->kstack = 0;
release(&p->lock);
return 0;
}
@@ -145,16 +145,13 @@ freeproc(struct proc *p)
}
// Create a page table for a given process,
// with no users pages, but with trampoline pages.
// Called both when creating a process, and
// by exec() when building tentative new memory image,
// which might fail.
// with no user pages, but with trampoline pages.
pagetable_t
proc_pagetable(struct proc *p)
{
pagetable_t pagetable;
// An empty user page table.
// An empty page table.
pagetable = uvmcreate();
// map the trampoline code (for system call return)
@@ -172,9 +169,7 @@ proc_pagetable(struct proc *p)
}
// Free a process's page table, and free the
// physical memory the page table refers to.
// Called both when a process exits and from
// exec() if it fails.
// physical memory it refers to.
void
proc_freepagetable(pagetable_t pagetable, uint64 sz)
{
@@ -187,9 +182,12 @@ proc_freepagetable(pagetable_t pagetable, uint64 sz)
// a user program that calls exec("/init")
// od -t xC initcode
uchar initcode[] = {
0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x05, 0x02, 0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x05, 0x02,
0x9d, 0x48, 0x73, 0x00, 0x00, 0x00, 0x89, 0x48, 0x73, 0x00, 0x00, 0x00, 0xef, 0xf0, 0xbf, 0xff,
0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x01, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x05, 0x02,
0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x05, 0x02,
0x9d, 0x48, 0x73, 0x00, 0x00, 0x00, 0x89, 0x48,
0x73, 0x00, 0x00, 0x00, 0xef, 0xf0, 0xbf, 0xff,
0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x01,
0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00
};
@@ -203,12 +201,14 @@ userinit(void)
p = allocproc();
initproc = p;
// allocate one user page and copy init's instructions
// and data into it.
uvminit(p->pagetable, initcode, sizeof(initcode));
p->sz = PGSIZE;
// prepare for the very first "return" from kernel to user.
p->tf->epc = 0;
p->tf->sp = PGSIZE;
p->tf->epc = 0; // user program counter
p->tf->sp = PGSIZE; // user stack pointer
safestrcpy(p->name, "initcode", sizeof(p->name));
p->cwd = namei("/");
@@ -218,7 +218,7 @@ userinit(void)
release(&p->lock);
}
// Grow current process's memory by n bytes.
// Grow or shrink user memory by n bytes.
// Return 0 on success, -1 on failure.
int
growproc(int n)
@@ -240,8 +240,8 @@ growproc(int n)
return 0;
}
// Create a new process, copying p as the parent.
// Sets up child kernel stack to return as if from system call.
// Create a new process, copying the parent.
// Sets up child kernel stack to return as if from fork() system call.
int
fork(void)
{
@@ -287,8 +287,8 @@ fork(void)
return pid;
}
// Pass p's abandoned children to init. p and p's parent
// are locked.
// Pass p's abandoned children to init.
// Caller must hold p->lock and parent->lock.
void
reparent(struct proc *p, struct proc *parent) {
struct proc *pp;
@@ -312,7 +312,6 @@ reparent(struct proc *p, struct proc *parent) {
}
}
// Exit the current process. Does not return.
// An exited process remains in the zombie state
// until its parent calls wait().
@@ -343,6 +342,7 @@ exit(void)
acquire(&p->lock);
// Give our children to init.
reparent(p, p->parent);
p->state = ZOMBIE;
@@ -366,24 +366,32 @@ wait(void)
int havekids, pid;
struct proc *p = myproc();
// hold p->lock for the whole time to avoid lost
// wakeups from a child's exit().
acquire(&p->lock);
for(;;){
// Scan through table looking for exited children.
havekids = 0;
for(np = proc; np < &proc[NPROC]; np++){
if(np->parent != p)
continue;
acquire(&np->lock);
havekids = 1;
if(np->state == ZOMBIE){
// Found one.
pid = np->pid;
freeproc(np);
// this code uses np->parent without holding np->lock.
// acquiring the lock first would cause a deadlock,
// since np might be an ancestor, and we already hold p->lock.
if(np->parent == p){
// np->parent can't change between the check and the acquire()
// because only the parent changes it, and we're the parent.
acquire(&np->lock);
havekids = 1;
if(np->state == ZOMBIE){
// Found one.
pid = np->pid;
freeproc(np);
release(&np->lock);
release(&p->lock);
return pid;
}
release(&np->lock);
release(&p->lock);
return pid;
}
release(&np->lock);
}
// No point waiting if we don't have any children.
@@ -392,7 +400,7 @@ wait(void)
return -1;
}
// Wait for children to exit. (See wakeup1 call in reparent.)
// Wait for a child to exit.
sleep(p, &p->lock); //DOC: wait-sleep
}
}
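
To make the deadlock mentioned in the comment above concrete, a brief illustration (reasoning only, not code from this commit):

// Suppose np is p's own parent, and that parent is itself blocked in wait():
// it holds its own lock and, since p is one of its children, it will try to
// acquire p->lock. If p, in turn, insisted on acquiring np->lock before checking
// np->parent, then p (holding p->lock) would spin for the parent's lock while
// the parent (holding its lock) spins for p's lock, and neither could proceed.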
@@ -401,10 +409,10 @@ wait(void)
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
// Scheduler never returns. It loops, doing:
// - choose a process to run
// - swtch to start running that process
// - choose a process to run.
// - swtch to start running that process.
// - eventually that process transfers control
// via swtch back to the scheduler.
// via swtch back to the scheduler.
void
scheduler(void)
{
@@ -413,7 +421,7 @@ scheduler(void)
c->proc = 0;
for(;;){
// Enable interrupts on this processor.
// Give devices a brief chance to interrupt.
intr_on();
for(p = proc; p < &proc[NPROC]; p++) {
@@ -435,7 +443,7 @@ scheduler(void)
}
}
// Enter scheduler. Must hold only p->lock
// Switch to scheduler. Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
@@ -512,12 +520,13 @@ sleep(void *chan, struct spinlock *lk)
// change p->state and then call sched.
// Once we hold p->lock, we can be
// guaranteed that we won't miss any wakeup
// (wakeup runs with p->lock locked),
// (wakeup locks p->lock),
// so it's okay to release lk.
if(lk != &p->lock){ //DOC: sleeplock0
acquire(&p->lock); //DOC: sleeplock1
release(lk);
}
// Go to sleep.
p->chan = chan;
p->state = SLEEPING;
@@ -535,8 +544,8 @@ sleep(void *chan, struct spinlock *lk)
}
//PAGEBREAK!
// Wake up p, used by exit()
// Caller should lock p.
// Wake up p if it is sleeping in wait(); used by exit().
// Caller must hold p->lock.
static void
wakeup1(struct proc *p)
{
@@ -545,8 +554,8 @@ wakeup1(struct proc *p)
}
}
// Wake up all processes sleeping on chan. Never
// called when holding a p->lock
// Wake up all processes sleeping on chan.
// Must be called without any p->lock.
void
wakeup(void *chan)
{
@@ -562,25 +571,25 @@ wakeup(void *chan)
}
// Kill the process with the given pid.
// Process won't exit until it returns
// to user space (see trap in trap.c).
// The victim won't exit until it tries to return
// to user space (see usertrap() in trap.c).
int
kill(int pid)
{
struct proc *p;
for(p = proc; p < &proc[NPROC]; p++){
acquire(&p->lock);
if(p->pid == pid){
acquire(&p->lock);
if(p->pid != pid)
panic("kill");
p->killed = 1;
// Wake process from sleep if necessary.
if(p->state == SLEEPING)
if(p->state == SLEEPING){
// Wake process from sleep().
p->state = RUNNABLE;
}
release(&p->lock);
return 0;
}
release(&p->lock);
}
return -1;
}

View file

@@ -18,33 +18,37 @@ struct context {
uint64 s11;
};
// Per-CPU state
// Per-CPU state.
struct cpu {
struct proc *proc; // The process running on this cpu or null
struct context scheduler; // swtch() here to enter scheduler
int noff; // Depth of push_off() nesting.
int intena; // Were interrupts enabled before push_off()?
struct proc *proc; // The process running on this cpu, or null.
struct context scheduler; // swtch() here to enter scheduler().
int noff; // Depth of push_off() nesting.
int intena; // Were interrupts enabled before push_off()?
};
extern struct cpu cpus[NCPU];
//PAGEBREAK: 17
// per-process data for the early trap handling code in trampoline.S.
// per-process data for the trap handling code in trampoline.S.
// sits in a page by itself just under the trampoline page in the
// user page table. not specially mapped in the kernel page table.
// the sscratch register points here.
// trampoline.S saves user registers, then restores kernel_sp and
// kernel_satp.
// includes callee-saved registers like s0-s11 because the
// trampin in trampoline.S saves user registers in the trapframe,
// then initializes registers from the trapframe's
// kernel_sp, kernel_hartid, kernel_satp, and jumps to kernel_trap.
// usertrapret() and trampout in trampoline.S set up
// the trapframe's kernel_*, restore user registers from the
// trapframe, switch to the user page table, and enter user space.
// the trapframe includes callee-saved user registers like s0-s11 because the
// return-to-user path via usertrapret() doesn't return through
// the entire kernel call stack.
struct trapframe {
/* 0 */ uint64 kernel_satp;
/* 8 */ uint64 kernel_sp;
/* 16 */ uint64 kernel_trap; // usertrap()
/* 24 */ uint64 epc; // saved user program counter
/* 32 */ uint64 hartid;
/* 0 */ uint64 kernel_satp; // kernel page table
/* 8 */ uint64 kernel_sp; // top of process's kernel stack
/* 16 */ uint64 kernel_trap; // usertrap()
/* 24 */ uint64 epc; // saved user program counter
/* 32 */ uint64 kernel_hartid; // saved kernel tp
/* 40 */ uint64 ra;
/* 48 */ uint64 sp;
/* 56 */ uint64 gp;
@@ -83,16 +87,20 @@ enum procstate { UNUSED, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
// Per-process state
struct proc {
struct spinlock lock;
// p->lock must be held when using these:
enum procstate state; // Process state
struct proc *parent; // Parent process
void *chan; // If non-zero, sleeping on chan
int killed; // If non-zero, have been killed
int pid; // Process ID
// these are private to the process, so p->lock need not be held.
char *kstack; // Bottom of kernel stack for this process
uint64 sz; // Size of process memory (bytes)
pagetable_t pagetable; // Page table
enum procstate state; // Process state
int pid; // Process ID
struct proc *parent; // Parent process
struct trapframe *tf; // data page for trampoline.S
struct context context; // swtch() here to run process
void *chan; // If non-zero, sleeping on chan
int killed; // If non-zero, have been killed
struct file *ofile[NOFILE]; // Open files
struct inode *cwd; // Current directory
char name[16]; // Process name (debugging)

View file

@@ -312,6 +312,17 @@ r_ra()
return x;
}
// tell the machine to finish any previous writes to
// PTEs, so that a subsequent use of a virtual
// address or load of the SATP will see those writes.
// perhaps this also flushes the TLB.
static inline void
sfence_vma()
{
// the zero, zero means flush all TLB entries.
asm volatile("sfence.vma zero, zero");
}
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
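
A minimal usage sketch for sfence_vma() (load_pagetable() is a hypothetical helper for illustration; this commit's real call sites are the trampoline.S and kvminithart() hunks later in this diff, and w_satp() is assumed from the rest of riscv.h):

  // flush before installing a page table so the switch sees up-to-date PTEs
  static inline void
  load_pagetable(uint64 satp_value)
  {
    sfence_vma();          // complete earlier PTE writes (and, per the comment above, perhaps flush the TLB)
    w_satp(satp_value);    // install the new page table root
  }

kvminithart() at the end of this diff uses exactly this order: sfence_vma(), then w_satp(MAKE_SATP(kernel_pagetable)).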

View file

@@ -18,8 +18,6 @@ initlock(struct spinlock *lk, char *name)
// Acquire the lock.
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
@@ -27,7 +25,7 @@ acquire(struct spinlock *lk)
if(holding(lk))
panic("acquire");
// On RISC-V, this turns into an atomic swap:
// On RISC-V, sync_lock_test_and_set turns into an atomic swap:
// a5 = 1
// s1 = &lk->locked
// amoswap.w.aq a5, a5, (s1)
@@ -59,9 +57,10 @@ release(struct spinlock *lk)
__sync_synchronize();
// Release the lock, equivalent to lk->locked = 0.
// This code can't use a C assignment, since it might
// not be atomic.
// On RISC-V, this turns into an atomic swap:
// This code doesn't use a C assignment, since the C standard
// implies that an assignment might be implemented with
// multiple store instructions.
// On RISC-V, sync_lock_release turns into an atomic swap:
// s1 = &lk->locked
// amoswap.w zero, zero, (s1)
__sync_lock_release(&lk->locked);
@@ -81,7 +80,7 @@ holding(struct spinlock *lk)
}
// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
// it takes two pop_off to undo two push_off. Also, if interrupts
// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
// are initially off, then push_off, pop_off leaves them off.
void
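
A small illustration of the matched nesting described above (a sketch only; the noff and intena bookkeeping lives in struct cpu, as shown in the proc.h hunk earlier in this diff):

  push_off();   // interrupts off, nesting depth 1
  push_off();   // still off, nesting depth 2
  pop_off();    // still off, nesting depth 1
  pop_off();    // interrupts return to whatever they were before the first push_off()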

View file

@@ -5,7 +5,5 @@ struct spinlock {
// For debugging:
char *name; // Name of lock.
struct cpu *cpu; // The cpu holding the lock.
struct cpu *last_release;
uint64 last_pc;
};

View file

@@ -157,8 +157,8 @@ static uint64 (*syscalls[])(void) = {
[SYS_close] sys_close,
};
static void
dosyscall(void)
void
syscall(void)
{
int num;
struct proc *p = myproc();
@@ -174,15 +174,3 @@ dosyscall(void)
p->tf->a0 = -1;
}
}
void
syscall()
{
if(myproc()->killed)
exit();
dosyscall();
if(myproc()->killed)
exit();
return;
}
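
A hedged sketch of the merged syscall() after this hunk; the dispatch body in the middle is assumed unchanged from the lines the hunk elides:

void
syscall(void)
{
  int num;
  struct proc *p = myproc();

  num = p->tf->a7;                        // system call number from the user's a7
  if(num > 0 && num < NELEM(syscalls) && syscalls[num]) {
    p->tf->a0 = syscalls[num]();          // return value goes back to the user in a0
  } else {
    printf("%d %s: unknown sys call %d\n", p->pid, p->name, num);
    p->tf->a0 = -1;
  }
}

The killed checks that the old syscall() wrapper performed now happen in usertrap(), as the trap.c hunk below shows.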

View file

@@ -17,7 +17,8 @@ trampout:
# a0: p->tf in user page table
# a1: new value for satp, for user page table
# switch to user page table
# switch to user page table.
sfence.vma zero, zero
csrw satp, a1
# put the saved user a0 in sscratch, so we
@@ -120,7 +121,7 @@ trampin:
# restore kernel stack pointer from p->tf->kernel_sp
ld sp, 8(a0)
# make tp hold the current hartid, from p->tf->hartid
# make tp hold the current hartid, from p->tf->kernel_hartid
ld tp, 32(a0)
# remember the address of usertrap(), p->tf->kernel_trap
@@ -128,6 +129,7 @@ trampin:
# restore kernel page table from p->tf->kernel_satp
ld t1, 0(a0)
sfence.vma zero, zero
csrw satp, t1
# a0 is no longer valid, since the kernel page

View file

@@ -53,6 +53,9 @@ usertrap(void)
if(r_scause() == 8){
// system call
if(p->killed)
exit();
// sepc points to the ecall instruction,
// but we want to return to the next instruction.
p->tf->epc += 4;
@@ -100,7 +103,7 @@ usertrapret(void)
p->tf->kernel_satp = r_satp();
p->tf->kernel_sp = (uint64)p->kstack + PGSIZE;
p->tf->kernel_trap = (uint64)usertrap;
p->tf->hartid = r_tp();
p->tf->kernel_hartid = r_tp();
// set up the registers that trampoline.S's sret will use
// to get to user space.
@@ -155,6 +158,15 @@ kerneltrap()
w_sstatus(sstatus);
}
void
clockintr()
{
acquire(&tickslock);
ticks++;
wakeup(&ticks);
release(&tickslock);
}
// check if it's an external interrupt or software interrupt,
// and handle it.
// returns 2 if timer interrupt,
@@ -179,16 +191,15 @@ devintr()
plic_complete(irq);
return 1;
} else if(scause == 0x8000000000000001){
// software interrupt from a machine-mode timer interrupt.
// software interrupt from a machine-mode timer interrupt,
// forwarded by machinevec in kernelvec.S.
if(cpuid() == 0){
acquire(&tickslock);
ticks++;
wakeup(&ticks);
release(&tickslock);
clockintr();
}
// acknowledge.
// acknowledge the software interrupt by clearing
// the SSIP bit in sip.
w_sip(r_sip() & ~2);
return 2;

View file

@@ -20,7 +20,7 @@
// the address of virtio mmio register r.
#define R(r) ((volatile uint32 *)(VIRTIO0 + (r)))
struct spinlock virtio_disk_lock;
struct spinlock vdisk_lock;
// memory for virtio descriptors &c for queue 0.
// this is a global instead of allocated because it has
@@ -49,7 +49,7 @@ virtio_disk_init(void)
{
uint32 status = 0;
initlock(&virtio_disk_lock, "virtio_disk");
initlock(&vdisk_lock, "virtio_disk");
if(*R(VIRTIO_MMIO_MAGIC_VALUE) != 0x74726976 ||
*R(VIRTIO_MMIO_VERSION) != 1 ||
@@ -168,7 +168,7 @@ virtio_disk_rw(struct buf *b)
{
uint64 sector = b->blockno * (BSIZE / 512);
acquire(&virtio_disk_lock);
acquire(&vdisk_lock);
// the spec says that legacy block operations use three
// descriptors: one for type/reserved/sector, one for
@@ -180,7 +180,7 @@ virtio_disk_rw(struct buf *b)
if(alloc3_desc(idx) == 0) {
break;
}
sleep(&free[0], &virtio_disk_lock);
sleep(&free[0], &vdisk_lock);
}
// format the three descriptors.
@@ -234,16 +234,16 @@ virtio_disk_rw(struct buf *b)
// Wait for virtio_disk_intr() to say request has finished.
while((b->flags & (B_VALID|B_DIRTY)) != B_VALID){
sleep(b, &virtio_disk_lock);
sleep(b, &vdisk_lock);
}
release(&virtio_disk_lock);
release(&vdisk_lock);
}
void
virtio_disk_intr()
{
acquire(&virtio_disk_lock);
acquire(&vdisk_lock);
while((used_idx % NUM) != (used->id % NUM)){
int id = used->elems[used_idx].id;
@@ -262,5 +262,5 @@ virtio_disk_intr()
used_idx = (used_idx + 1) % NUM;
}
release(&virtio_disk_lock);
release(&vdisk_lock);
}

View file

@@ -61,6 +61,7 @@ kvminit()
void
kvminithart()
{
sfence_vma();
w_satp(MAKE_SATP(kernel_pagetable));
}