Spinlocks and initial proc implementation
commit ff3ad1e719 (parent eb0800c742)
4 changed files with 209 additions and 0 deletions
lib/proc.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include <proc.h>

struct Cpu cpus[NCPU];

// Must be called with interrupts disabled,
// to prevent a race with the process being moved
// to a different CPU.
int cpuid(void) {
  int id = r_tp();
  return id;
}

// Return this CPU's cpu struct.
// Interrupts must be disabled.
struct Cpu *mycpu(void) {
  int id = cpuid();
  struct Cpu *c = &cpus[id];
  return c;
}
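cpuid() leans on r_tp(), presumably provided by riscv.h, which reads the RISC-V tp (thread pointer) register; the boot code is assumed to have stored each hart's ID there before entering C. A minimal sketch of such a helper, assuming a u64 typedef from types.h:

// Sketch (assumption, not part of this commit): read the tp register.
// By convention the boot code puts the hart (core) ID in tp so that
// cpuid() can use it to index cpus[].
static inline u64 r_tp(void) {
  u64 x;
  asm volatile("mv %0, tp" : "=r"(x));
  return x;
}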
lib/proc.h (new file, 23 lines)
@@ -0,0 +1,23 @@
#include <config.h>
#include <lib/spinlock.h>
#include <riscv.h>
#include <types.h>

int cpuid(void);
struct Cpu *mycpu(void);

/** Saved registers for kernel context switches. */
struct Context {};

/** Per-CPU state. */
struct Cpu {
  struct Proc *proc;       // The process running on this cpu, or null.
  struct Context context;  // swtch() here to enter scheduler().
  int noff;                // Depth of push_off() nesting.
  int intena;              // Were interrupts enabled before push_off()?
};

extern struct Cpu cpus[NCPU];

/** Per-process state */
struct Proc {};
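Since cpuid() and mycpu() are only stable while interrupts are off (an interrupt could otherwise lead to the process migrating to another core mid-call), callers would bracket them with push_off()/pop_off() from spinlock.c. A hypothetical caller, for illustration only:

// Hypothetical example (not in this commit): safely read this CPU's id.
int my_id(void) {
  push_off();        // disable interrupts so the process can't migrate
  int id = cpuid();
  pop_off();         // restore the previous interrupt state
  return id;         // note: may be stale once interrupts are back on
}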
lib/spinlock.c (new file, 116 lines)
@@ -0,0 +1,116 @@
/**
 * Mutual exclusion spin locks.
 * (Not mutexes, as these are spinning locks.)
 */

// #include <lib/stdio.h>
#include <proc.h>
#include <riscv.h>
#include <spinlock.h>

// Temporary no-op stub until a real panic() exists.
// void panic(char *s) { for (;;); }
void panic(char *s) {}

/**
 * The acquire() and release() functions control ownership of the lock.
 * To perform these operations, modern CPUs provide atomic instructions
 * that prevent the cores from stepping on each other's toes, which would
 * otherwise cause a race condition.
 *
 * GCC provides a set of built-in functions that allow you to use atomic
 * instructions in an architecture-independent way. These functions are
 * documented in the GCC manual:
 *
 * See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
 * See: https://en.wikipedia.org/wiki/Memory_barrier
 *
 * On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
 *   a5 = 1
 *   s1 = &lk->locked
 *   amoswap.w.aq a5, a5, (s1)
 *
 * On RISC-V, __sync_lock_release turns into an atomic swap:
 *   s1 = &lk->locked
 *   amoswap.w zero, zero, (s1)
 *
 * __sync_synchronize();
 *
 * This function tells the C compiler and the processor to not move loads or
 * stores past this point, to ensure that the critical section's memory
 * references happen strictly after the lock is acquired and strictly before
 * it is released. On RISC-V, this emits a fence instruction.
 */

/** Initialize spinlock */
void initlock(struct spinlock *lk, char *name) {
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this cpu.
 */
void acquire(struct spinlock *lk) {
  push_off(); // disable interrupts to avoid deadlock.

  if (holding(lk)) // If the lock is already held, panic.
    panic("acquire");

  // Spin until acquired. See file header for details.
  while (__sync_lock_test_and_set(&lk->locked, 1) != 0) {
  }
  __sync_synchronize(); // Critical-section accesses happen after this point.

  // Record info about lock acquisition for holding() and debugging.
  lk->cpu = mycpu();
}

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void release(struct spinlock *lk) {
  if (!holding(lk)) // If the lock is not held, panic.
    panic("release");

  lk->cpu = 0; // 0 means unheld

  __sync_synchronize(); // Critical-section accesses happen before this point.
  __sync_lock_release(&lk->locked); // Essentially lk->locked = 0

  pop_off();
}

// Check whether this cpu is holding the lock.
// Interrupts must be off.
int holding(struct spinlock *lk) {
  int r;
  r = (lk->locked && lk->cpu == mycpu());
  return r;
}

// push_off/pop_off are like intr_off()/intr_on() except that they are
// matched: it takes two pop_off()s to undo two push_off()s. Also, if
// interrupts are initially off, then push_off/pop_off leave them off.

void push_off(void) {
  int old = intr_get();

  intr_off();
  if (mycpu()->noff == 0)
    mycpu()->intena = old;
  mycpu()->noff += 1;
}

void pop_off(void) {
  struct Cpu *c = mycpu();
  if (intr_get())
    panic("pop_off - interruptible");
  if (c->noff < 1)
    panic("pop_off");
  c->noff -= 1;
  if (c->noff == 0 && c->intena)
    intr_on();
}
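A quick sketch of how the API fits together, using a hypothetical shared counter (the names count_lock, count, counter_init, and counter_inc are illustrative, not part of the commit):

// Hypothetical usage: a shared counter guarded by a spinlock.
struct spinlock count_lock;
int count;

void counter_init(void) {
  initlock(&count_lock, "count");
}

void counter_inc(void) {
  acquire(&count_lock); // spins until this CPU owns the lock
  count += 1;           // critical section
  release(&count_lock);
}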
lib/spinlock.h (new file, 51 lines)
@@ -0,0 +1,51 @@
#ifndef KERNEL_SPINLOCK_H
#define KERNEL_SPINLOCK_H

#include "types.h"

/** Mutual exclusion spin lock */
struct spinlock {
  u32 locked; // Is the lock held?

  // NOTE: Perhaps feature-gate this?

  // For debugging:
  char *name;      // Name of lock.
  struct Cpu *cpu; // The cpu holding the lock.
};

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this cpu.
 */
void acquire(struct spinlock *);

/**
 * Check whether this cpu is holding the lock.
 * Interrupts must be off.
 */
int holding(struct spinlock *);

/**
 * Initialize spinlock
 */
void initlock(struct spinlock *, char *);

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void release(struct spinlock *);

/**
 * @brief push_off/pop_off are like intr_off()/intr_on() except that they are
 * matched: it takes two pop_off()s to undo two push_off()s. Also, if
 * interrupts are initially off, then push_off, pop_off leaves them off.
 */
void push_off(void);

/** @copydoc push_off */
void pop_off(void);

#endif
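The matched semantics matter once locks nest: every acquire() calls push_off(), and interrupts only come back on after the outermost release(). An illustrative sequence (lock names a and b are hypothetical):

// Hypothetical nesting: interrupts stay off until the outer release().
acquire(&a);  // push_off(): noff 0 -> 1, remembers prior interrupt state
acquire(&b);  // push_off(): noff 1 -> 2
// ... critical section holding both locks ...
release(&b);  // pop_off(): noff 2 -> 1, interrupts remain off
release(&a);  // pop_off(): noff 1 -> 0, re-enables interrupts only if
              // they were enabled before the first push_off()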