/**
 * Mutual exclusion spin locks.
 * (These are spinning locks, not sleeping mutexes.)
 */

// #include <lib/stdio.h>
#include "string.h"
#include <panic.h>
#include <proc.h>
#include <riscv.h>
#include <spinlock.h>
#include <stdbool.h> /* bool, used by spin_trylock() */
#include <stdint.h>  /* uint32_t, used by the spinlock_t helpers */
#include <uart.h>

/**
 * The acquire() and release() functions control ownership of the lock.
 * To perform these operations, modern CPUs provide atomic instructions
 * that prevent the cores from stepping on each other's toes (otherwise two
 * cores could both see the lock as free and both take it, a race condition).
 *
 * GCC provides a set of built-in functions that allow you to use atomic
 * instructions in an architecture-independent way. These functions are
 * documented in the GCC manual:
 *
 * See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
 * See: https://en.wikipedia.org/wiki/Memory_barrier
 *
 * On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
 *   a5 = 1
 *   s1 = &lk->locked
 *   amoswap.w.aq a5, a5, (s1)
 *
 * On RISC-V, __sync_lock_release turns into an atomic swap of zero:
 *   s1 = &lk->locked
 *   amoswap.w zero, zero, (s1)
 *
 * __sync_synchronize();
 *
 * This function tells the C compiler and the processor not to move loads or
 * stores past this point, to ensure that the critical section's memory
 * references happen strictly after the lock is acquired.
 * On RISC-V, this emits a fence instruction.
 */

/** Initialize Spinlock */
void initlock(struct Spinlock *lk, char *name) {
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this cpu.
 */
void acquire(struct Spinlock *lk) {
  push_off(); // Disable interrupts to avoid deadlock.

  if (holding(lk)) // If this cpu already holds the lock, panic.
    panic("acquire");

  // Spin until acquired. See the file header for details.
  while (__sync_lock_test_and_set(&lk->locked, 1) != 0)
    ;

  // Memory fence: the critical section's loads/stores must not be
  // reordered before this point (i.e. before the lock is held).
  __sync_synchronize();

  // Record info about lock acquisition for holding() and debugging.
  lk->cpu = mycpu();
}

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void release(struct Spinlock *lk) {
  if (!holding(lk)) // If the lock is not held by this cpu, panic.
    panic("release");

  lk->cpu = 0; // 0 means unheld.

  // Memory fence: the critical section's loads/stores must complete before
  // the lock is released below.
  __sync_synchronize();
  __sync_lock_release(&lk->locked); // Essentially lk->locked = 0.

  pop_off();
}

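/*
 * Usage sketch (illustrative only, disabled): shared state is touched only
 * between an acquire()/release() pair. The lock and counter names below are
 * hypothetical and not part of this file's interface.
 */
#if 0
static struct Spinlock example_lock; // set up once with initlock(&example_lock, "example")
static unsigned int example_counter; // shared state guarded by example_lock

static void example_increment(void) {
  acquire(&example_lock);  // interrupts pushed off, lock held by this cpu
  example_counter += 1;    // safe: no other cpu can be in this section
  release(&example_lock);  // lock dropped, interrupt state restored
}
#endif
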
// Check whether this cpu is holding the lock.
// Interrupts must be off.
int holding(struct Spinlock *lk) {
  int r;
  r = (lk->locked && lk->cpu == mycpu());
  return r;
}

// push_off()/pop_off() are like intr_off()/intr_on() except that they are
// matched: it takes two pop_off()s to undo two push_off()s. Also, if
// interrupts are initially off, then push_off() followed by pop_off() leaves
// them off.

void push_off(void) {
  int old = intr_get();

  intr_off();
  if (mycpu()->noff == 0)
    mycpu()->intena = old;

  mycpu()->noff += 1;
}

void pop_off(void) {
  struct Cpu *c = mycpu();

  if (intr_get())
    panic("pop_off - interruptible");

  if (c->noff < 1) {
    {
      // Debug aid: print the bad nesting depth before panicking.
      // TODO: Remove this block when fixed.
      char amt[100];
      itoa(c->noff, amt, 10);
      uart_puts(amt);
    }
    panic("pop_off");
  }

  c->noff -= 1;
  if (c->noff == 0 && c->intena)
    intr_on();
}

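/*
 * Nesting sketch (illustrative only, disabled): taking two locks pushes the
 * interrupt-off depth twice, so interrupts come back on only at the matching
 * second pop_off(). The lock parameters here are hypothetical.
 */
#if 0
static void nested_example(struct Spinlock *a, struct Spinlock *b) {
  acquire(a);  // push_off(): noff 0 -> 1, remembers the old interrupt state
  acquire(b);  // push_off(): noff 1 -> 2
  // ... work with both locks held, interrupts off ...
  release(b);  // pop_off(): noff 2 -> 1, interrupts stay off
  release(a);  // pop_off(): noff 1 -> 0, interrupts restored if they were on
}
#endif
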
void spinlock_init(spinlock_t *l) {
  l->v = 0;
}

__attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
  uint32_t old;
  // old = xchg_acquire(&l->v, 1) using an AMO swap with acquire ordering.
  __asm__ volatile("amoswap.w.aq %0, %2, (%1)\n"
                   : "=&r"(old)
                   : "r"(&l->v), "r"(1u)
                   : "memory");
  return old == 0; // Acquired only if the lock was previously free.
}

void spin_unlock(spinlock_t *l) {
  // Release: store 0 with .rl (release) ordering.
  uint32_t dummy;
  __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n"
                   : "=&r"(dummy)
                   : "r"(&l->v), "r"(0u)
                   : "memory");
}

// Optional: tiny pause/backoff (works even if Zihintpause isn't present).
// See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc
static inline void cpu_relax(void) {
#if defined(__riscv_zihintpause)
  __asm__ volatile("pause");
#else
  __asm__ volatile("nop");
#endif
}

// Test-and-test-and-set acquire with polite spinning + exponential backoff.
void spin_lock(spinlock_t *l) {
  unsigned backoff = 1;
  for (;;) {
    if (spin_trylock(l))
      return;

    // Contended: spin on plain loads (no AMO traffic) until it looks free.
    while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
      for (unsigned i = 0; i < backoff; ++i)
        cpu_relax();
      if (backoff < 1u << 12)
        backoff <<= 1;
    }
    // Looks free: fall through and retry the atomic swap.
  }
}
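
/*
 * Usage sketch for the raw spinlock_t API (illustrative only, disabled):
 * spin_trylock() returns false on contention, so its result must be checked;
 * spin_lock() spins with backoff until it wins. Unlike acquire()/release(),
 * these helpers do not touch the interrupt state. Names are hypothetical.
 */
#if 0
static spinlock_t example_raw_lock;  // zeroed by spinlock_init()
static unsigned long example_stat;   // shared state guarded by example_raw_lock

static void example_raw_usage(void) {
  spinlock_init(&example_raw_lock);

  if (spin_trylock(&example_raw_lock)) { // non-blocking attempt
    example_stat += 1;
    spin_unlock(&example_raw_lock);
  }

  spin_lock(&example_raw_lock);          // blocking: spins until acquired
  example_stat += 1;
  spin_unlock(&example_raw_lock);
}
#endif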