/**
 * Mutual exclusion spin locks.
 * (Not mutexes: these locks spin instead of sleeping.)
 */

#include "spinlock.h"
#include "riscv.h"
#include "proc.h"
#include "defs.h"

/**
 * The acquire() and release() functions control ownership of the lock.
 * To perform these operations, modern CPUs provide atomic instructions
 * that prevent cores from stepping on each other's toes; without them,
 * two cores could both observe the lock as free and both take it (a race).
 *
 * GCC provides a set of built-in functions that allow you to use atomic
 * instructions in an architecture-independent way. These functions are
 * documented in the GCC manual:
 *
 * See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
 * See: https://en.wikipedia.org/wiki/Memory_barrier
 *
 * On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
 *   a5 = 1
 *   s1 = &lk->locked
 *   amoswap.w.aq a5, a5, (s1)
 *
 * On RISC-V, __sync_lock_release turns into an atomic swap of zero:
 *   s1 = &lk->locked
 *   amoswap.w zero, zero, (s1)
 *
 * __sync_synchronize();
 *
 * This function tells the C compiler and the processor to not move loads
 * or stores past this point, to ensure that the critical section's memory
 * references happen strictly after the lock is acquired (and strictly
 * before it is released). On RISC-V, this emits a fence instruction.
 */
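
/*
 * Illustrative sketch (not part of xv6): a minimal spinlock built directly
 * on the GCC builtins described above. The names toy_lock, toy_acquire,
 * and toy_release are hypothetical; the block is disabled with #if 0 so it
 * does not affect this file.
 */
#if 0
static volatile unsigned int toy_lock = 0;

static void
toy_acquire(void)
{
  // Atomically swap 1 into toy_lock; keep spinning while the old value
  // was nonzero, i.e. while someone else already holds the lock.
  while(__sync_lock_test_and_set(&toy_lock, 1) != 0)
    ;
  __sync_synchronize();  // critical-section accesses stay after this point
}

static void
toy_release(void)
{
  __sync_synchronize();  // critical-section accesses stay before this point
  __sync_lock_release(&toy_lock);  // atomically store 0
}
#endif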

/** Initialize spinlock */
void
initlock(struct spinlock *lk, char *name)
{
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this CPU.
 */
void
acquire(struct spinlock *lk)
{
  push_off(); // Disable interrupts to avoid deadlock with interrupt handlers.

  if(holding(lk)) // If this CPU already holds the lock, panic.
    panic("acquire");

  // Spin until acquired. See the file header for details.
  while(__sync_lock_test_and_set(&lk->locked, 1) != 0) {}

  // Keep the critical section's loads/stores after this point.
  __sync_synchronize();

  // Record info about lock acquisition for holding() and debugging.
  lk->cpu = mycpu();
}

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void
release(struct spinlock *lk)
{
  if(!holding(lk)) // If this CPU does not hold the lock, panic.
    panic("release");

  lk->cpu = 0; // 0 means unheld.

  // Keep the critical section's loads/stores before this point.
  __sync_synchronize();

  __sync_lock_release(&lk->locked); // Atomically stores 0 to lk->locked.

  pop_off();
}
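
/*
 * Typical usage (illustrative; the lock and counter names are hypothetical):
 *
 *   struct spinlock counter_lock;
 *   int counter;
 *
 *   initlock(&counter_lock, "counter");
 *   ...
 *   acquire(&counter_lock);
 *   counter += 1;          // critical section
 *   release(&counter_lock);
 */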

// Check whether this CPU is holding the lock.
// Interrupts must be off.
int
holding(struct spinlock *lk)
{
  int r;
  r = (lk->locked && lk->cpu == mycpu());
  return r;
}

// push_off()/pop_off() are like intr_off()/intr_on() except that they are
// matched: it takes two pop_off()s to undo two push_off()s. Also, if
// interrupts are initially off, then push_off() followed by pop_off()
// leaves them off. See the sketch after pop_off() below.

void
push_off(void)
{
  int old = intr_get();

  intr_off();
  // On the outermost push_off(), remember whether interrupts were enabled.
  if(mycpu()->noff == 0)
    mycpu()->intena = old;
  mycpu()->noff += 1;
}

void
pop_off(void)
{
  struct cpu *c = mycpu();
  if(intr_get())
    panic("pop_off - interruptible");
  if(c->noff < 1)
    panic("pop_off");
  c->noff -= 1;
  // Re-enable interrupts only on the outermost pop_off(), and only if
  // they were enabled before the matching outermost push_off().
  if(c->noff == 0 && c->intena)
    intr_on();
}
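
/*
 * Illustrative sketch (not part of xv6): how the matched counting behaves.
 *
 *   // interrupts initially on
 *   push_off();   // noff 0 -> 1, intena = 1, interrupts now off
 *   push_off();   // noff 1 -> 2, interrupts stay off
 *   ...
 *   pop_off();    // noff 2 -> 1, interrupts stay off
 *   pop_off();    // noff 1 -> 0, intena was 1, so interrupts back on
 */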