Documenting and slight restructure in spinlock.c

Imbus 2024-08-07 15:37:55 +02:00
parent 6f7a2ac685
commit 67a2839d0a
3 changed files with 56 additions and 41 deletions

proc.h

@@ -5,7 +5,7 @@
 #include "riscv.h"
 #include "spinlock.h"
 
-// Saved registers for kernel context switches.
+/** Saved registers for kernel context switches. */
 struct context {
   u64 ra;
   u64 sp;
@@ -25,7 +25,7 @@ struct context {
   u64 s11;
 };
 
-// Per-CPU state.
+/** Per-CPU state. */
 struct cpu {
   struct proc *proc;        // The process running on this cpu, or null.
   struct context context;   // swtch() here to enter scheduler().
@@ -88,7 +88,7 @@ struct trapframe {
 enum procstate { UNUSED, USED, SLEEPING, RUNNABLE, RUNNING, ZOMBIE };
 
-// Per-process state
+/** Per-process state */
 struct proc {
   struct spinlock lock;

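The lk->cpu field set in spinlock.c points at one of these per-CPU structs. For reference, mycpu() resolves the current hart's entry roughly as follows in stock xv6-riscv (a sketch, not part of this commit; r_tp() and NCPU are assumed to come from riscv.h and param.h):

struct cpu cpus[NCPU];   // One struct cpu per hart.

// Hart ID of the caller. Must be called with interrupts disabled,
// so the process is not moved to another hart mid-read.
int
cpuid(void)
{
  return r_tp();         // Boot code stores the hart ID in tp.
}

// This hart's struct cpu.
struct cpu*
mycpu(void)
{
  return &cpus[cpuid()];
}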
spinlock.c

@@ -1,13 +1,44 @@
-// Mutual exclusion spin locks.
+/**
+ * Mutual exclusion spin locks.
+ * (Not mutexes, as these are spinning locks.)
+ */
 #include "types.h"
 #include "param.h"
 #include "memlayout.h"
 #include "spinlock.h"
 #include "riscv.h"
 #include "proc.h"
 #include "defs.h"
+/**
+ * The acquire() and release() functions control ownership of the lock.
+ * To perform these operations, modern CPUs provide atomic instructions
+ * that prevent the cores from stepping on each other's toes, otherwise
+ * known as a race condition.
+ *
+ * GCC provides a set of built-in functions that allow you to use atomic
+ * instructions in an architecture-independent way. These functions are
+ * documented in the GCC manual:
+ *
+ * See: https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
+ * See: https://en.wikipedia.org/wiki/Memory_barrier
+ *
+ * On RISC-V, __sync_lock_test_and_set turns into an atomic swap:
+ *   a5 = 1
+ *   s1 = &lk->locked
+ *   amoswap.w.aq a5, a5, (s1)
+ *
+ * On RISC-V, __sync_lock_release also turns into an atomic swap:
+ *   s1 = &lk->locked
+ *   amoswap.w zero, zero, (s1)
+ *
+ * __sync_synchronize();
+ *
+ * This function tells the C compiler and the processor not to move
+ * loads or stores past this point, ensuring that the critical section's
+ * memory references happen strictly after the lock is acquired.
+ * On RISC-V, this emits a fence instruction.
+ */
+
+/** Initialize spinlock */
 void
 initlock(struct spinlock *lk, char *name)
 {
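The amoswap lowering described in the new header comment is easy to verify outside the kernel. A hypothetical scratch file (the cross-compiler name below is an assumption; any RISC-V GCC works) can be compiled with -O2 -S, and the quoted instructions show up in the .s output:

/* check_amoswap.c - sketch for inspecting GCC's output, e.g.
 *   riscv64-unknown-elf-gcc -O2 -S check_amoswap.c */
static unsigned int locked;

unsigned int take(void)
{
    return __sync_lock_test_and_set(&locked, 1); /* expect amoswap.w.aq */
}

void drop(void)
{
    /* expect amoswap.w zero (or a fence plus a plain store,
     * depending on GCC version) */
    __sync_lock_release(&locked);
}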
@@ -16,57 +47,41 @@ initlock(struct spinlock *lk, char *name)
   lk->cpu = 0;
 }
 
-// Acquire the lock.
-// Loops (spins) until the lock is acquired.
+/**
+ * Acquire the lock.
+ * Loops (spins) until the lock is acquired.
+ * Panics if the lock is already held by this cpu.
+ */
 void
 acquire(struct spinlock *lk)
 {
   push_off(); // disable interrupts to avoid deadlock.
-  if(holding(lk))
+
+  if(holding(lk)) // If the lock is already held, panic.
     panic("acquire");
 
-  // On RISC-V, sync_lock_test_and_set turns into an atomic swap:
-  //   a5 = 1
-  //   s1 = &lk->locked
-  //   amoswap.w.aq a5, a5, (s1)
+  // See file header for details
   while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
     ;
 
-  // Tell the C compiler and the processor to not move loads or stores
-  // past this point, to ensure that the critical section's memory
-  // references happen strictly after the lock is acquired.
-  // On RISC-V, this emits a fence instruction.
-  __sync_synchronize();
+  __sync_synchronize(); // No loads/stores after this point
 
   // Record info about lock acquisition for holding() and debugging.
   lk->cpu = mycpu();
 }
 
-// Release the lock.
+/**
+ * Release the lock.
+ * Panics if the lock is not held.
+ */
 void
 release(struct spinlock *lk)
 {
-  if(!holding(lk))
+  if(!holding(lk)) // If the lock is not held, panic.
     panic("release");
 
-  lk->cpu = 0;
+  lk->cpu = 0; // 0 means unheld
 
-  // Tell the C compiler and the CPU to not move loads or stores
-  // past this point, to ensure that all the stores in the critical
-  // section are visible to other CPUs before the lock is released,
-  // and that loads in the critical section occur strictly before
-  // the lock is released.
-  // On RISC-V, this emits a fence instruction.
-  __sync_synchronize();
-
-  // Release the lock, equivalent to lk->locked = 0.
-  // This code doesn't use a C assignment, since the C standard
-  // implies that an assignment might be implemented with
-  // multiple store instructions.
-  // On RISC-V, sync_lock_release turns into an atomic swap:
-  //   s1 = &lk->locked
-  //   amoswap.w zero, zero, (s1)
-  __sync_lock_release(&lk->locked);
+  __sync_synchronize(); // No loads/stores after this point
+  __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
 
   pop_off();
 }
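acquire() and release() depend on push_off() and pop_off(), which this commit leaves untouched. They behave like intr_off()/intr_on() but nest: interrupts are re-enabled only by the outermost pop_off(), and only if they were enabled before the first push_off(). In stock xv6-riscv they look roughly like this (noff and intena are fields of struct cpu; a sketch, not necessarily this repo's exact code):

void
push_off(void)
{
  int old = intr_get();         // Were interrupts enabled before?

  intr_off();                   // Disable interrupts on this hart.
  if(mycpu()->noff == 0)
    mycpu()->intena = old;      // Remember state at the outermost push.
  mycpu()->noff += 1;           // Track nesting depth.
}

void
pop_off(void)
{
  struct cpu *c = mycpu();

  if(intr_get())
    panic("pop_off - interruptible");
  if(c->noff < 1)
    panic("pop_off");
  c->noff -= 1;
  if(c->noff == 0 && c->intena)
    intr_on();                  // Restore only at the outermost pop.
}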

spinlock.h

@@ -2,7 +2,7 @@
 #include "types.h"
 
-// Mutual exclusion lock.
+/** Mutual exclusion spin lock */
 struct spinlock {
   u32 locked;       // Is the lock held?
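The GCC builtins used above are not kernel-specific, so the same acquire/release pattern can be exercised in user space. A minimal, self-contained sketch (plain C with pthreads; all names here are invented for the example):

/* spin_demo.c - hypothetical userspace demo of the same GCC builtins.
 * Build: gcc -O2 -pthread spin_demo.c -o spin_demo */
#include <pthread.h>
#include <stdio.h>

static unsigned int locked;     /* 0 = free, 1 = held, like lk->locked */
static long counter;

static void spin_acquire(void)
{
    /* Atomically swap in 1; keep spinning while the old value was 1. */
    while (__sync_lock_test_and_set(&locked, 1) != 0)
        ;
    __sync_synchronize();       /* Critical section starts after the fence. */
}

static void spin_release(void)
{
    __sync_synchronize();       /* Order critical-section stores first. */
    __sync_lock_release(&locked);
}

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000000; i++) {
        spin_acquire();
        counter++;              /* Protected; no increments are lost. */
        spin_release();
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, worker, NULL);
    pthread_create(&b, NULL, worker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("counter = %ld (expect 2000000)\n", counter);
    return 0;
}

The explicit __sync_synchronize() calls mirror the kernel code; per the GCC manual, __sync_lock_test_and_set is already an acquire barrier and __sync_lock_release a release barrier, so the extra fences buy full-barrier ordering rather than correctness of the lock word itself.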