Spinlock cleaning

parent eec052bafa
commit 0df09d5086
2 changed files with 60 additions and 131 deletions
@@ -3,8 +3,6 @@
 * (Not mutexes as these are spinning locks).
 */

// #include <lib/stdio.h>
#include "string.h"
#include <panic.h>
#include <proc.h>
#include <riscv.h>
@@ -41,86 +39,42 @@
 * On RISC-V, this emits a fence instruction.
 */

/** Initialize Spinlock */
void initlock(struct Spinlock *lk, char *name) {
  lk->name = name;
  lk->locked = 0;
  lk->cpu = 0;
}

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this cpu.
/*
 * These are from the original xv6 implementation, with only slight modifications to their return types.
 *
 * push_off/pop_off are like intr_off()/intr_on() except that they are matched:
 * it takes two pop_off()s to undo two push_off()s. Also, if interrupts
 * are initially off, then push_off, pop_off leaves them off.
 */
void acquire(struct Spinlock *lk) {
  push_off(); // disable interrupts to avoid deadlock.

  if (holding(lk)) // If the lock is already held, panic.
    panic("acquire");

  // Spin until acquired. See file header for details.
  while (__sync_lock_test_and_set(&lk->locked, 1) != 0);

  __sync_synchronize(); // No loads/stores after this point

  // Record info about lock acquisition for holding() and debugging.
  lk->cpu = mycpu();
}

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void release(struct Spinlock *lk) {
  if (!holding(lk)) // If the lock is not held, panic.
    panic("release");

  lk->cpu = 0; // 0 means unheld
  __sync_synchronize(); // No loads/stores after this point
  __sync_lock_release(&lk->locked); // Essentially lk->locked = 0

  pop_off();
}

// Check whether this cpu is holding the lock.
// Interrupts must be off.
int holding(struct Spinlock *lk) {
  int r;
  r = (lk->locked && lk->cpu == mycpu());
  return r;
}

// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
// are initially off, then push_off, pop_off leaves them off.

void push_off(void) {
  int old = intr_get();
uint32_t push_off(void) {
  int old = intr_get();
  Cpu *cpu = mycpu();

  intr_off();
  if (mycpu()->noff == 0)
    mycpu()->intena = old;

  mycpu()->noff += 1;
  if (cpu->noff == 0)
    cpu->intena = old;

  cpu->noff += 1;
  return cpu->noff;
}

void pop_off(void) {
  struct Cpu *c = mycpu();
uint32_t pop_off(void) {
  Cpu *cpu = mycpu();

  if (intr_get())
    panic("pop_off - interruptible");
  if (c->noff < 1) {
    {
      // TODO: Remove this block when fixed
      char amt[100];
      itoa(c->noff, amt, 10);
      uart_puts(amt);
    }

  if (cpu->noff < 1)
    panic("pop_off");
  }
  c->noff -= 1;
  if (c->noff == 0 && c->intena)

  cpu->noff -= 1;

  if (cpu->noff == 0 && cpu->intena)
    intr_on();

  return cpu->noff;
}
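As a side note on the matched semantics described above, here is a minimal illustration (not part of this commit) of how push_off()/pop_off() nest, assuming the new variants return the updated cpu->noff depth as shown in the diff:

// Hypothetical sketch, not from this commit: matched push_off()/pop_off().
// Two pop_off()s are needed to undo two push_off()s, and interrupts are only
// re-enabled by the outermost pop_off(), and only if they were on beforehand.
static void nested_push_pop_example(void) {
  uint32_t d1 = push_off(); // interrupts off; nesting depth becomes 1
  uint32_t d2 = push_off(); // depth becomes 2; saved intena untouched
  // ... work that must not be interrupted ...
  pop_off();                // depth back to 1; interrupts stay off
  pop_off();                // depth 0; interrupts restored iff intena was saved as on
  (void)d1;
  (void)d2;
}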

void spinlock_init(spinlock_t *l) {

@@ -135,34 +89,47 @@ __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
}

void spin_unlock(spinlock_t *l) {
  // if (!spin_is_holding(l))
  //   panic("spin_unlock");

  l->cpu = 0;

  // Release: store 0 with .rl ordering.
  uint32_t dummy;
  __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");

  // __sync_synchronize(); // No loads/stores after this point
  // __sync_lock_release(&lk->locked); // Essentially lk->locked = 0

  // pop_off();
}
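For context on the .rl release above: the matching acquire side lives in spin_trylock, of which only the closing brace is visible in this hunk. It would typically swap in 1 with .aq ordering; a minimal sketch under that assumption, not taken from this commit:

// Hypothetical sketch of the acquire side, assuming spin_trylock swaps in 1
// with amoswap.w.aq and succeeds when the previous value was 0.
__attribute__((warn_unused_result)) static bool spin_trylock_sketch(spinlock_t *l) {
  uint32_t old;
  __asm__ volatile("amoswap.w.aq %0, %2, (%1)"
                   : "=&r"(old)
                   : "r"(&l->v), "r"(1u)
                   : "memory");
  return old == 0; // acquired iff the lock was previously free
}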

// Optional: tiny pause/backoff (works even if Zihintpause isn't present).
// See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc
static inline void cpu_relax(void) {
#if defined(__riscv_zihintpause)
  __asm__ volatile("pause");
#else
  __asm__ volatile("nop");
#endif
}
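A side note on the fallback branch of cpu_relax(): PAUSE is defined as a hint inside the FENCE encoding space (pred=W, succ=none, instruction word 0x0100000F), so it can also be emitted as a raw word and executes as a harmless no-op on cores without Zihintpause. A sketch of that variant, not part of this commit:

// Hypothetical alternative fallback (not from this commit): emit the PAUSE
// encoding directly. Because PAUSE sits in the FENCE hint space, hardware
// without Zihintpause simply treats it as a no-op fence.
static inline void cpu_relax_raw(void) {
  __asm__ volatile(".4byte 0x0100000f"); // PAUSE (FENCE pred=W, succ=0 hint)
}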

// Test-and-test-and-set acquire with polite spinning + exponential backoff.
/**
 * Test-and-test-and-set acquire with polite spinning + exponential backoff.
 */
void spin_lock(spinlock_t *l) {
  unsigned backoff = 1;
  uint32_t backoff = 1;
  for (;;) {
    if (spin_trylock(l))
      return;

    // Contended: spin on plain loads (no AMO) until it looks free.
    while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
      for (unsigned i = 0; i < backoff; ++i) cpu_relax();
      for (uint32_t i = 0; i < backoff; ++i)
        __asm__ volatile("nop"); /* NOTE: Pause can be used here if supported */
      if (backoff < 1u << 12)
        backoff <<= 1;
    }
    // Try again; loop.
  }

  l->cpu = mycpu();
}

/**
 * Check whether this cpu is holding the lock.
 * Interrupts must be off.
 */
bool spin_is_holding(spinlock_t *l) {
  int r;
  r = (l->v && l->cpu == mycpu());
  return r;
}

@@ -1,60 +1,22 @@
#ifndef KERNEL_Spinlock_H
#define KERNEL_Spinlock_H

#include <proc.h>
#include <stdbool.h>
#include <stdint.h>

/** Mutual exclusion spin lock */
struct Spinlock {
  u32 locked; // Is the lock held?

  // NOTE: Perhaps feature gate this?

  // For debugging:
  char *name;      // Name of lock.
  struct Cpu *cpu; // The cpu holding the lock.
};

/**
 * Acquire the lock.
 * Loops (spins) until the lock is acquired.
 * Panics if the lock is already held by this cpu.
 */
void acquire(struct Spinlock *);

/**
 * Check whether this cpu is holding the lock.
 * Interrupts must be off.
 */
int holding(struct Spinlock *);

/**
 * Initialize Spinlock
 */
void initlock(struct Spinlock *, char *);

/**
 * Release the lock.
 * Panics if the lock is not held.
 */
void release(struct Spinlock *);

/**
 * @brief push_off/pop_off are like intr_off()/intr_on() except that they are
 * matched: it takes two pop_off()s to undo two push_off()s. Also, if
 * interrupts are initially off, then push_off, pop_off leaves them off.
 */
void push_off(void);

/** @copydoc push_off */
void pop_off(void);

typedef struct {
  volatile uint32_t v; // 0 = unlocked, 1 = locked
  Cpu *cpu;
} spinlock_t;

uint32_t push_off(void);
uint32_t pop_off(void);

void spinlock_init(spinlock_t *l);
bool spin_trylock(spinlock_t *l);
void spin_unlock(spinlock_t *l);
bool spin_is_holding(spinlock_t *l);
void spin_lock(spinlock_t *l);

#endif
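A short usage sketch of the new spinlock_t API declared above (illustrative only, not part of this commit; the include path and the counter it protects are hypothetical):

// Hypothetical usage example, not part of this commit.
#include <spinlock.h> // assumed include path for this header
#include <stdint.h>

static spinlock_t counter_lock;
static uint32_t shared_counter; // example shared state

void counter_init(void) {
  spinlock_init(&counter_lock);
}

uint32_t counter_increment(void) {
  spin_lock(&counter_lock);          // TTAS spin with backoff until acquired
  uint32_t value = ++shared_counter; // update protected by the lock
  spin_unlock(&counter_lock);        // amoswap.w.rl store of 0 releases it
  return value;
}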