Unify spinlocks
This commit is contained in:
parent
c1a2c75054
commit
85ffed0c20
7 changed files with 56 additions and 59 deletions
2
Makefile
2
Makefile
|
@ -33,7 +33,7 @@ all: kern/kernel.elf
|
|||
|
||||
main: main.o
|
||||
|
||||
kern/kernel.elf: kern/entry.o kern/start.o kern/libkern/string.o kern/libkern/proc.o kern/libkern/uart.o kern/libkern/panic.o kern/kalloc.o kern/libkern/memory.o kern/ispinlock.o kern/libkern/spinlock.o kern/libkern/string.o kern/libkern/mini-printf.o
|
||||
# Link the kernel image from all kernel objects.
# NOTE: kern/libkern/string.o was listed twice in the prerequisites; the
# duplicate is removed ($^ deduplicates anyway, but the list should be clean).
kern/kernel.elf: kern/entry.o kern/start.o kern/libkern/string.o kern/libkern/proc.o kern/libkern/uart.o kern/libkern/panic.o kern/kalloc.o kern/libkern/memory.o kern/libkern/spinlock.o kern/libkern/mini-printf.o
	@echo LD $@
	@$(LD) $(LDFLAGS) -o $@ $^
|
||||
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
#include "ispinlock.h"
|
||||
|
||||
/* Put the lock into its released (unlocked) state. */
void spinlock_init(spinlock_t *l) {
    l->v = 0u;
}
|
||||
|
||||
/* One non-blocking attempt to take the lock.
 * Returns true iff the caller now owns it. */
__attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
    uint32_t prev;
    /* Atomic exchange: write 1 into l->v with acquire ordering (.aq);
     * prev receives the value that was there before. */
    __asm__ volatile("amoswap.w.aq %0, %2, (%1)\n" : "=&r"(prev) : "r"(&l->v), "r"(1u) : "memory");
    /* The lock was ours only if it was free (0) before the swap. */
    return prev == 0;
}
|
||||
|
||||
/* Drop the lock: atomically store 0 with release (.rl) ordering so all
 * prior writes in the critical section become visible before the release. */
void spin_unlock(spinlock_t *l) {
    uint32_t discard;
    __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(discard) : "r"(&l->v), "r"(0u) : "memory");
}
|
||||
|
||||
// Optional: tiny pause/backoff (works even if Zihintpause isn't present).
// See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc
// Hint to the core that we are busy-waiting, so it can reduce power or yield
// pipeline resources. Falls back to a plain nop when the toolchain does not
// advertise the Zihintpause extension.
static inline void cpu_relax(void) {
#if defined(__riscv_zihintpause)
    __asm__ volatile("pause");
#else
    // No pause hint available: nop keeps the spin loop non-empty without
    // any architectural side effect.
    __asm__ volatile("nop");
#endif
}
|
||||
|
||||
// Test-and-test-and-set acquire with polite spinning + exponential backoff.
|
||||
void spin_lock(spinlock_t *l) {
|
||||
unsigned backoff = 1;
|
||||
for (;;) {
|
||||
if (spin_trylock(l))
|
||||
return;
|
||||
|
||||
// Contended: spin on plain loads (no AMO) until it looks free.
|
||||
while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
|
||||
for (unsigned i = 0; i < backoff; ++i) cpu_relax();
|
||||
if (backoff < 1u << 12)
|
||||
backoff <<= 1;
|
||||
}
|
||||
// Try again; loop.
|
||||
}
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
#pragma once
#include <stdbool.h> /* required: prototypes below use `bool` (pre-C23) */
#include <stdint.h>

/* Minimal test-and-set spinlock. v is the whole lock state. */
typedef struct {
    volatile uint32_t v; // 0 = unlocked, 1 = locked
} spinlock_t;

/* Put the lock into its released state. */
void spinlock_init(spinlock_t *l);
/* One non-blocking acquire attempt; true iff the lock was taken. */
bool spin_trylock(spinlock_t *l);
/* Release a lock previously taken with spin_lock/spin_trylock. */
void spin_unlock(spinlock_t *l);
/* Blocking acquire; spins (with backoff) until the lock is taken. */
void spin_lock(spinlock_t *l);
|
|
@ -1,4 +1,4 @@
|
|||
#include <ispinlock.h>
|
||||
#include <spinlock.h>
|
||||
#include <kalloc.h>
|
||||
#include <memory.h>
|
||||
#include <panic.h>
|
||||
|
|
|
@ -122,3 +122,47 @@ void pop_off(void) {
|
|||
if (c->noff == 0 && c->intena)
|
||||
intr_on();
|
||||
}
|
||||
|
||||
/* Initialise the lock as released. */
void spinlock_init(spinlock_t *l) {
    l->v = 0u;
}
|
||||
|
||||
/* One non-blocking attempt to acquire the lock.
 * Returns true iff the swap observed the lock free (old value 0),
 * i.e. the caller now owns it. warn_unused_result because ignoring
 * the result means proceeding without the lock. */
__attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
    uint32_t old;
    // old = xchg_acquire(&l->v, 1) using AMO
    // .aq gives acquire ordering: the critical section cannot be
    // reordered before this swap.
    __asm__ volatile("amoswap.w.aq %0, %2, (%1)\n" : "=&r"(old) : "r"(&l->v), "r"(1u) : "memory");
    return old == 0;
}
|
||||
|
||||
/* Release the lock. An AMO swap of 0 with .rl ordering is used so all
 * writes made inside the critical section are visible before the lock
 * appears free to other harts. */
void spin_unlock(spinlock_t *l) {
    // Release: store 0 with .rl ordering.
    uint32_t dummy; // swap result is unused; the store of 0 is the point
    __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
}
|
||||
|
||||
// Optional: tiny pause/backoff (works even if Zihintpause isn't present).
// See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc
// Busy-wait hint: tells the core we are spinning so it may save power or
// yield pipeline resources; degrades to a nop without Zihintpause.
static inline void cpu_relax(void) {
#if defined(__riscv_zihintpause)
    __asm__ volatile("pause");
#else
    // Keep the spin body non-empty with no architectural side effect.
    __asm__ volatile("nop");
#endif
}
|
||||
|
||||
// Test-and-test-and-set acquire with polite spinning + exponential backoff.
|
||||
void spin_lock(spinlock_t *l) {
|
||||
unsigned backoff = 1;
|
||||
for (;;) {
|
||||
if (spin_trylock(l))
|
||||
return;
|
||||
|
||||
// Contended: spin on plain loads (no AMO) until it looks free.
|
||||
while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
|
||||
for (unsigned i = 0; i < backoff; ++i) cpu_relax();
|
||||
if (backoff < 1u << 12)
|
||||
backoff <<= 1;
|
||||
}
|
||||
// Try again; loop.
|
||||
}
|
||||
}
|
||||
|
|
|
@ -48,4 +48,13 @@ void push_off(void);
|
|||
/** @copydoc pop_off */
|
||||
void pop_off(void);
|
||||
|
||||
/* Minimal test-and-set spinlock; v is the entire lock state. */
typedef struct {
    volatile uint32_t v; // 0 = unlocked, 1 = locked
} spinlock_t;

// NOTE(review): these prototypes use `bool`; the top of this header is not
// visible in this hunk — confirm it includes <stdbool.h> (or targets C23).
/* Put the lock into its released state. */
void spinlock_init(spinlock_t *l);
/* One non-blocking acquire attempt; true iff the lock was taken. */
bool spin_trylock(spinlock_t *l);
/* Release a lock taken with spin_lock/spin_trylock. */
void spin_unlock(spinlock_t *l);
/* Blocking acquire; spins with backoff until the lock is taken. */
void spin_lock(spinlock_t *l);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
#include <config.h>
|
||||
#include <ispinlock.h>
|
||||
#include <spinlock.h>
|
||||
#include <kalloc.h>
|
||||
#include <memory.h>
|
||||
#include <proc.h>
|
||||
|
|
Loading…
Add table
Reference in a new issue