ispinlock

This commit is contained in:
Imbus 2025-08-16 14:59:24 +02:00
parent 6c3030c896
commit dc0487648a
2 changed files with 56 additions and 0 deletions

44
kern/ispinlock.c Normal file
View file

@@ -0,0 +1,44 @@
#include "ispinlock.h"
/* Put the lock into its unlocked state (v == 0).
 * Must run before the lock is shared with other harts. */
void spinlock_init(spinlock_t *l) {
    *l = (spinlock_t){ .v = 0 };
}
/* Try to take the lock without blocking.
 * Atomically swaps 1 into l->v with acquire ordering (amoswap.w.aq),
 * so no access in the critical section can be reordered before the
 * lock is observed taken. Returns true iff the previous value was 0,
 * i.e. this hart won the lock. */
bool spin_trylock(spinlock_t *l) {
uint32_t old;
// old = xchg_acquire(&l->v, 1) using AMO
// "=&r" early-clobber: old must not share a register with the inputs;
// "memory" clobber stops the compiler hoisting protected accesses.
__asm__ volatile("amoswap.w.aq %0, %2, (%1)\n" : "=&r"(old) : "r"(&l->v), "r"(1u) : "memory");
return old == 0;
}
/* Release the lock.
 * Swaps 0 into l->v with release ordering (amoswap.w.rl), keeping
 * critical-section accesses from sinking below the unlock. The old
 * value the AMO returns is not needed and is discarded via dummy. */
void spin_unlock(spinlock_t *l) {
// Release: store 0 with .rl ordering.
uint32_t dummy;
__asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
}
/* Politeness hint for busy-wait loops.
 * Emits the Zihintpause "pause" hint when the toolchain advertises the
 * extension; otherwise a plain nop keeps the spin body non-empty so it
 * cannot be optimized into a tight zero-work loop. */
void cpu_relax(void) {
#ifdef __riscv_zihintpause
    __asm__ volatile("pause");
#else
    __asm__ volatile("nop");
#endif
}
/* Acquire the lock, spinning until it is available.
 * Test-and-test-and-set: the expensive AMO (spin_trylock) is only
 * retried once the lock looks free; while it is held we poll with
 * plain relaxed loads and back off exponentially (capped at 2^12
 * cpu_relax() iterations) to cut cache-line/bus traffic. */
void spin_lock(spinlock_t *l) {
    unsigned delay = 1;
    while (!spin_trylock(l)) {
        /* Contended: watch the word with cheap loads, no AMOs. */
        while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
            for (unsigned n = delay; n != 0; --n)
                cpu_relax();
            if (delay < (1u << 12))
                delay <<= 1;
        }
        /* Looks free: loop back and race for it with the AMO. */
    }
}

12
kern/ispinlock.h Normal file
View file

@@ -0,0 +1,12 @@
#pragma once
#include <types.h>
/* Minimal TTAS spinlock word for RISC-V; see kern/ispinlock.c. */
typedef struct {
volatile uint32_t v; // 0 = unlocked, 1 = locked
} spinlock_t;
/* Initialize *l to the unlocked state; call before first use. */
void spinlock_init(spinlock_t *l);
/* One non-blocking acquire attempt; true iff the lock was taken. */
bool spin_trylock(spinlock_t *l);
/* Release a lock held by the caller (release ordering). */
void spin_unlock(spinlock_t *l);
/* Busy-wait politeness hint (Zihintpause "pause" or nop). */
void cpu_relax(void);
/* Block (spin with backoff) until the lock is acquired. */
void spin_lock(spinlock_t *l);