#include "ispinlock.h" void spinlock_init(spinlock_t *l) { l->v = 0; } __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) { uint32_t old; // old = xchg_acquire(&l->v, 1) using AMO __asm__ volatile("amoswap.w.aq %0, %2, (%1)\n" : "=&r"(old) : "r"(&l->v), "r"(1u) : "memory"); return old == 0; } void spin_unlock(spinlock_t *l) { // Release: store 0 with .rl ordering. uint32_t dummy; __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory"); } // Optional: tiny pause/backoff (works even if Zihintpause isn't present). // See: https://github.com/riscv/riscv-isa-manual/blob/main/src/zihintpause.adoc static inline void cpu_relax(void) { #if defined(__riscv_zihintpause) __asm__ volatile("pause"); #else __asm__ volatile("nop"); #endif } // Test-and-test-and-set acquire with polite spinning + exponential backoff. void spin_lock(spinlock_t *l) { unsigned backoff = 1; for (;;) { if (spin_trylock(l)) return; // Contended: spin on plain loads (no AMO) until it looks free. while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) { for (unsigned i = 0; i < backoff; ++i) cpu_relax(); if (backoff < 1u << 12) backoff <<= 1; } // Try again; loop. } }