From a02c1fcebd4d8d33f8a16e43833fe34c951fa982 Mon Sep 17 00:00:00 2001
From: Imbus <>
Date: Tue, 2 Sep 2025 04:43:55 +0200
Subject: [PATCH] Use compiler builtins for spin_unlock, correct
 initialization for spinlock_init

---
 kern/libkern/spinlock.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/kern/libkern/spinlock.c b/kern/libkern/spinlock.c
index b9ccd8b..7fdf294 100644
--- a/kern/libkern/spinlock.c
+++ b/kern/libkern/spinlock.c
@@ -3,6 +3,7 @@
  * (Not mutexes as these are spinning locks).
  */
 
+#include "stddef.h"
 #include
 #include
 #include
@@ -79,6 +80,7 @@ uint32_t pop_off(void) {
 
 void spinlock_init(spinlock_t *l) {
     l->v = 0;
+    l->cpu = NULL;
 }
 
 __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
@@ -92,15 +94,11 @@ void spin_unlock(spinlock_t *l) {
     if (!spin_is_holding(l))
         PANIC("Unlocking a spinlock that is not held by the locking cpu!");
 
-    l->cpu = 0;
-
-    // Release: store 0 with .rl ordering.
-    uint32_t dummy;
-    __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
-
-    __sync_synchronize(); // No loads/stores after this point
-    // __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
+    /* TODO: Replace with corresponding __atomic builtins */
+    l->cpu = NULL;                /* Clear the owner while the lock is still held */
+    __sync_synchronize();         /* No loads/stores after this point */
+    __sync_lock_release(&l->v);   /* Essentially l->v = 0, with release semantics */
 
     pop_off();
 }
 
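
Note (not part of the patch): the TODO in spin_unlock points at the __atomic
builtins as the eventual replacement for the legacy __sync ones. Below is a
minimal sketch of what that follow-up could look like, assuming the same
spinlock_t fields (v, cpu) and helpers (spin_is_holding, PANIC, pop_off) that
appear in the diff; it is untested and only illustrates the direction.

    void spin_unlock(spinlock_t *l) {
        if (!spin_is_holding(l))
            PANIC("Unlocking a spinlock that is not held by the locking cpu!");

        /* Clear the owner while the lock is still held, then publish the
         * unlock with a single release-ordered store. The release ordering
         * keeps all critical-section accesses before the store to l->v. */
        l->cpu = NULL;
        __atomic_store_n(&l->v, 0, __ATOMIC_RELEASE);

        pop_off();
    }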