From e15b705eda11b2742c724a2973bee33211f499bb Mon Sep 17 00:00:00 2001
From: Imbus <>
Date: Tue, 2 Sep 2025 04:30:00 +0200
Subject: [PATCH] Enable pop_off and push_off in spinlocks

---
 kern/libkern/spinlock.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/kern/libkern/spinlock.c b/kern/libkern/spinlock.c
index 7dc6658..b9ccd8b 100644
--- a/kern/libkern/spinlock.c
+++ b/kern/libkern/spinlock.c
@@ -67,7 +67,7 @@ uint32_t pop_off(void) {
         PANIC("pop_off - interruptible");
 
     if (cpu->noff < 1)
-        PANIC("pop_off");
+        PANIC("pop_off when cpu->noff < 1");
 
     cpu->noff -= 1;
 
@@ -89,8 +89,8 @@ __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
 }
 
 void spin_unlock(spinlock_t *l) {
-    // if (!spin_is_holding(l))
-    //     panic("spin_unlock");
+    if (!spin_is_holding(l))
+        PANIC("Unlocking a spinlock that is not held by the locking cpu!");
 
     l->cpu = 0;
 
@@ -98,10 +98,10 @@ void spin_unlock(spinlock_t *l) {
     uint32_t dummy;
     __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
 
-    // __sync_synchronize(); // No loads/stores after this point
+    __sync_synchronize(); // No loads/stores after this point
     // __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
 
-    // pop_off();
+    pop_off();
 }
 
 /**
@@ -110,8 +110,11 @@ void spin_unlock(spinlock_t *l) {
 void spin_lock(spinlock_t *l) {
     uint32_t backoff = 1;
     for (;;) {
-        if (spin_trylock(l))
+        if (spin_trylock(l)) {
+            l->cpu = mycpu();
+            push_off();
             return;
+        }
         while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
             for (uint32_t i = 0; i < backoff; ++i)
                 ;
@@ -120,8 +123,6 @@ void spin_lock(spinlock_t *l) {
             backoff <<= 1;
         }
     }
-
-    l->cpu = mycpu();
 }
 
 /**
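
The hunks above start calling push_off() in spin_lock() and pop_off() in spin_unlock(), but push_off() itself is defined outside this diff. Below is a minimal sketch of the usual xv6-style implementation on RISC-V, for context only: the sstatus helpers and the intena field are assumptions and may not match what kern/libkern/spinlock.c actually contains; struct cpu, mycpu() and PANIC() are declared here only so the sketch stands alone.

/*
 * Sketch (not part of the patch): xv6-style interrupt-disable nesting
 * as push_off()/pop_off() are commonly implemented on RISC-V.
 */
#include <stdint.h>

#define SSTATUS_SIE (1UL << 1)  /* supervisor interrupt-enable bit in sstatus */

struct cpu {
    int noff;    /* nesting depth of push_off(); the patch reads cpu->noff */
    int intena;  /* assumed: interrupt state before the first push_off() */
};

struct cpu *mycpu(void);      /* provided by the kernel (used in the patch) */
void PANIC(const char *msg);  /* in the repository this may be a macro */

static inline uint64_t r_sstatus(void) {
    uint64_t x;
    __asm__ volatile("csrr %0, sstatus" : "=r"(x));
    return x;
}
static inline void w_sstatus(uint64_t x) {
    __asm__ volatile("csrw sstatus, %0" : : "r"(x));
}
static inline int  intr_get(void) { return (r_sstatus() & SSTATUS_SIE) != 0; }
static inline void intr_off(void) { w_sstatus(r_sstatus() & ~SSTATUS_SIE); }
static inline void intr_on(void)  { w_sstatus(r_sstatus() | SSTATUS_SIE); }

/* Disable interrupts, remembering their previous state; calls may nest. */
void push_off(void) {
    int old = intr_get();

    intr_off();
    if (mycpu()->noff == 0)
        mycpu()->intena = old;
    mycpu()->noff += 1;
}

/* Undo one push_off(); interrupts come back on only at the outermost level. */
void pop_off(void) {
    struct cpu *cpu = mycpu();

    if (intr_get())
        PANIC("pop_off - interruptible");
    if (cpu->noff < 1)
        PANIC("pop_off when cpu->noff < 1");
    cpu->noff -= 1;
    if (cpu->noff == 0 && cpu->intena)
        intr_on();
}

The noff counter lets lock acquisitions nest on one hart: interrupts are re-enabled only when the outermost pop_off() runs, and only if they were enabled before the first push_off().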