Enable pop_off and push_off in spinlocks
parent 595ae0a8e2
commit e15b705eda

1 changed file with 9 additions and 8 deletions
@@ -67,7 +67,7 @@ uint32_t pop_off(void) {
     PANIC("pop_off - interruptible");
 
   if (cpu->noff < 1)
-    PANIC("pop_off");
+    PANIC("pop_off when cpu->noff < 1");
 
   cpu->noff -= 1;
 
@@ -89,8 +89,8 @@ __attribute__((warn_unused_result)) bool spin_trylock(spinlock_t *l) {
 }
 
 void spin_unlock(spinlock_t *l) {
-  // if (!spin_is_holding(l))
-  //   panic("spin_unlock");
+  if (!spin_is_holding(l))
+    PANIC("Unlocking a spinlock that is not held by the locking cpu!");
 
   l->cpu = 0;
 
@@ -98,10 +98,10 @@ void spin_unlock(spinlock_t *l) {
   uint32_t dummy;
   __asm__ volatile("amoswap.w.rl %0, %2, (%1)\n" : "=&r"(dummy) : "r"(&l->v), "r"(0u) : "memory");
 
-  // __sync_synchronize(); // No loads/stores after this point
+  __sync_synchronize(); // No loads/stores after this point
   // __sync_lock_release(&lk->locked); // Essentially lk->locked = 0
 
-  // pop_off();
+  pop_off();
 }
 
 /**
@@ -110,8 +110,11 @@ void spin_unlock(spinlock_t *l) {
 void spin_lock(spinlock_t *l) {
   uint32_t backoff = 1;
   for (;;) {
-    if (spin_trylock(l))
+    if (spin_trylock(l)) {
+      l->cpu = mycpu();
+      push_off();
       return;
+    }
 
     while (__atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0) {
       for (uint32_t i = 0; i < backoff; ++i)
@@ -120,8 +123,6 @@ void spin_lock(spinlock_t *l) {
       backoff <<= 1;
     }
   }
-
-  l->cpu = mycpu();
 }
 
 /**
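Note on push_off()/pop_off(): the first hunk shows only a fragment of pop_off(). The sketch below is an assumption of how the pair presumably fits together, in the usual xv6 style of nested interrupt-disable counting; the fields cpu->noff and cpu->intena and the helpers mycpu(), intr_get(), intr_on(), intr_off() are assumed here and may be named or shaped differently in this codebase.

// Sketch only: assumed shape of the push_off()/pop_off() pair.
// push_off() disables interrupts and remembers whether they were on
// before the *first* push; pop_off() undoes one nesting level and
// re-enables interrupts only when the outermost level is popped.
void push_off(void) {
  int old = intr_get();            // were interrupts enabled before this push?
  intr_off();                      // disable them for the critical section
  struct cpu *cpu = mycpu();
  if (cpu->noff == 0)
    cpu->intena = old;             // remember the state at the outermost push
  cpu->noff += 1;                  // one more nested level
}

uint32_t pop_off(void) {
  struct cpu *cpu = mycpu();
  if (intr_get())
    PANIC("pop_off - interruptible");      // interrupts must still be off here
  if (cpu->noff < 1)
    PANIC("pop_off when cpu->noff < 1");   // unbalanced push/pop
  cpu->noff -= 1;
  if (cpu->noff == 0 && cpu->intena)
    intr_on();                     // outermost pop: restore interrupt state
  return cpu->noff;                // assumed return value: remaining depth
}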
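spin_unlock() now refuses to release a lock that the current CPU does not hold. A minimal sketch of the spin_is_holding() check it relies on, assuming only the l->v lock word and the l->cpu owner field visible in the diff:

// Sketch only: the holder check spin_unlock() relies on.
// A lock is "held by us" when the lock word is set and the owner recorded
// by spin_lock() is the current CPU. Interrupts are assumed to be off
// (push_off) while this runs, so mycpu() cannot change underneath us.
bool spin_is_holding(spinlock_t *l) {
  return __atomic_load_n(&l->v, __ATOMIC_RELAXED) != 0 && l->cpu == mycpu();
}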
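With this change, callers no longer bracket critical sections with push_off()/pop_off() themselves; spin_lock() and spin_unlock() do it. A hypothetical usage sketch (the ticks counter and its lock are placeholders, and the lock is assumed to be zero-initialized elsewhere):

static spinlock_t ticks_lock;   // placeholder lock, assumed zero-initialized
static uint64_t ticks;          // placeholder shared counter

void timer_tick(void) {
  spin_lock(&ticks_lock);       // acquires, records the owning CPU, push_off()
  ticks += 1;                   // critical section
  spin_unlock(&ticks_lock);     // checks ownership, releases, pop_off()
}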