Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--  kernel/spinlock.c | 110
1 file changed, 110 insertions(+), 0 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
new file mode 100644
index 0000000..bbb7cb5
--- /dev/null
+++ b/kernel/spinlock.c
@@ -0,0 +1,110 @@
+// Mutual exclusion spin locks.
+
+#include "types.h"
+#include "param.h"
+#include "memlayout.h"
+#include "spinlock.h"
+#include "riscv.h"
+#include "proc.h"
+#include "defs.h"
+
+void
+initlock(struct spinlock *lk, char *name)
+{
+ lk->name = name;
+ lk->locked = 0;
+ lk->cpu = 0;
+}
+
+// Acquire the lock.
+// Loops (spins) until the lock is acquired.
+// Holding a lock for a long time may cause
+// other CPUs to waste time spinning to acquire it.
+void
+acquire(struct spinlock *lk)
+{
+ push_off(); // disable interrupts to avoid deadlock.
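+ // (Deadlock example: if an interrupt handler tried to acquire a
+ // lock this CPU already holds, the handler would spin forever,
+ // since the holder can't run to release it while the handler
+ // spins. Disabling interrupts first rules that out.)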
+ if(holding(lk))
+ panic("acquire");
+
+ // On RISC-V, sync_lock_test_and_set turns into an atomic swap:
+ //   a5 = 1
+ //   s1 = &lk->locked
+ //   amoswap.w.aq a5, a5, (s1)
+ while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
+ ;
+
+ // Tell the C compiler and the processor to not move loads or stores
+ // past this point, to ensure that the critical section's memory
+ // references happen after the lock is acquired.
+ __sync_synchronize();
+
+ // Record info about lock acquisition for holding() and debugging.
+ lk->cpu = mycpu();
+}
+
+// Release the lock.
+void
+release(struct spinlock *lk)
+{
+ if(!holding(lk))
+ panic("release");
+
+ lk->cpu = 0;
+
+ // Tell the C compiler and the processor to not move loads or stores
+ // past this point, to ensure that all the stores in the critical
+ // section are visible to other cores before the lock is released.
+ // Both the C compiler and the hardware may re-order loads and
+ // stores; __sync_synchronize() tells them both not to.
+ // On RISC-V, this turns into a fence instruction.
+ __sync_synchronize();
+
+ // Release the lock, equivalent to lk->locked = 0.
+ // This code doesn't use a plain C assignment, since the C standard
+ // allows an assignment to be compiled into multiple store
+ // instructions, which would not be atomic.
+ // On RISC-V, sync_lock_release turns into an atomic swap (amoswap).
+ __sync_lock_release(&lk->locked);
+
+ pop_off();
+}
+
+// Check whether this cpu is holding the lock.
+int
+holding(struct spinlock *lk)
+{
+ int r;
+ push_off();
+ r = (lk->locked && lk->cpu == mycpu());
+ pop_off();
+ return r;
+}
+
+// push_off()/pop_off() are like intr_off()/intr_on() except that they are
+// matched: it takes two pop_off()s to undo two push_off()s. Also, if
+// interrupts are initially off, then push_off(); pop_off() leaves them off.
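+// For example, starting with interrupts enabled:
+//   push_off();  // noff 0 -> 1, intena = 1, interrupts now off
+//   push_off();  // noff 1 -> 2, interrupts stay off
+//   pop_off();   // noff 2 -> 1, interrupts stay off
+//   pop_off();   // noff 1 -> 0, intena was 1, interrupts back on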
+
+void
+push_off(void)
+{
+ struct cpu *c = mycpu();
+ int old = intr_get();
+
+ intr_off();
+ if(c->noff == 0)
+ c->intena = old;
+ c->noff += 1;
+}
+
+void
+pop_off(void)
+{
+ struct cpu *c = mycpu();
+ if(intr_get())
+ panic("pop_off - interruptible");
+ c->noff -= 1;
+ if(c->noff < 0)
+ panic("pop_off");
+ if(c->noff == 0 && c->intena)
+ intr_on();
+}
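
Usage note: callers bracket a critical section with acquire()/release(),
and must not hold a spinlock across anything that sleeps, since acquire()
spins with interrupts off. The sketch below is illustrative and not part
of this commit; tickslock, nticks, clockinit, and clockintr are stand-in
names for any spinlock-protected data and its users (xv6 guards its tick
counter in trap.c in much the same way):

    #include "types.h"
    #include "spinlock.h"
    #include "defs.h"

    struct spinlock tickslock;  // protects nticks
    uint nticks;

    void
    clockinit(void)
    {
      initlock(&tickslock, "ticks");
    }

    void
    clockintr(void)
    {
      acquire(&tickslock);  // spin until this CPU owns the lock
      nticks += 1;          // critical section: nticks is safe to modify
      release(&tickslock);  // publish the store, then free the lock
    }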