path: root/spinlock.c
author    Robert Morris <[email protected]>  2019-06-11 09:57:14 -0400
committer Robert Morris <[email protected]>  2019-06-11 09:57:14 -0400
commit    5753553213df8f9de851adb68377db43faecb91f (patch)
tree      3b629ff54897fca414146677532cb459a2ed11ba /spinlock.c
parent    91ba81110acd3163f7de3580b677eece0c57f5e7 (diff)
separate source into kernel/ user/ mkfs/
Diffstat (limited to 'spinlock.c')
-rw-r--r--  spinlock.c  110
1 file changed, 0 insertions, 110 deletions
diff --git a/spinlock.c b/spinlock.c
deleted file mode 100644
index bbb7cb5..0000000
--- a/spinlock.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// Mutual exclusion spin locks.
-
-#include "types.h"
-#include "param.h"
-#include "memlayout.h"
-#include "spinlock.h"
-#include "riscv.h"
-#include "proc.h"
-#include "defs.h"
-
-void
-initlock(struct spinlock *lk, char *name)
-{
- lk->name = name;
- lk->locked = 0;
- lk->cpu = 0;
-}
-
-// Acquire the lock.
-// Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause
-// other CPUs to waste time spinning to acquire it.
-void
-acquire(struct spinlock *lk)
-{
- push_off(); // disable interrupts to avoid deadlock.
- if(holding(lk))
- panic("acquire");
-
- // On RISC-V, __sync_lock_test_and_set turns into an atomic
- // swap (amoswap): it writes 1 into lk->locked and returns
- // the old value, so the loop spins until the lock was free.
- while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
- ;
-
- // Tell the C compiler and the processor to not move loads or stores
- // past this point, to ensure that the critical section's memory
- // references happen after the lock is acquired.
- __sync_synchronize();
-
- // Record info about lock acquisition for holding() and debugging.
- lk->cpu = mycpu();
-}
-
-// Release the lock.
-void
-release(struct spinlock *lk)
-{
- if(!holding(lk))
- panic("release");
-
- lk->cpu = 0;
-
- // Tell the C compiler and the processor to not move loads or stores
- // past this point, to ensure that all the stores in the critical
- // section are visible to other cores before the lock is released.
- // Both the C compiler and the hardware may re-order loads and
- // stores; __sync_synchronize() tells them both not to.
- // On RISC-V, this turns into a fence instruction.
- __sync_synchronize();
-
- // Release the lock, equivalent to lk->locked = 0.
- // This code can't use a plain C assignment, since an
- // assignment might not be atomic. A real OS would use
- // C atomics here. On RISC-V, __sync_lock_release turns
- // into an atomic swap (amoswap) of zero into lk->locked.
- __sync_lock_release(&lk->locked);
-
- pop_off();
-}
-
-// Check whether this cpu is holding the lock.
-int
-holding(struct spinlock *lk)
-{
- int r;
- push_off();
- r = (lk->locked && lk->cpu == mycpu());
- pop_off();
- return r;
-}
-
-// push_off/pop_off are like intr_off()/intr_on() except that they are
-// matched: it takes two pop_off()s to undo two push_off()s. Also, if
-// interrupts are initially off, then push_off, pop_off leaves them off.
-
-void
-push_off(void)
-{
- struct cpu *c = mycpu();
- int old = intr_get();
-
- intr_off();
- if(c->noff == 0)
- c->intena = old;
- c->noff += 1;
-}
-
-void
-pop_off(void)
-{
- struct cpu *c = mycpu();
- if(intr_get())
- panic("pop_off - interruptible");
- c->noff -= 1;
- if(c->noff < 0)
- panic("pop_off");
- if(c->noff == 0 && c->intena)
- intr_on();
-}
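The acquire/release pair above is the whole test-and-set protocol: an atomic swap to take the lock, a memory barrier on each side of the critical section, and an atomic store of zero to release. As a rough illustration of how these primitives fit together, here is a minimal user-space sketch that reuses the same GCC __sync builtins. The pthreads harness, the names, and the shared counter are illustrative assumptions, not xv6 code, and the kernel's push_off/pop_off interrupt handling has no user-space analogue.

// Minimal user-space sketch of the same test-and-set spinlock,
// built on the same GCC __sync builtins as the kernel code above.
// Build with: gcc -O2 -pthread sketch.c
#include <pthread.h>
#include <stdio.h>

struct spinlock {
  volatile unsigned locked;   // 0 = free, 1 = held
};

static void
spin_acquire(struct spinlock *lk)
{
  // Atomic swap: write 1, get the old value; spin while it was 1.
  while (__sync_lock_test_and_set(&lk->locked, 1) != 0)
    ;
  // Barrier, mirroring the kernel's acquire(): critical-section
  // memory references must not move before the lock is taken.
  __sync_synchronize();
}

static void
spin_release(struct spinlock *lk)
{
  // Barrier, then atomically store 0, mirroring release().
  __sync_synchronize();
  __sync_lock_release(&lk->locked);
}

static struct spinlock lk;
static long counter;

static void *
worker(void *arg)
{
  (void)arg;
  for (int i = 0; i < 1000000; i++) {
    spin_acquire(&lk);
    counter++;                // critical section
    spin_release(&lk);
  }
  return 0;
}

int
main(void)
{
  pthread_t t[4];
  for (int i = 0; i < 4; i++)
    pthread_create(&t[i], 0, worker, 0);
  for (int i = 0; i < 4; i++)
    pthread_join(t[i], 0);
  printf("counter = %ld (expect 4000000)\n", counter);
  return 0;
}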
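The push_off/pop_off nesting rule can be modeled outside the kernel as well. The sketch below replaces intr_get/intr_off/intr_on with a plain flag variable and the per-CPU noff/intena fields with globals; all names here are hypothetical, and the point is only the counting logic: interrupts come back on solely at the outermost pop, and only if they were on before the first push.

// Stand-alone model of push_off/pop_off's matched nesting.
// The interrupt-enable flag is simulated with a plain variable.
#include <assert.h>
#include <stdio.h>

static int intena_flag = 1;  // simulated interrupt-enable flag
static int noff;             // depth of push_off nesting
static int saved_intena;     // flag state before the first push_off

static void
push_off_model(void)
{
  int old = intena_flag;
  intena_flag = 0;           // "disable interrupts"
  if (noff == 0)
    saved_intena = old;      // remember state at the outermost push
  noff += 1;
}

static void
pop_off_model(void)
{
  assert(!intena_flag);      // mirrors panic("pop_off - interruptible")
  noff -= 1;
  assert(noff >= 0);         // mirrors panic("pop_off")
  if (noff == 0 && saved_intena)
    intena_flag = 1;         // re-enable only at the outermost pop
}

int
main(void)
{
  push_off_model();          // e.g. from acquire() of a first lock
  push_off_model();          // nested, e.g. acquiring a second lock
  pop_off_model();           // inner pop: interrupts stay off
  assert(intena_flag == 0);
  pop_off_model();           // outer pop: original state restored
  assert(intena_flag == 1);
  printf("nesting model ok\n");
  return 0;
}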