author    rsc <rsc>  2007-10-01 20:43:15 +0000
committer rsc <rsc>  2007-10-01 20:43:15 +0000
commit    943fd378a1324ca60da72b271769fea4a86e36cb (patch)
tree      a2510dc65a996e7d7fc49ab1e594ccb5a45f20ba /spinlock.c
parent    9fd9f80431ad85552c0969831a3ccc3e800ac464 (diff)
Incorporate new understanding of the Intel SMP spec.
Drop cmpxchg in favor of xchg, to match the lecture notes. Use xchg to release the lock too, for future protection and to keep gcc from getting clever.
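For reference, the xchg wrapper the new code calls is defined in x86.h rather than in this file. A minimal sketch of it, assuming the inline-assembly form from later xv6 sources (uint is xv6's typedef for unsigned int):

// Sketch of the atomic exchange wrapper from x86.h; details assumed
// from later xv6 sources, not shown in this commit.
static inline uint
xchg(volatile uint *addr, uint newval)
{
  uint result;

  // lock; xchgl atomically swaps *addr with newval and acts as a
  // full memory barrier. "+m" marks *addr as read-modify-write;
  // the old value is returned in eax ("=a").
  asm volatile("lock; xchgl %0, %1" :
               "+m" (*addr), "=a" (result) :
               "1" (newval) :
               "cc");
  return result;
}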
Diffstat (limited to 'spinlock.c')
-rw-r--r--  spinlock.c | 19 +++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/spinlock.c b/spinlock.c
index c00c978..ba5bb4a 100644
--- a/spinlock.c
+++ b/spinlock.c
@@ -10,12 +10,6 @@
 
 extern int use_console_lock;
 
-// Barrier to gcc's instruction reordering.
-static void inline gccbarrier(void)
-{
-  asm volatile("" : : : "memory");
-}
-
 void
 initlock(struct spinlock *lock, char *name)
 {
@@ -35,7 +29,10 @@ acquire(struct spinlock *lock)
   if(holding(lock))
     panic("acquire");
 
-  while(cmpxchg(0, 1, &lock->locked) == 1)
+  // The xchg is atomic.
+  // It also serializes, so that reads after acquire are not
+  // reordered before it.
+  while(xchg(&lock->locked, 1) == 1)
     ;
 
   // Record info about lock acquisition for debugging.
@@ -56,8 +53,12 @@ release(struct spinlock *lock)
   lock->pcs[0] = 0;
   lock->cpu = 0xffffffff;
 
-  gccbarrier();  // Keep gcc from moving lock->locked = 0 earlier.
-  lock->locked = 0;
+  // The xchg serializes, so that reads before release are
+  // not reordered after it. (This reordering would be allowed
+  // by the Intel manuals, but does not happen on current
+  // Intel processors.  The xchg being asm volatile also keeps
+  // gcc from delaying the above assignments.)
+  xchg(&lock->locked, 0);
 
   popcli();
 }
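For context, a minimal usage sketch (a hypothetical caller, not part of this commit) showing how the acquire/release pair brackets access to shared state:

// Hypothetical example; the lock and counter names are illustrative.
struct spinlock counterlock;   // initialized elsewhere via initlock()
int counter;

void
increment(void)
{
  acquire(&counterlock);   // spins in the xchg loop until locked goes 0 -> 1
  counter++;               // cannot be reordered before the acquiring xchg
  release(&counterlock);   // xchg(&lock->locked, 0) publishes the update
}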