summary | refs | log | tree | commit | diff
path: root/kernel/spinlock.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--  kernel/spinlock.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5a44a46..83512bb 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -18,8 +18,6 @@ initlock(struct spinlock *lk, char *name)
// Acquire the lock.
// Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause
-// other CPUs to waste time spinning to acquire it.
void
acquire(struct spinlock *lk)
{
@@ -27,7 +25,7 @@ acquire(struct spinlock *lk)
if(holding(lk))
panic("acquire");
- // On RISC-V, this turns into an atomic swap:
+ // On RISC-V, sync_lock_test_and_set turns into an atomic swap:
// a5 = 1
// s1 = &lk->locked
// amoswap.w.aq a5, a5, (s1)
@@ -59,9 +57,10 @@ release(struct spinlock *lk)
__sync_synchronize();
// Release the lock, equivalent to lk->locked = 0.
- // This code can't use a C assignment, since it might
- // not be atomic.
- // On RISC-V, this turns into an atomic swap:
+ // This code doesn't use a C assignment, since the C standard
+ // implies that an assignment might be implemented with
+ // multiple store instructions.
+ // On RISC-V, sync_lock_release turns into an atomic swap:
// s1 = &lk->locked
// amoswap.w zero, zero, (s1)
__sync_lock_release(&lk->locked);
@@ -81,7 +80,7 @@ holding(struct spinlock *lk)
}
// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
-// it takes two pop_off to undo two push_off. Also, if interrupts
+// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
// are initially off, then push_off, pop_off leaves them off.
void