author    Frans Kaashoek <[email protected]>  2019-07-02 13:40:33 -0400
committer Frans Kaashoek <[email protected]>  2019-07-02 13:40:33 -0400
commit    da51735980e500922bc108a3444b64ac9450032e (patch)
tree      76ad738ff42f56a36ea2300b2d90946e64d5040e /kernel
parent    67702cf706bce7adef472f0caa48d81ddfaeb33a (diff)
Avoid two cores selecting the same process to run
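The change serializes the scheduler's scan: the new find_runnable() helper holds ptable.lock for the whole scan and returns the chosen process with p->lock still held, so checking p->state and claiming the process are atomic with respect to the other CPUs' schedulers. A minimal sketch of that claim-under-lock pattern, with hypothetical names (entry, claim, scan_lock — illustrative, not part of the patch):

// One scan lock serializes selection; the per-entry lock is handed
// back to the caller, so no other CPU can re-select the same entry
// before the caller is done with it.
struct entry {
  int runnable;
  struct spinlock lock;
};

struct entry *
claim(struct entry *tab, int n, struct spinlock *scan_lock)
{
  struct entry *e;
  acquire(scan_lock);          // only one CPU scans at a time
  for(e = tab; e < &tab[n]; e++){
    acquire(&e->lock);         // pin this entry's state
    if(e->runnable){
      release(scan_lock);
      return e;                // caller must release e->lock
    }
    release(&e->lock);
  }
  release(scan_lock);
  return 0;
}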
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/proc.c      | 59
-rw-r--r--  kernel/riscv.h     |  9
-rw-r--r--  kernel/spinlock.c  | 11
-rw-r--r--  kernel/spinlock.h  |  2
4 files changed, 52 insertions(+), 29 deletions(-)
diff --git a/kernel/proc.c b/kernel/proc.c
index ae35105..b5d929d 100644
--- a/kernel/proc.c
+++ b/kernel/proc.c
@@ -297,15 +297,16 @@ exit(void)
panic("zombie exit");
}
-void tellparent(struct proc *p, struct proc *parent) {
+void reparent(struct proc *p) {
struct proc *pp;
+ struct proc *parent = p->parent;
acquire(&parent->lock);
// Parent might be sleeping in wait().
wakeup1(parent);
- // Pass abandoned children to init.
+ // Pass p's abandoned children to init.
for(pp = ptable.proc; pp < &ptable.proc[NPROC]; pp++){
if(pp->parent == p){
pp->parent = initproc;
@@ -329,8 +330,6 @@ wait(void)
int havekids, pid;
struct proc *p = myproc();
- // should p lock!
-
acquire(&p->lock);
for(;;){
// Scan through table looking for exited children.
@@ -367,11 +366,27 @@ wait(void)
return -1;
}
- // Wait for children to exit. (See wakeup1 call in tellparent.)
+ // Wait for children to exit. (See wakeup1 call in reparent.)
sleep(p, &p->lock); //DOC: wait-sleep
}
}
+// Loop over process table looking for process to run.
+struct proc *find_runnable() {
+ struct proc *p;
+ acquire(&ptable.lock);
+ for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
+ acquire(&p->lock);
+ if(p->state == RUNNABLE) {
+ release(&ptable.lock);
+ return p;
+ }
+ release(&p->lock);
+ }
+ release(&ptable.lock);
+ return 0;
+}
+
//PAGEBREAK: 42
// Per-CPU process scheduler.
// Each CPU calls scheduler() after setting itself up.
@@ -391,15 +406,7 @@ scheduler(void)
// Enable interrupts on this processor.
intr_on();
- // Loop over process table looking for process to run.
- for(p = ptable.proc; p < &ptable.proc[NPROC]; p++) {
- acquire(&p->lock);
-
- if(p->state != RUNNABLE) {
- release(&p->lock);
- continue;
- }
-
+ if((p = find_runnable()) != 0){
// Switch to chosen process. It is the process's job
// to release its lock and then reacquire it
// before jumping back to us.
@@ -412,11 +419,9 @@ scheduler(void)
// It should have changed its p->state before coming back.
c->proc = 0;
release(&p->lock);
-
if(p->state == ZOMBIE) {
- tellparent(p, p->parent);
+ reparent(p);
}
-
}
}
}
@@ -485,7 +490,9 @@ forkret(void)
void
sleep(void *chan, struct spinlock *lk)
{
- if(myproc() == 0)
+ struct proc *p = myproc();
+
+ if(p == 0)
panic("sleep");
if(lk == 0)
@@ -497,29 +504,29 @@ sleep(void *chan, struct spinlock *lk)
// guaranteed that we won't miss any wakeup
// (wakeup runs with p->lock locked),
// so it's okay to release lk.
- if(lk != &myproc()->lock){ //DOC: sleeplock0
- acquire(&myproc()->lock); //DOC: sleeplock1
+ if(lk != &p->lock){ //DOC: sleeplock0
+ acquire(&p->lock); //DOC: sleeplock1
release(lk);
}
// Go to sleep.
- myproc()->chan = chan;
- myproc()->state = SLEEPING;
+ p->chan = chan;
+ p->state = SLEEPING;
sched();
// Tidy up.
- myproc()->chan = 0;
+ p->chan = 0;
// Reacquire original lock.
- if(lk != &myproc()->lock){ //DOC: sleeplock2
- release(&myproc()->lock);
+ if(lk != &p->lock){ //DOC: sleeplock2
+ release(&p->lock);
acquire(lk);
}
}
//PAGEBREAK!
// Wake up all processes sleeping on chan,
-// where chan is a proc.
+// where chan is a proc, which is locked.
static void
wakeup1(struct proc *chan)
{
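Two details of the proc.c changes are worth noting. First, find_runnable() returns with the chosen process's p->lock held, and scheduler() releases it only after swtch() returns, so the RUNNABLE check and the context switch form a single critical section. Second, wakeup1()'s comment now states its contract explicitly: the caller must hold the proc's lock, which is exactly what reparent() does. The call pattern, condensed from the hunks above:

// Caller-side contract for wakeup1 after this patch (the pattern
// reparent() follows): chan is a proc, and it must be locked.
acquire(&parent->lock);
wakeup1(parent);        // parent may be sleeping in wait()
// ... update other state under parent->lock ...
release(&parent->lock);

The sleep() rewrite is pure cleanup: caching myproc() in a local p avoids the repeated lookups without changing behavior.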
diff --git a/kernel/riscv.h b/kernel/riscv.h
index c3371a4..e5c0f64 100644
--- a/kernel/riscv.h
+++ b/kernel/riscv.h
@@ -304,6 +304,15 @@ w_tp(uint64 x)
asm volatile("mv tp, %0" : : "r" (x));
}
+static inline uint64
+r_ra()
+{
+ uint64 x;
+ asm volatile("mv %0, ra" : "=r" (x) );
+ return x;
+}
+
+
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
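The new r_ra() accessor follows the same pattern as the file's other register helpers (w_tp() just above it): a one-instruction inline asm that copies the RISC-V return-address register ra into a C variable. Because the function is static inline, the value it reads is in practice the caller's return address. A hypothetical use for debugging (not in the patch):

// Record where we were called from; this is the same value that
// release() captures with its own inline asm in spinlock.c.
uint64 caller = r_ra();
printf("called from %p\n", caller);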
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index c238091..1d0b16a 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -20,7 +20,7 @@ initlock(struct spinlock *lk, char *name)
// Loops (spins) until the lock is acquired.
// Holding a lock for a long time may cause
// other CPUs to waste time spinning to acquire it.
-void //__attribute__ ((noinline))
+void
acquire(struct spinlock *lk)
{
push_off(); // disable interrupts to avoid deadlock.
@@ -44,14 +44,19 @@ acquire(struct spinlock *lk)
}
// Release the lock.
-void //__attribute__ ((noinline))
+void
release(struct spinlock *lk)
{
+ uint64 x;
+ asm volatile("mv %0, ra" : "=r" (x) );
if(!holding(lk)) {
- printf("%p: !holding %s %p\n", mycpu(), lk->name, lk->cpu);
+ printf("%p: !holding %d %s %p %p %p %p\n", mycpu(), lk->locked, lk->name, lk->cpu,
+ lk->last_release, lk->last_pc, x);
panic("release");
}
+ lk->last_release = lk->cpu;
+ lk->last_pc = x;
lk->cpu = 0;
// Tell the C compiler and the CPU to not move loads or stores
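release() now snapshots ra before the holding() check and, on the way out, records which CPU released the lock and from what PC in the new last_release and last_pc fields, so a failed holding() check panics with enough context to identify both the previous releaser and the current bogus caller. (The asm here duplicates what r_ra() in riscv.h provides.) A minimal sequence, illustrative only, that would trip the diagnostic:

struct spinlock lk;
initlock(&lk, "demo");
acquire(&lk);
release(&lk);   // ok: records last_release and last_pc
release(&lk);   // !holding -> prints the debug info, panic("release")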
diff --git a/kernel/spinlock.h b/kernel/spinlock.h
index 4392820..5f244c9 100644
--- a/kernel/spinlock.h
+++ b/kernel/spinlock.h
@@ -5,5 +5,7 @@ struct spinlock {
// For debugging:
char *name; // Name of lock.
struct cpu *cpu; // The cpu holding the lock.
+ struct cpu *last_release;
+ uint64 last_pc;
};
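The two new fields extend the existing "For debugging:" block and are written only by release(), as shown above: last_release is the CPU that last released the lock, and last_pc is the return address it was released from. They add no work to acquire() and do not change the lock's semantics.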