| author    | rsc &lt;rsc&gt;                                        | 2007-09-27 21:25:37 +0000 |
|-----------|--------------------------------------------------|---------------------------|
| committer | rsc &lt;rsc&gt;                                        | 2007-09-27 21:25:37 +0000 |
| commit    | ab08960f6402f5c7cbb7b6e81694a60b6abed4c8 (patch) |                           |
| tree      | 156a197126bdfb99bb5081b7fe78ee3e8528aa5b         |                           |
| parent    | f97f0d2b3d3afbad3ef154b047f1b0408fd7288b (diff)  |                           |
Final word on the locking fiasco?
Change pushcli / popcli so that they can never turn on
interrupts unexpectedly. That is, if interrupts are on,
then pushcli(); popcli(); turns them off and back on, but
if they are off to begin with, then pushcli(); popcli(); is
a no-op.
I think our fundamental mistake was having a primitive
(release, and then popcli, née spllo) that could turn
interrupts on at unexpected moments, instead of being
explicit about when we want to start allowing interrupts.
With the new semantics, all the manual fiddling of ncli
to force interrupts off in certain sections goes away.
In return, we must explicitly mark the places where
we want to enable interrupts unconditionally, by calling sti().
There is only one: inside the scheduler loop.
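
To make the new contract concrete, here is a small user-space C sketch of the behavior described above. It is only an illustration under stated assumptions: cli(), sti(), and read_eflags() are stubbed with a plain flag, and a single fake per-CPU record stands in for cpus[cpu()]. The real implementation is in the spinlock.c and proc.h hunks below, not this.

```c
// User-space sketch of the pushcli/popcli contract described in the
// commit message.  The interrupt flag and per-CPU state are simulated
// so the nesting behavior can be compiled and checked directly.
#include <assert.h>
#include <stdio.h>

static int int_enabled = 1;            // stand-in for the x86 IF flag

static void cli(void) { int_enabled = 0; }
static void sti(void) { int_enabled = 1; }

static struct {
  int ncli;                            // depth of pushcli nesting
  int intena;                          // were interrupts enabled before pushcli?
} cpu0;

static void
pushcli(void)
{
  int was_enabled = int_enabled;       // real code: read_eflags() & FL_IF
  cli();
  if(cpu0.ncli++ == 0)
    cpu0.intena = was_enabled;         // remember state only at the outermost push
}

static void
popcli(void)
{
  assert(!int_enabled);                // real code: panic("popcli - interruptible")
  if(--cpu0.ncli < 0)
    assert(0 && "popcli underflow");   // real code: panic("popcli")
  if(cpu0.ncli == 0 && cpu0.intena)
    sti();                             // re-enable only what pushcli saw enabled
}

int
main(void)
{
  // Interrupts on: a matched pair turns them off and back on.
  pushcli(); popcli();
  assert(int_enabled == 1);

  // Interrupts off to begin with: the pair is a no-op.
  cli();
  pushcli(); popcli();
  assert(int_enabled == 0);
  sti();

  // Nested pairs: only the outermost popcli can re-enable.
  pushcli(); pushcli(); popcli();
  assert(int_enabled == 0);
  popcli();
  assert(int_enabled == 1);

  printf("pushcli/popcli semantics hold\n");
  return 0;
}
```

Running the sketch simply checks the three cases spelled out in the message: on, then off, then back on; off stays off; and nested pairs restore state only at the outermost popcli.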
| -rw-r--r-- | main.c     | 25 |
| -rw-r--r-- | proc.c     |  9 |
| -rw-r--r-- | proc.h     |  4 |
| -rw-r--r-- | spinlock.c | 14 |
| -rw-r--r-- | trap.c     |  5 |

5 files changed, 24 insertions, 33 deletions
diff --git a/main.c b/main.c
--- a/main.c
+++ b/main.c
@@ -12,19 +12,13 @@ static void mpmain(void) __attribute__((noreturn));
 int
 main(void)
 {
-  int bcpu, i;
   extern char edata[], end[];
 
   // clear BSS
   memset(edata, 0, end - edata);
 
-  // pushcli() every processor during bootstrap.
-  for(i=0; i<NCPU; i++)
-    cpus[i].ncli = 1;    // no interrupts during bootstrap
-
   mp_init(); // collect info about this machine
-  bcpu = mp_bcpu();
-  lapic_init(bcpu);
+  lapic_init(mp_bcpu());
   cprintf("\ncpu%d: starting xv6\n\n", cpu());
 
   pinit();         // process table
@@ -38,19 +32,15 @@ main(void)
   console_init();  // I/O devices & their interrupts
   ide_init();      // disk
   if(!ismp)
-    timer_init();    // uniprocessor timer
+    timer_init();  // uniprocessor timer
   userinit();      // first user process
+  bootothers();    // start other processors
 
-  // Allocate scheduler stacks and boot the other CPUs.
-  for(i=0; i<ncpu; i++)
-    cpus[i].stack = kalloc(KSTACKSIZE);
-  bootothers();
-
-  // Switch to our scheduler stack and continue with mpmain.
-  asm volatile("movl %0, %%esp" : : "r" (cpus[bcpu].stack+KSTACKSIZE));
+  // Finish setting up this processor in mpmain.
   mpmain();
 }
 
+// Bootstrap processor gets here after setting up the hardware.
 // Additional processors start here.
 static void
 mpmain(void)
@@ -62,7 +52,6 @@ mpmain(void)
   setupsegs(0);
   cpuid(0, 0, 0, 0, 0);  // memory barrier
   cpus[cpu()].booted = 1;
-  popcli();
   scheduler();
 }
 
@@ -73,6 +62,7 @@ bootothers(void)
   extern uchar _binary_bootother_start[], _binary_bootother_size[];
   uchar *code;
   struct cpu *c;
+  char *stack;
 
   // Write bootstrap code to unused memory at 0x7000.
   code = (uchar*)0x7000;
@@ -83,7 +73,8 @@ bootothers(void)
       continue;
 
     // Fill in %esp, %eip and start code on cpu.
-    *(void**)(code-4) = c->stack + KSTACKSIZE;
+    stack = kalloc(KSTACKSIZE);
+    *(void**)(code-4) = stack + KSTACKSIZE;
     *(void**)(code-8) = mpmain;
     lapic_startap(c->apicid, (uint)code);
 
diff --git a/proc.c b/proc.c
--- a/proc.c
+++ b/proc.c
@@ -179,7 +179,6 @@ userinit(void)
 }
 
 // Return currently running process.
-// XXX comment better
 struct proc*
 curproc(void)
 {
@@ -206,11 +205,13 @@ scheduler(void)
   struct cpu *c;
   int i;
 
+  c = &cpus[cpu()];
   for(;;){
+    // Enable interrupts on this processor.
+    sti();
+
     // Loop over process table looking for process to run.
     acquire(&proc_table_lock);
-
-    c = &cpus[cpu()];
     for(i = 0; i < NPROC; i++){
       p = &proc[i];
       if(p->state != RUNNABLE)
@@ -229,8 +230,8 @@ scheduler(void)
       c->curproc = 0;
       setupsegs(0);
     }
-
     release(&proc_table_lock);
+
   }
 }
 
diff --git a/proc.h b/proc.h
--- a/proc.h
+++ b/proc.h
@@ -56,9 +56,9 @@ struct cpu {
   struct context context;      // Switch here to enter scheduler
   struct taskstate ts;         // Used by x86 to find stack for interrupt
   struct segdesc gdt[NSEGS];   // x86 global descriptor table
-  char *stack;
   volatile int booted;         // Has the CPU started?
-  int ncli;                    // Depth of pushcli nesting.
+  int ncli;                    // Depth of pushcli nesting.
+  int intena;                  // Were interrupts enabled before pushcli?
 };
 
 extern struct cpu cpus[NCPU];
diff --git a/spinlock.c b/spinlock.c
--- a/spinlock.c
+++ b/spinlock.c
@@ -88,15 +88,19 @@ holding(struct spinlock *lock)
 }
 
 
-
-// XXX!
-// Better names?  Better functions?
+// Pushcli/popcli are like cli/sti except that they are matched:
+// it takes two popcli to undo two pushcli.  Also, if interrupts
+// are off, then pushcli, popcli leaves them off.
 
 void
 pushcli(void)
 {
+  int eflags;
+
+  eflags = read_eflags();
   cli();
-  cpus[cpu()].ncli++;
+  if(cpus[cpu()].ncli++ == 0)
+    cpus[cpu()].intena = eflags & FL_IF;
 }
 
 void
@@ -106,7 +110,7 @@ popcli(void)
     panic("popcli - interruptible");
   if(--cpus[cpu()].ncli < 0)
     panic("popcli");
-  if(cpus[cpu()].ncli == 0)
+  if(cpus[cpu()].ncli == 0 && cpus[cpu()].intena)
     sti();
 }
 
diff --git a/trap.c b/trap.c
--- a/trap.c
+++ b/trap.c
@@ -44,9 +44,6 @@ trap(struct trapframe *tf)
     return;
   }
 
-  // No interrupts during interrupt handling.
-  pushcli();
-
   switch(tf->trapno){
   case IRQ_OFFSET + IRQ_TIMER:
     if(cpu() == 0){
@@ -84,8 +81,6 @@ trap(struct trapframe *tf)
     cp->killed = 1;
   }
 
-  popcli();
-
   // Force process exit if it has been killed and is in user space.
   // (If it is still executing in the kernel, let it keep running
   // until it gets to the regular system call return.)
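
For where these primitives sit in the kernel: xv6's acquire() and release() bracket every critical section with pushcli()/popcli() (the "release, and then popcli" of the commit message). The outline below is an assumed, simplified shape of that bracketing, not code from this tree; it only shows why the scheduler loop's sti() is the single unconditional enable, while every other pair now preserves whatever interrupt state it found.

```c
// Simplified outline, not the xv6 sources: lock acquire/release bracketing
// with the new pushcli/popcli, and the scheduler's lone explicit sti().
// pushcli(), popcli(), and sti() are assumed to behave as in the patch above;
// the GCC __sync builtins stand in for xv6's xchg-based spinning, and the
// spinlock struct is reduced to its locked field.
void pushcli(void);
void popcli(void);
void sti(void);

struct spinlock { volatile unsigned int locked; };

void
acquire(struct spinlock *lk)
{
  pushcli();                              // interrupts off; prior state saved at depth 0
  while(__sync_lock_test_and_set(&lk->locked, 1) != 0)
    ;                                     // spin until the lock is free
}

void
release(struct spinlock *lk)
{
  __sync_lock_release(&lk->locked);
  popcli();                               // re-enables only if pushcli found interrupts on
}

void
scheduler_outline(struct spinlock *ptable_lock)
{
  for(;;){
    sti();                                // the one unconditional enable
    acquire(ptable_lock);                 // pushcli sees interrupts on, records intena
    // ... pick a RUNNABLE process and switch to it ...
    release(ptable_lock);                 // popcli turns interrupts back on here
  }
}
```

Everywhere else (bootstrap, trap handling, nested acquires) the pair leaves the interrupt state exactly as it found it, which is why the manual ncli fiddling in main.c and the extra pushcli/popcli in trap.c could be deleted.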