author     rsc <rsc>  2006-07-11 01:07:40 +0000
committer  rsc <rsc>  2006-07-11 01:07:40 +0000
commit     5ce9751cab960e3b226eb0720e781e793a0be4ed
tree       efdc77b710ac00bb38f58fd0d157adc58b22c440
parent     7ea6c9d19747c84ede9b056475cd9046c89a4d33
Changes to allow use of native x86 ELF compilers, which on my
Linux 2.4 box using gcc 3.4.6 don't seem to follow the same
conventions as the i386-jos-elf-gcc compilers. You can run
'make TOOLPREFIX=' or edit the Makefile.

curproc[cpu()] can now be NULL, indicating that no proc is running.
This seemed safer to me than potentially having curproc[0] and
curproc[1] both point at proc[0].

The old implementation of swtch depended on the stack frame layout
used inside swtch being okay to return from on the other stack
(exactly the V6 "you are not expected to understand this").
It also could be called in two contexts: at boot time, to schedule
the very first process, and later, on behalf of a process, to sleep
or to schedule some other process.

I split this into two functions: scheduler and swtch.

The scheduler is now a separate never-returning function, invoked by
each cpu once it is set up. The scheduler looks like:

	scheduler() {
		setjmp(cpu.context);
		pick proc to schedule
		blah blah blah
		longjmp(proc.context)
	}

The new swtch is intended to be called only when curproc[cpu()] is
not NULL, that is, only on behalf of a user proc. It does:

	swtch() {
		if(setjmp(proc.context) == 0)
			longjmp(cpu.context)
	}

to save the current proc context and then jump over to the scheduler,
running on the cpu stack.

Similarly, the system call stubs are now written in assembly in
usys.S, to avoid needing to know the details of the stack frame
layout used by the compiler.

Also various changes in the debugging prints.
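The setjmp/longjmp handoff above can be tried in miniature in
portable C. Below is a minimal single-function sketch using the
standard <setjmp.h> rather than the kernel's own jmpbuf; since user
code has no per-cpu stack, both contexts live on one stack here and
the "proc" is just a loop body. The names cpu_ctx and proc_ctx are
illustrative, not from this commit.

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf cpu_ctx;   /* plays the role of cpu.context */
	static jmp_buf proc_ctx;  /* plays the role of proc.context */

	int
	main(void)
	{
		volatile int i = 0;   /* volatile: modified across longjmp */

		if(setjmp(cpu_ctx) != 0){
			/* "scheduler": pick the proc and resume it */
			printf("scheduler: resuming proc\n");
			longjmp(proc_ctx, 1);
		}
		while(i < 3){
			printf("proc: step %d\n", i);
			i++;
			/* the swtch() pattern: save proc context, jump to scheduler */
			if(setjmp(proc_ctx) == 0)
				longjmp(cpu_ctx, 1);
		}
		return 0;
	}

In the kernel the two setjmps live on different stacks (the proc's
kstack and the per-cpu mpstack), which is what makes the jump safe
without the old hand-rolled %esp/%ebp switching.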
Diffstat (limited to 'proc.c')
-rw-r--r--  proc.c | 83
1 file changed, 44 insertions(+), 39 deletions(-)
diff --git a/proc.c b/proc.c
index 8ad6a23..bdff377 100644
--- a/proc.c
+++ b/proc.c
@@ -48,8 +48,7 @@ struct proc *
newproc()
{
struct proc *np;
- struct proc *op = curproc[cpu()];
- unsigned *sp;
+ struct proc *op;
int fd;
for(np = &proc[1]; np < &proc[NPROC]; np++)
@@ -58,6 +57,11 @@ newproc()
if(np >= &proc[NPROC])
return 0;
+ // copy from proc[0] if we're bootstrapping
+ op = curproc[cpu()];
+ if(op == 0)
+ op = &proc[0];
+
np->pid = next_pid++;
np->ppid = op->pid;
np->sz = op->sz;
@@ -76,11 +80,12 @@ newproc()
np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
*(np->tf) = *(op->tf);
np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
- sp = (unsigned *) np->tf;
- *(--sp) = (unsigned) &trapret; // for return from swtch()
- *(--sp) = 0; // previous bp for leave in swtch()
- np->esp = (unsigned) sp;
- np->ebp = (unsigned) sp;
+ cprintf("newproc pid=%d return to %x:%x tf-%p\n", np->pid, np->tf->tf_cs, np->tf->tf_eip, np->tf);
+
+ // set up new jmpbuf to start executing at trapret with esp pointing at tf
+ memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
+ np->jmpbuf.jb_eip = (unsigned) trapret;
+ np->jmpbuf.jb_esp = (unsigned) np->tf - 4; // -4 for the %eip that isn't actually there
// copy file descriptors
for(fd = 0; fd < NOFILE; fd++){
@@ -96,33 +101,20 @@ newproc()
return np;
}
-/*
- * find a runnable process and switch to it.
- */
void
-swtch()
+scheduler(void)
{
- struct proc *np;
- struct proc *op = curproc[cpu()];
- unsigned sp;
+ struct proc *op, *np;
int i;
- // force push of callee-saved registers
- asm volatile("nop" : : : "%edi", "%esi", "%ebx");
-
- // save calling process's stack pointers
- op->ebp = read_ebp();
- op->esp = read_esp();
-
- // don't execute on calling process's stack
- sp = (unsigned) cpus[cpu()].mpstack + MPSTACK - 32;
- asm volatile("movl %0, %%esp" : : "g" (sp));
- asm volatile("movl %0, %%ebp" : : "g" (sp));
-
- // gcc might store op on the stack
- np = curproc[cpu()];
- np = np + 1;
+ cprintf("start scheduler on cpu %d jmpbuf %p\n", cpu(), &cpus[cpu()].jmpbuf);
+ cpus[cpu()].lastproc = &proc[0];
+ setjmp(&cpus[cpu()].jmpbuf);
+
+ // find a runnable process and switch to it
+ curproc[cpu()] = 0;
+ np = cpus[cpu()].lastproc + 1;
while(1){
for(i = 0; i < NPROC; i++){
if(np >= &proc[NPROC])
@@ -139,34 +131,47 @@ swtch()
acquire_spinlock(&kernel_lock);
np = &proc[0];
}
-
- cprintf("cpu %d swtch %x -> %x\n", cpu(), curproc[cpu()], np);
+ cpus[cpu()].lastproc = np;
curproc[cpu()] = np;
+
np->state = RUNNING;
// h/w sets busy bit in TSS descriptor sometimes, and faults
// if it's set in LTR. so clear tss descriptor busy bit.
np->gdt[SEG_TSS].sd_type = STS_T32A;
+ // XXX should probably have an lgdt() function in x86.h
+ // to confine all the inline assembly.
// XXX probably ought to lgdt on trap return too, in case
// a system call has moved a program or changed its size.
-
asm volatile("lgdt %0" : : "g" (np->gdt_pd.pd_lim));
ltr(SEG_TSS << 3);
- // this happens to work, but probably isn't safe:
- // it's not clear that np->ebp is guaranteed to evaluate
- // correctly after changing the stack pointer.
- asm volatile("movl %0, %%esp" : : "g" (np->esp));
- asm volatile("movl %0, %%ebp" : : "g" (np->ebp));
+ if(0) cprintf("cpu%d: run %d\n", cpu(), np-proc);
+ longjmp(&np->jmpbuf);
+}
+
+// give up the cpu by switching to the scheduler,
+// which runs on the per-cpu stack.
+void
+swtch(void)
+{
+ struct proc *p = curproc[cpu()];
+ if(p == 0)
+ panic("swtch");
+ if(setjmp(&p->jmpbuf) == 0)
+ longjmp(&cpus[cpu()].jmpbuf);
}
void
sleep(void *chan)
{
- curproc[cpu()]->chan = chan;
- curproc[cpu()]->state = WAITING;
+ struct proc *p = curproc[cpu()];
+ if(p == 0)
+ panic("sleep");
+ p->chan = chan;
+ p->state = WAITING;
swtch();
}
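
sleep() parks the proc on a channel and calls swtch(); something must
later mark it RUNNABLE again for the scheduler loop above to pick it
up. That wakeup side is not part of this diff; a minimal sketch of
what it looks like, assuming only the proc[] table and the state
names shown above:

	// Not part of this commit: the counterpart to sleep(chan).
	// Scans the proc table and makes waiting procs runnable again;
	// the scheduler loop will then longjmp into their saved jmpbuf.
	void
	wakeup(void *chan)
	{
		struct proc *p;

		for(p = proc; p < &proc[NPROC]; p++)
			if(p->state == WAITING && p->chan == chan)
				p->state = RUNNABLE;
	}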