| author | rtm <rtm> | 2006-07-15 12:03:57 +0000 |
|---|---|---|
| committer | rtm <rtm> | 2006-07-15 12:03:57 +0000 |
| commit | 46bbd72f3eeaff9386b2a90af88f3d46b458a0e8 | |
| tree | 31ca93c160a10c50948329b30d27475aa6b38313 | |
| parent | d9872ffa951291fcc3f7a92c0d235b86435c5714 | |
no more recursive locks
wakeup1() assumes you hold proc_table_lock
sleep(chan, lock) provides atomic sleep-and-release to wait for condition
ugly code in swtch/scheduler to implement new sleep
fix lots of bugs in pipes, wait, and exit
fix bugs if timer interrupt goes off in schedule()
console locks per line, not per byte
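The heart of the change is the two-argument sleep(): the caller holds the spinlock that protects the condition, and sleep() gives that lock up only after the process is committed to sleeping, so a wakeup() cannot slip in between the condition check and the switch to WAITING. A minimal sketch of the resulting idiom, modeled on the pipe_write()/pipe_read() changes in the diff below; the `q` structure and its fields are illustrative only and are not part of this tree:

```c
// Hypothetical bounded counter guarded by its own spinlock, written in the
// style this commit establishes for pipes.  Uses the kernel's acquire(),
// release(), sleep(), and wakeup() as declared in defs.h after this change.
struct q {
  struct spinlock lock;
  int count;
} q;

void
consume(void)
{
  acquire(&q.lock);
  while(q.count == 0)       // re-check the condition after every wakeup
    sleep(&q, &q.lock);     // atomically releases q.lock; reacquires it on return
  q.count -= 1;
  release(&q.lock);
}

void
produce(void)
{
  acquire(&q.lock);
  q.count += 1;
  release(&q.lock);
  wakeup(&q);               // wakeup() takes proc_table_lock itself (see wakeup1)
}
```

Passing a null lock, as ide.c and sys_block() still do with `sleep(&disk_channel, 0)`, keeps the old unprotected behavior.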
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | Notes | 23 |
| -rw-r--r-- | console.c | 39 |
| -rw-r--r-- | defs.h | 5 |
| -rw-r--r-- | ide.c | 2 |
| -rw-r--r-- | main.c | 4 |
| -rw-r--r-- | pipe.c | 19 |
| -rw-r--r-- | proc.c | 75 |
| -rw-r--r-- | proc.h | 1 |
| -rw-r--r-- | spinlock.c | 58 |
| -rw-r--r-- | spinlock.h | 2 |
| -rw-r--r-- | syscall.c | 33 |
| -rw-r--r-- | syscall.h | 1 |
| -rw-r--r-- | trap.c | 26 |
| -rw-r--r-- | usertests.c | 42 |
| -rw-r--r-- | usys.S | 1 |
15 files changed, 229 insertions, 102 deletions
diff --git a/Notes b/Notes
--- a/Notes
+++ b/Notes
@@ -126,3 +126,26 @@ nasty hack to allow locks before first process,
 race between release and sleep in sys_wait()
 race between sys_exit waking up parent and setting state=ZOMBIE
+race in pipe code when full/empty
+
+lock order
+  per-pipe lock
+  proc_table_lock fd_table_lock kalloc_lock
+  console_lock
+
+condition variable + mutex that protects it
+  proc * (for wait()), proc_table_lock
+  pipe structure, pipe lock
+
+systematic way to test sleep races?
+  print something at the start of sleep?
+
+do you have to be holding the mutex in order to call wakeup()?
+
+should lock around printf, not putc
+
+device interrupts don't clear FL_IF
+  so a recursive timer interrupt is possible
+
+the sleep/swtch/schedule code that holds over a lock is ugly
+
diff --git a/console.c b/console.c
--- a/console.c
+++ b/console.c
@@ -4,7 +4,8 @@
 #include "spinlock.h"
 
 struct spinlock console_lock;
-int use_printf_lock = 0;
+int paniced = 0;
+int use_console_lock = 0;
 
 /*
  * copy console output to parallel port, which you can tell
@@ -23,15 +24,18 @@ lpt_putc(int c)
 	outb(0x378+2, 0x08);
 }
 
-void
-cons_putc(int c)
+static void
+real_cons_putc(int c)
 {
   int crtport = 0x3d4; // io port of CGA
   unsigned short *crt = (unsigned short *) 0xB8000; // base of CGA memory
   int ind;
 
-  if(use_printf_lock)
-    acquire(&console_lock);
+  if(paniced){
+    cli();
+    while(1)
+      ;
+  }
 
   lpt_putc(c);
 
@@ -63,8 +67,15 @@ cons_putc(int c)
   outb(crtport + 1, ind >> 8);
   outb(crtport, 15);
   outb(crtport + 1, ind);
+}
 
-  if(use_printf_lock)
+void
+cons_putc(int c)
+{
+  if(use_console_lock)
+    acquire(&console_lock);
+  real_cons_putc(c);
+  if(use_console_lock)
     release(&console_lock);
 }
 
@@ -91,7 +102,7 @@ printint(int xx, int base, int sgn)
 
   while(i > 0){
     i -= 1;
-    cons_putc(buf[i]);
+    real_cons_putc(buf[i]);
   }
 }
 
@@ -104,13 +115,16 @@ cprintf(char *fmt, ...)
   int i, state = 0, c;
   unsigned int *ap = (unsigned int *) &fmt + 1;
 
+  if(use_console_lock)
+    acquire(&console_lock);
+
   for(i = 0; fmt[i]; i++){
     c = fmt[i] & 0xff;
     if(state == 0){
       if(c == '%'){
         state = '%';
       } else {
-        cons_putc(c);
+        real_cons_putc(c);
       }
     } else if(state == '%'){
       if(c == 'd'){
@@ -120,20 +134,25 @@ cprintf(char *fmt, ...)
         printint(*ap, 16, 0);
         ap++;
       } else if(c == '%'){
-        cons_putc(c);
+        real_cons_putc(c);
       }
       state = 0;
     }
   }
+
+  if(use_console_lock)
+    release(&console_lock);
 }
 
 void
 panic(char *s)
 {
-  use_printf_lock = 0;
+  __asm __volatile("cli");
+  use_console_lock = 0;
   cprintf("panic: ");
   cprintf(s, 0);
   cprintf("\n", 0);
+  paniced = 1; // freeze other CPU
   while(1)
     ;
 }
diff --git a/defs.h b/defs.h
--- a/defs.h
+++ b/defs.h
@@ -14,7 +14,8 @@ struct jmpbuf;
 void setupsegs(struct proc *);
 struct proc * newproc(void);
 void swtch(int);
-void sleep(void *);
+struct spinlock;
+void sleep(void *, struct spinlock *);
 void wakeup(void *);
 void scheduler(void);
 void proc_exit(void);
@@ -65,6 +66,8 @@ int cpu(void);
 struct spinlock;
 void acquire(struct spinlock * lock);
 void release(struct spinlock * lock);
+void acquire1(struct spinlock * lock, struct proc *);
+void release1(struct spinlock * lock, struct proc *);
 
 // main.c
 void load_icode(struct proc *p, uint8_t *binary, unsigned size);
diff --git a/ide.c b/ide.c
--- a/ide.c
+++ b/ide.c
@@ -112,7 +112,7 @@ ide_start_read(uint32_t secno, void *dst, unsigned nsecs)
     panic("ide_start_read: nsecs too large");
 
   while ((head + 1) % NREQUEST == tail)
-    sleep (&disk_channel);
+    sleep (&disk_channel, 0);
 
   r = &request[head];
   r->secno = secno;
diff --git a/main.c b/main.c
--- a/main.c
+++ b/main.c
@@ -16,7 +16,7 @@ extern char _binary_user1_start[], _binary_user1_size[];
 extern char _binary_usertests_start[], _binary_usertests_size[];
 extern char _binary_userfs_start[], _binary_userfs_size[];
 
-extern use_printf_lock;
+extern int use_console_lock;
 
 int
 main()
@@ -40,7 +40,7 @@ main()
 
   mp_init(); // collect info about this machine
 
-  use_printf_lock = 1;
+  use_console_lock = 1;
 
   cpus[cpu()].clis = 1; // cpu starts as if we had called cli()
diff --git a/pipe.c b/pipe.c
--- a/pipe.c
+++ b/pipe.c
@@ -81,16 +81,17 @@ pipe_write(struct pipe *p, char *addr, int n)
 
   for(i = 0; i < n; i++){
     while(((p->writep + 1) % PIPESIZE) == p->readp){
-      if(p->readopen == 0)
+      if(p->readopen == 0){
+        release(&p->lock);
         return -1;
-      release(&p->lock);
+      }
       wakeup(&p->readp);
-      sleep(&p->writep);
-      acquire(&p->lock);
+      sleep(&p->writep, &p->lock);
     }
     p->data[p->writep] = addr[i];
     p->writep = (p->writep + 1) % PIPESIZE;
   }
+
   release(&p->lock);
   wakeup(&p->readp);
   return i;
@@ -101,19 +102,23 @@ pipe_read(struct pipe *p, char *addr, int n)
 {
   int i;
 
+  acquire(&p->lock);
+
   while(p->readp == p->writep){
-    if(p->writeopen == 0)
+    if(p->writeopen == 0){
+      release(&p->lock);
       return 0;
-    sleep(&p->readp);
+    }
+    sleep(&p->readp, &p->lock);
   }
 
-  acquire(&p->lock);
   for(i = 0; i < n; i++){
     if(p->readp == p->writep)
       break;
     addr[i] = p->data[p->readp];
     p->readp = (p->readp + 1) % PIPESIZE;
   }
+
   release(&p->lock);
   wakeup(&p->writep);
   return i;
diff --git a/proc.c b/proc.c
--- a/proc.c
+++ b/proc.c
@@ -95,7 +95,6 @@ newproc()
   np->tf = (struct Trapframe *) (np->kstack + KSTACKSIZE - sizeof(struct Trapframe));
   *(np->tf) = *(op->tf);
   np->tf->tf_regs.reg_eax = 0; // so fork() returns 0 in child
-  cprintf("newproc pid=%d return to %x:%x tf-%p\n", np->pid, np->tf->tf_cs, np->tf->tf_eip, np->tf);
 
   // set up new jmpbuf to start executing at trapret with esp pointing at tf
   memset(&np->jmpbuf, 0, sizeof np->jmpbuf);
@@ -109,8 +108,6 @@ newproc()
       fd_reference(np->fds[fd]);
   }
 
-  cprintf("newproc %x\n", np);
-
   return np;
 }
 
@@ -126,18 +123,27 @@ scheduler(void)
 
   setjmp(&cpus[cpu()].jmpbuf);
 
   op = curproc[cpu()];
+
+  if(op == 0 || op->mtx != &proc_table_lock)
+    acquire1(&proc_table_lock, op);
+
   if(op){
     if(op->newstate <= 0 || op->newstate > ZOMBIE)
       panic("scheduler");
     op->state = op->newstate;
     op->newstate = -1;
+    if(op->mtx){
+      struct spinlock *mtx = op->mtx;
+      op->mtx = 0;
+      if(mtx != &proc_table_lock)
+        release1(mtx, op);
+    }
   }
 
   // find a runnable process and switch to it
   curproc[cpu()] = 0;
   np = cpus[cpu()].lastproc + 1;
   while(1){
-    acquire(&proc_table_lock);
     for(i = 0; i < NPROC; i++){
       if(np >= &proc[NPROC])
         np = &proc[0];
@@ -148,11 +154,13 @@ scheduler(void)
 
     if(i < NPROC){
       np->state = RUNNING;
-      release(&proc_table_lock);
+      release1(&proc_table_lock, op);
       break;
     }
 
-    release(&proc_table_lock);
+    release1(&proc_table_lock, op);
+    op = 0;
+    acquire(&proc_table_lock);
     np = &proc[0];
   }
 
@@ -180,36 +188,56 @@ void
 swtch(int newstate)
 {
   struct proc *p = curproc[cpu()];
+
   if(p == 0)
     panic("swtch no proc");
-  if(p->locks != 0)
+  if(p->mtx == 0 && p->locks != 0)
     panic("swtch w/ locks");
+  if(p->mtx && p->locks != 1)
+    panic("swtch w/ locks 1");
+  if(p->mtx && p->mtx->locked == 0)
+    panic("switch w/ lock but not held");
+  if(p->locks && (read_eflags() & FL_IF))
+    panic("swtch w/ lock but FL_IF");
+
   p->newstate = newstate; // basically an argument to scheduler()
   if(setjmp(&p->jmpbuf) == 0)
     longjmp(&cpus[cpu()].jmpbuf);
 }
 
 void
-sleep(void *chan)
+sleep(void *chan, struct spinlock *mtx)
 {
   struct proc *p = curproc[cpu()];
+
   if(p == 0)
     panic("sleep");
+
   p->chan = chan;
+  p->mtx = mtx; // scheduler will release it
+
   swtch(WAITING);
+
+  if(mtx)
+    acquire(mtx);
+  p->chan = 0;
 }
 
 void
-wakeup(void *chan)
+wakeup1(void *chan)
 {
   struct proc *p;
-  acquire(&proc_table_lock);
-  for(p = proc; p < &proc[NPROC]; p++){
-    if(p->state == WAITING && p->chan == chan){
+  for(p = proc; p < &proc[NPROC]; p++)
+    if(p->state == WAITING && p->chan == chan)
       p->state = RUNNABLE;
-    }
-  }
+}
+
+void
+wakeup(void *chan)
+{
+  acquire(&proc_table_lock);
+  wakeup1(chan);
   release(&proc_table_lock);
 }
 
@@ -229,8 +257,6 @@ proc_exit()
   struct proc *cp = curproc[cpu()];
   int fd;
-  cprintf("exit %x pid %d ppid %d\n", cp, cp->pid, cp->ppid);
-
   for(fd = 0; fd < NOFILE; fd++){
     if(cp->fds[fd]){
       fd_close(cp->fds[fd]);
@@ -243,32 +269,35 @@ proc_exit()
   // wake up parent
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->pid == cp->ppid)
-      wakeup(p);
+      wakeup1(p);
 
   // abandon children
   for(p = proc; p < &proc[NPROC]; p++)
     if(p->ppid == cp->pid)
       p->pid = 1;
-
-  release(&proc_table_lock);
-
-  // switch into scheduler
+
+  cp->mtx = &proc_table_lock;
   swtch(ZOMBIE);
+  panic("a zombie revived");
 }
 
 // disable interrupts
 void
 cli(void)
 {
-  cpus[cpu()].clis += 1;
-  if(cpus[cpu()].clis == 1)
+  if(cpus[cpu()].clis == 0)
     __asm __volatile("cli");
+  cpus[cpu()].clis += 1;
+  if((read_eflags() & FL_IF) != 0)
+    panic("cli but enabled");
 }
 
 // enable interrupts
 void
 sti(void)
 {
+  if((read_eflags() & FL_IF) != 0)
+    panic("sti but enabled");
   if(cpus[cpu()].clis < 1)
     panic("sti");
   cpus[cpu()].clis -= 1;
diff --git a/proc.h b/proc.h
--- a/proc.h
+++ b/proc.h
@@ -41,6 +41,7 @@ struct proc{
   char *kstack; // kernel stack, separate from mem so it doesn't move
   enum proc_state state;
   enum proc_state newstate; // desired state after swtch()
+  struct spinlock *mtx; // mutex for condition variable
   int pid;
   int ppid;
   void *chan; // sleep
diff --git a/spinlock.c b/spinlock.c
--- a/spinlock.c
+++ b/spinlock.c
@@ -8,36 +8,20 @@
 #define DEBUG 0
 
-extern use_printf_lock;
+extern int use_console_lock;
 
 int getcallerpc(void *v) {
   return ((int*)v)[-1];
 }
 
 void
-acquire(struct spinlock * lock)
+acquire1(struct spinlock * lock, struct proc *cp)
 {
-  struct proc *cp = curproc[cpu()];
-  unsigned who;
-
-  if(cp)
-    who = (unsigned) cp;
-  else
-    who = cpu() + 1;
-
   if(DEBUG) cprintf("cpu%d: acquiring at %x\n", cpu(), getcallerpc(&lock));
-  if (lock->who == who && lock->locked){
-    lock->count += 1;
-  } else {
-    cli();
-    // if we get the lock, eax will be zero
-    // if we don't get the lock, eax will be one
-    while ( cmpxchg(0, 1, &lock->locked) == 1 ) { ; }
-    lock->locker_pc = getcallerpc(&lock);
-    lock->count = 1;
-    lock->who = who;
-  }
+  cli();
+  while ( cmpxchg(0, 1, &lock->locked) == 1 ) { ; }
+  lock->locker_pc = getcallerpc(&lock);
 
   if(cp)
     cp->locks += 1;
@@ -46,27 +30,29 @@ acquire(struct spinlock * lock)
 }
 
 void
-release(struct spinlock * lock)
+release1(struct spinlock * lock, struct proc *cp)
 {
-  struct proc *cp = curproc[cpu()];
-  unsigned who;
-
-  if(cp)
-    who = (unsigned) cp;
-  else
-    who = cpu() + 1;
 
   if(DEBUG) cprintf ("cpu%d: releasing at %x\n", cpu(), getcallerpc(&lock));
 
-  if(lock->who != who || lock->count < 1 || lock->locked != 1)
+  if(lock->locked != 1)
     panic("release");
 
-  lock->count -= 1;
   if(cp)
     cp->locks -= 1;
 
-  if(lock->count < 1){
-    lock->who = 0;
-    cmpxchg(1, 0, &lock->locked);
-    sti();
-  }
+  cmpxchg(1, 0, &lock->locked);
+  sti();
+}
+
+void
+acquire(struct spinlock *lock)
+{
+  acquire1(lock, curproc[cpu()]);
+}
+
+void
+release(struct spinlock *lock)
+{
+  release1(lock, curproc[cpu()]);
 }
diff --git a/spinlock.h b/spinlock.h
--- a/spinlock.h
+++ b/spinlock.h
@@ -1,6 +1,4 @@
 struct spinlock {
   unsigned int locked;
-  unsigned who;
-  int count;
   unsigned locker_pc;
 };
diff --git a/syscall.c b/syscall.c
--- a/syscall.c
+++ b/syscall.c
@@ -152,8 +152,12 @@ sys_fork()
   struct proc *np;
 
   np = newproc();
-  np->state = RUNNABLE;
-  return np->pid;
+  if(np){
+    np->state = RUNNABLE;
+    return np->pid;
+  } else {
+    return -1;
+  }
 }
 
 int
@@ -170,11 +174,10 @@ sys_wait()
   struct proc *cp = curproc[cpu()];
   int any, pid;
 
-  cprintf("waid pid %d ppid %d\n", cp->pid, cp->ppid);
+  acquire(&proc_table_lock);
 
   while(1){
     any = 0;
-    acquire(&proc_table_lock);
     for(p = proc; p < &proc[NPROC]; p++){
       if(p->state == ZOMBIE && p->ppid == cp->pid){
         kfree(p->mem, p->sz);
@@ -182,18 +185,16 @@ sys_wait()
         pid = p->pid;
         p->state = UNUSED;
         release(&proc_table_lock);
-        cprintf("%x collected %x\n", cp, p);
         return pid;
       }
       if(p->state != UNUSED && p->ppid == cp->pid)
         any = 1;
     }
-    release(&proc_table_lock);
     if(any == 0){
-      cprintf("%x nothing to wait for\n", cp);
+      release(&proc_table_lock);
       return -1;
     }
-    sleep(cp);
+    sleep(cp, &proc_table_lock);
   }
 }
 
@@ -220,7 +221,7 @@ sys_block(void)
       panic("couldn't start read\n");
     }
     cprintf("call sleep\n");
-    sleep (c);
+    sleep (c, 0);
     if (ide_finish_read(c)) {
       panic("couldn't do read\n");
     }
@@ -253,6 +254,17 @@ sys_kill()
   return -1;
 }
 
+int
+sys_panic()
+{
+  struct proc *p = curproc[cpu()];
+  unsigned int addr;
+
+  fetcharg(0, &addr);
+  panic(p->mem + addr);
+  return 0;
+}
+
 void
 syscall()
 {
@@ -292,6 +304,9 @@ syscall()
   case SYS_kill:
     ret = sys_kill();
     break;
+  case SYS_panic:
+    ret = sys_panic();
+    break;
   default:
     cprintf("unknown sys call %d\n", num);
     // XXX fault
diff --git a/syscall.h b/syscall.h
--- a/syscall.h
+++ b/syscall.h
@@ -8,3 +8,4 @@
 #define SYS_close 8
 #define SYS_block 9
 #define SYS_kill 10
+#define SYS_panic 11
diff --git a/trap.c b/trap.c
--- a/trap.c
+++ b/trap.c
@@ -36,8 +36,14 @@ trap(struct Trapframe *tf)
 {
   int v = tf->tf_trapno;
 
+  if(cpus[cpu()].clis){
+    cprintf("cpu %d v %d eip %x\n", cpu(), v, tf->tf_eip);
+    panic("interrupt while interrupts are off");
+  }
+
   if(v == T_SYSCALL){
     struct proc *cp = curproc[cpu()];
+    int num = cp->tf->tf_regs.reg_eax;
     if(cp == 0)
       panic("syscall with no proc");
     if(cp->killed)
@@ -50,6 +56,14 @@ trap(struct Trapframe *tf)
       panic("trap ret but not RUNNING");
     if(tf != cp->tf)
       panic("trap ret wrong tf");
+    if(cp->locks){
+      cprintf("num=%d\n", num);
+      panic("syscall returning locks held");
+    }
+    if(cpus[cpu()].clis)
+      panic("syscall returning but clis != 0");
+    if((read_eflags() & FL_IF) == 0)
+      panic("syscall returning but FL_IF clear");
     if(read_esp() < (unsigned)cp->kstack ||
        read_esp() >= (unsigned)cp->kstack + KSTACKSIZE)
       panic("trap ret esp wrong");
@@ -61,14 +75,20 @@ trap(struct Trapframe *tf)
   if(v == (IRQ_OFFSET + IRQ_TIMER)){
     struct proc *cp = curproc[cpu()];
     lapic_timerintr();
+    if(cp && cp->locks)
+      panic("timer interrupt while holding a lock");
     if(cp){
-      if(cpus[cpu()].clis != 0)
-        panic("trap clis > 0");
+#if 1
+      if((read_eflags() & FL_IF) == 0)
+        panic("timer interrupt but interrupts now disabled");
+#else
       cpus[cpu()].clis += 1;
       sti();
+#endif
       if(cp->killed)
         proc_exit();
-      yield();
+      if(cp->state == RUNNING)
+        yield();
     }
     return;
   }
diff --git a/usertests.c b/usertests.c
index 2f688ca..e277839 100644
--- a/usertests.c
+++ b/usertests.c
@@ -16,7 +16,7 @@ pipe1()
       for(i = 0; i < 1033; i++)
         buf[i] = seq++;
       if(write(fds[1], buf, 1033) != 1033){
-        puts("pipe1 oops 1\n");
+        panic("pipe1 oops 1\n");
         exit(1);
       }
     }
@@ -31,7 +31,7 @@ pipe1()
         break;
       for(i = 0; i < n; i++){
         if((buf[i] & 0xff) != (seq++ & 0xff)){
-          puts("pipe1 oops 2\n");
+          panic("pipe1 oops 2\n");
           return;
         }
       }
@@ -41,8 +41,9 @@ pipe1()
         cc = sizeof(buf);
     }
     if(total != 5 * 1033)
-      puts("pipe1 oops 3\n");
+      panic("pipe1 oops 3\n");
     close(fds[0]);
+    wait();
   }
   puts("pipe1 ok\n");
 }
@@ -69,7 +70,7 @@ preempt()
   if(pid3 == 0){
     close(pfds[0]);
     if(write(pfds[1], "x", 1) != 1)
-      puts("preempt write error");
+      panic("preempt write error");
     close(pfds[1]);
     while(1)
       ;
   }
@@ -77,7 +78,7 @@ preempt()
   close(pfds[1]);
   if(read(pfds[0], buf, sizeof(buf)) != 1){
-    puts("preempt read error");
+    panic("preempt read error");
     return;
   }
   close(pfds[0]);
@@ -90,12 +91,37 @@ preempt()
   puts("preempt ok\n");
 }
 
+// try to find any races between exit and wait
+void
+exitwait()
+{
+  int i, pid;
+
+  for(i = 0; i < 100; i++){
+    pid = fork();
+    if(pid < 0){
+      panic("fork failed\n");
+      return;
+    }
+    if(pid){
+      if(wait() != pid){
+        panic("wait wrong pid\n");
+        return;
+      }
+    } else {
+      exit(0);
+    }
+  }
+  puts("exitwait ok\n");
+}
+
 main()
 {
   puts("usertests starting\n");
+
   pipe1();
-  //preempt();
+  preempt();
+  exitwait();
 
-  while(1)
-    ;
+  panic("usertests finished successfuly");
 }
diff --git a/usys.S b/usys.S
--- a/usys.S
+++ b/usys.S
@@ -18,3 +18,4 @@
 STUB(write)
 STUB(close)
 STUB(block)
 STUB(kill)
+STUB(panic)
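For readers tracing the proc.c hunks above: sleep() can be atomic with respect to its lock because it never releases the lock itself. It records the lock in p->mtx and switches to the scheduler while still holding it, and the scheduler drops it once it is running on its own stack; this is the hand-off the commit message calls "ugly code in swtch/scheduler". The sketch below is condensed from the diff above, with the sanity checks omitted, and is not the literal committed code:

```c
// sleep(): park on chan, letting the scheduler release mtx on our behalf.
void
sleep(void *chan, struct spinlock *mtx)
{
  struct proc *p = curproc[cpu()];

  p->chan = chan;
  p->mtx = mtx;        // scheduler, not sleep(), will release this lock
  swtch(WAITING);      // longjmp into scheduler(); returns once we run again

  if(mtx)
    acquire(mtx);      // retake the condition lock before returning to the caller
  p->chan = 0;
}

// scheduler(), just after setjmp: finish the hand-off for the process that
// switched away.  proc_table_lock itself is kept rather than released,
// because the scheduler needs it to scan the process table.
if(op->mtx){
  struct spinlock *mtx = op->mtx;
  op->mtx = 0;
  if(mtx != &proc_table_lock)
    release1(mtx, op);
}
```

proc_exit() uses the same mechanism, setting cp->mtx = &proc_table_lock before swtch(ZOMBIE), which is what closes the exit/wait race that the new exitwait() test in usertests.c exercises.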
