| author | Frans Kaashoek <kaashoek@Frans-Kaashoeks-MacBook-Pro.local> | 2011-08-26 10:08:29 -0400 | 
|---|---|---|
| committer | Frans Kaashoek <kaashoek@Frans-Kaashoeks-MacBook-Pro.local> | 2011-08-26 10:08:29 -0400 | 
| commit | 3a5fa7ed9020eaf8ab843a16d26db7393b2ec072 (patch) | |
| tree | bfa4ad4ae03d7d21796bacaa7eab8e3d3e4ab365 | |
| parent | 8a9b6dbd4468f6312f1d07226a623879f970bd4b (diff) | |
Introduce and use sleeplocks instead of BUSY flags
Remove I_BUSY, B_BUSY, and intrans defs and usages
One spinlock per buf to avoid ugly loop in bget
Fix race in filewrite (don't update f->off after releasing lock)
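
The mechanism itself is small: a sleeplock is a `locked` flag whose updates are guarded by an associated spinlock, with xv6's `sleep()`/`wakeup()` providing the blocking (see the spinlock.c hunk below). As a rough user-space analogue — an illustration only, not part of the patch — the same pattern can be written with POSIX threads, letting a mutex play the guard spinlock and a condition variable play the sleep channel:

```c
#include <pthread.h>

/* Sketch of the sleeplock pattern transplanted to user space.
 * Assumed mapping: pthread_mutex_t stands in for the xv6 guard
 * spinlock, pthread_cond_t for the sleep()/wakeup() channel. */
struct sleeplock {
  int locked;
  pthread_cond_t cond;
};

void initsleeplock(struct sleeplock *sl)
{
  sl->locked = 0;
  pthread_cond_init(&sl->cond, 0);
}

/* Caller must hold *guard, the mutex protecting the flag. */
void acquire_sleeplock(struct sleeplock *sl, pthread_mutex_t *guard)
{
  while (sl->locked)
    pthread_cond_wait(&sl->cond, guard); /* drops guard while blocked, retakes it to recheck */
  sl->locked = 1;                        /* guard still held: update is atomic */
}

/* Caller must hold *guard. */
void release_sleeplock(struct sleeplock *sl)
{
  sl->locked = 0;
  pthread_cond_broadcast(&sl->cond);     /* like wakeup(sl): rouse every waiter */
}
```

The payoff is that a holder can keep a sleeplock across disk I/O without making other CPUs spin, which is why B_BUSY, I_BUSY, and intrans — three hand-rolled copies of this flag — can all be deleted.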
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | bio.c | 45 |
| -rw-r--r-- | buf.h | 7 |
| -rw-r--r-- | defs.h | 5 |
| -rw-r--r-- | file.c | 6 |
| -rw-r--r-- | file.h | 6 |
| -rw-r--r-- | fs.c | 32 |
| -rw-r--r-- | ide.c | 2 |
| -rw-r--r-- | log.c | 14 |
| -rw-r--r-- | pipe.c | 2 |
| -rw-r--r-- | spinlock.c | 42 |
| -rw-r--r-- | spinlock.h | 7 |
| -rw-r--r-- | sysfile.c | 1 |
12 files changed, 106 insertions, 63 deletions
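
One detail worth flagging before the bio.c hunks: the commit message's "ugly loop" is bget's old `goto loop`, which re-scanned the whole cache after every wakeup because B_BUSY could only be checked under the global `bcache.lock`. With a spinlock per buf, bget instead hands off from the table lock to the buffer's own lock and blocks on that buffer's sleeplock. The hand-off, condensed from the hunk below:

```c
/* Lock hand-off in the new bget (condensed from the bio.c hunk below). */
acquire(&bcache.lock);                      /* 1. protect the LRU-list scan       */
acquire(&b->lock);                          /* 2. pin this buf's state            */
release(&bcache.lock);                      /* 3. done scanning; free the table   */
acquire_sleeplock(&b->sleeplock, &b->lock); /* 4. may sleep; b->lock is the guard */
release(&b->lock);                          /* 5. return b holding only sleeplock */
```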
```diff
diff --git a/bio.c b/bio.c
--- a/bio.c
+++ b/bio.c
@@ -13,9 +13,7 @@
 // * Only one process at a time can use a buffer,
 //     so do not keep them longer than necessary.
 //
-// The implementation uses three state flags internally:
-// * B_BUSY: the block has been returned from bread
-//     and has not been passed back to brelse.
+// The implementation uses two state flags internally:
 // * B_VALID: the buffer data has been initialized
 //     with the associated disk block contents.
 // * B_DIRTY: the buffer data has been modified
@@ -51,6 +49,8 @@ binit(void)
     b->next = bcache.head.next;
     b->prev = &bcache.head;
     b->dev = -1;
+    initlock(&b->lock, "buf");
+    initsleeplock(&b->sleeplock);
     bcache.head.next->prev = b;
     bcache.head.next = b;
   }
@@ -58,42 +58,43 @@ binit(void)
 // Look through buffer cache for sector on device dev.
 // If not found, allocate fresh block.
-// In either case, return locked buffer.
+// In either case, return sleep-locked buffer.
 static struct buf*
 bget(uint dev, uint sector)
 {
   struct buf *b;

   acquire(&bcache.lock);
-
- loop:
   // Try for cached block.
   for(b = bcache.head.next; b != &bcache.head; b = b->next){
+    acquire(&b->lock);
     if(b->dev == dev && b->sector == sector){
-      if(!(b->flags & B_BUSY)){
-        b->flags |= B_BUSY;
-        release(&bcache.lock);
-        return b;
-      }
-      sleep(b, &bcache.lock);
-      goto loop;
+      release(&bcache.lock);
+      acquire_sleeplock(&b->sleeplock, &b->lock);
+      release(&b->lock);
+      return b;
     }
+    release(&b->lock);
   }

   // Allocate fresh block.
   for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
-    if((b->flags & B_BUSY) == 0){
+    acquire(&b->lock);
+    if (!acquired_sleeplock(&b->sleeplock)) {
+      release(&bcache.lock);
       b->dev = dev;
       b->sector = sector;
-      b->flags = B_BUSY;
-      release(&bcache.lock);
+      b->flags = 0;
+      acquire_sleeplock(&b->sleeplock, &b->lock);
+      release(&b->lock);
       return b;
     }
+    release(&b->lock);
   }
   panic("bget: no buffers");
 }

-// Return a B_BUSY buf with the contents of the indicated disk sector.
+// Return a locked buf with the contents of the indicated disk sector.
 struct buf*
 bread(uint dev, uint sector)
 {
@@ -109,7 +110,7 @@ bread(uint dev, uint sector)
 void
 bwrite(struct buf *b)
 {
-  if((b->flags & B_BUSY) == 0)
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("bwrite");
   b->flags |= B_DIRTY;
   iderw(b);
@@ -119,11 +120,11 @@ bwrite(struct buf *b)
 void
 brelse(struct buf *b)
 {
-  if((b->flags & B_BUSY) == 0)
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("brelse");

   acquire(&bcache.lock);
-
+  acquire(&b->lock);
   b->next->prev = b->prev;
   b->prev->next = b->next;
   b->next = bcache.head.next;
@@ -131,8 +132,8 @@ brelse(struct buf *b)
   bcache.head.next->prev = b;
   bcache.head.next = b;

-  b->flags &= ~B_BUSY;
-  wakeup(b);
+  release_sleeplock(&b->sleeplock);
+  release(&b->lock);
   release(&bcache.lock);
 }
diff --git a/buf.h b/buf.h
--- a/buf.h
+++ b/buf.h
@@ -2,12 +2,13 @@ struct buf {
   int flags;
   uint dev;
   uint sector;
+  struct spinlock lock;
+  struct sleeplock sleeplock;
   struct buf *prev; // LRU cache list
   struct buf *next;
   struct buf *qnext; // disk queue
   uchar data[512];
 };
-#define B_BUSY  0x1  // buffer is locked by some process
-#define B_VALID 0x2  // buffer has been read from disk
-#define B_DIRTY 0x4  // buffer needs to be written to disk
+#define B_VALID 0x1  // buffer has been read from disk
+#define B_DIRTY 0x2  // buffer needs to be written to disk
diff --git a/defs.h b/defs.h
--- a/defs.h
+++ b/defs.h
@@ -5,6 +5,7 @@ struct inode;
 struct pipe;
 struct proc;
 struct spinlock;
+struct sleeplock;
 struct stat;
 struct superblock;
@@ -129,6 +130,10 @@ void            initlock(struct spinlock*, char*);
 void            release(struct spinlock*);
 void            pushcli(void);
 void            popcli(void);
+void            initsleeplock(struct sleeplock*);
+void            acquire_sleeplock(struct sleeplock*,struct spinlock*);
+void            release_sleeplock(struct sleeplock*);
+int             acquired_sleeplock(struct sleeplock*);

 // string.c
 int             memcmp(const void*, const void*, uint);
diff --git a/file.c b/file.c
--- a/file.c
+++ b/file.c
@@ -2,8 +2,8 @@
 #include "defs.h"
 #include "param.h"
 #include "fs.h"
-#include "file.h"
 #include "spinlock.h"
+#include "file.h"

 struct devsw devsw[NDEV];
 struct {
@@ -133,7 +133,8 @@ filewrite(struct file *f, char *addr, int n)
       begin_trans();
       ilock(f->ip);
-      r = writei(f->ip, addr + i, f->off, n1);
+      if ((r = writei(f->ip, addr + i, f->off, n1)) > 0)
+        f->off += r;
       iunlock(f->ip);
       commit_trans();
@@ -141,7 +142,6 @@ filewrite(struct file *f, char *addr, int n)
         break;
       if(r != n1)
         panic("short filewrite");
-      f->off += r;
       i += r;
     }
     return i == n ? n : -1;
diff --git a/file.h b/file.h
--- a/file.h
+++ b/file.h
@@ -15,7 +15,9 @@ struct inode {
   uint dev;           // Device number
   uint inum;          // Inode number
   int ref;            // Reference count
-  int flags;          // I_BUSY, I_VALID
+  int flags;          // I_VALID
+  struct spinlock lock;
+  struct sleeplock sleeplock;

   short type;         // copy of disk inode
   short major;
@@ -25,10 +27,8 @@ struct inode {
   uint addrs[NDIRECT+1];
 };

-#define I_BUSY 0x1
 #define I_VALID 0x2

-
 // device implementations

 struct devsw {
diff --git a/fs.c b/fs.c
--- a/fs.c
+++ b/fs.c
@@ -113,11 +113,8 @@ bfree(int dev, uint b)
 // It is an error to use an inode without holding a reference to it.
 //
 // Processes are only allowed to read and write inode
-// metadata and contents when holding the inode's lock,
-// represented by the I_BUSY flag in the in-memory copy.
-// Because inode locks are held during disk accesses,
-// they are implemented using a flag rather than with
-// spin locks.  Callers are responsible for locking
+// metadata and contents when holding the inode's sleeplock.
+// Callers are responsible for locking
 // inodes before passing them to routines in this file; leaving
 // this responsibility with the caller makes it possible for them
 // to create arbitrarily-sized atomic operations.
@@ -216,6 +213,7 @@ iget(uint dev, uint inum)
   ip->inum = inum;
   ip->ref = 1;
   ip->flags = 0;
+  initsleeplock(&ip->sleeplock);
   release(&icache.lock);

   return ip;
@@ -232,7 +230,7 @@ idup(struct inode *ip)
   return ip;
 }

-// Lock the given inode.
+// Acquire the sleeplock for a given inode.
 void
 ilock(struct inode *ip)
 {
@@ -243,9 +241,7 @@ ilock(struct inode *ip)
     panic("ilock");

   acquire(&icache.lock);
-  while(ip->flags & I_BUSY)
-    sleep(ip, &icache.lock);
-  ip->flags |= I_BUSY;
+  acquire_sleeplock(&ip->sleeplock, &icache.lock);
   release(&icache.lock);

   if(!(ip->flags & I_VALID)){
@@ -268,12 +264,11 @@ ilock(struct inode *ip)
 void
 iunlock(struct inode *ip)
 {
-  if(ip == 0 || !(ip->flags & I_BUSY) || ip->ref < 1)
+  if(ip == 0 || !acquired_sleeplock(&ip->sleeplock) || ip->ref < 1)
     panic("iunlock");

   acquire(&icache.lock);
-  ip->flags &= ~I_BUSY;
-  wakeup(ip);
+  release_sleeplock(&ip->sleeplock);
   release(&icache.lock);
 }
@@ -284,14 +279,15 @@ iput(struct inode *ip)
   acquire(&icache.lock);
   if(ip->ref == 1 && (ip->flags & I_VALID) && ip->nlink == 0){
     // inode is no longer used: truncate and free inode.
-    if(ip->flags & I_BUSY)
+    if(acquired_sleeplock(&ip->sleeplock))
       panic("iput busy");
-    ip->flags |= I_BUSY;
+    acquire_sleeplock(&ip->sleeplock, &icache.lock);
     release(&icache.lock);
     itrunc(ip);
     ip->type = 0;
     iupdate(ip);
     acquire(&icache.lock);
+    release_sleeplock(&ip->sleeplock);
     ip->flags = 0;
     wakeup(ip);
   }
@@ -433,10 +429,14 @@ writei(struct inode *ip, char *src, uint off, uint n)
     return devsw[ip->major].write(ip, src, n);
   }

-  if(off > ip->size || off + n < off)
+  if(off > ip->size || off + n < off) {
+    panic("writei1");
     return -1;
-  if(off + n > MAXFILE*BSIZE)
+  }
+  if(off + n > MAXFILE*BSIZE) {
+    panic("writei2");
     return -1;
+  }

   for(tot=0; tot<n; tot+=m, off+=m, src+=m){
     bp = bread(ip->dev, bmap(ip, off/BSIZE));
diff --git a/ide.c b/ide.c
--- a/ide.c
+++ b/ide.c
@@ -127,7 +127,7 @@ iderw(struct buf *b)
 {
   struct buf **pp;

-  if(!(b->flags & B_BUSY))
+  if(!acquired_sleeplock(&b->sleeplock))
     panic("iderw: buf not busy");
   if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
     panic("iderw: nothing to do");
diff --git a/log.c b/log.c
--- a/log.c
+++ b/log.c
@@ -43,9 +43,9 @@ struct logheader {
 struct {
   struct spinlock lock;
+  struct sleeplock sleeplock;
   int start;
   int size;
-  int intrans;
   int dev;
   struct logheader lh;
 } log;
@@ -60,6 +60,7 @@ initlog(void)
   struct superblock sb;

   initlock(&log.lock, "log");
+  initsleeplock(&log.sleeplock);
   readsb(ROOTDEV, &sb);
   log.start = sb.size - sb.nlog;
   log.size = sb.nlog;
@@ -133,10 +134,7 @@ void
 begin_trans(void)
 {
   acquire(&log.lock);
-  while (log.intrans) {
-    sleep(&log, &log.lock);
-  }
-  log.intrans = 1;
+  acquire_sleeplock(&log.sleeplock, &log.lock);
   release(&log.lock);
 }
@@ -149,10 +147,8 @@ commit_trans(void)
     log.lh.n = 0;
     write_head();        // Reclaim log
   }
-
   acquire(&log.lock);
-  log.intrans = 0;
-  wakeup(&log);
+  release_sleeplock(&log.sleeplock);
   release(&log.lock);
 }
@@ -171,7 +167,7 @@ log_write(struct buf *b)
   if (log.lh.n >= LOGSIZE || log.lh.n >= log.size - 1)
     panic("too big a transaction");
-  if (!log.intrans)
+  if (!acquired_sleeplock(&log.sleeplock))
     panic("write outside of trans");

   // cprintf("log_write: %d %d\n", b->sector, log.lh.n);
diff --git a/pipe.c b/pipe.c
--- a/pipe.c
+++ b/pipe.c
@@ -4,8 +4,8 @@
 #include "mmu.h"
 #include "proc.h"
 #include "fs.h"
-#include "file.h"
 #include "spinlock.h"
+#include "file.h"

 #define PIPESIZE 512
diff --git a/spinlock.c b/spinlock.c
--- a/spinlock.c
+++ b/spinlock.c
@@ -17,10 +17,9 @@ initlock(struct spinlock *lk, char *name)
   lk->cpu = 0;
 }

-// Acquire the lock.
-// Loops (spins) until the lock is acquired.
-// Holding a lock for a long time may cause
-// other CPUs to waste time spinning to acquire it.
+// Acquire a spin lock. Loops (spins) until the lock is acquired.
+// Holding a lock for a long time may cause other CPUs to waste time spinning to acquire it.
+// Spinlocks shouldn't be held across sleep(); for those cases, use sleeplocks.
 void
 acquire(struct spinlock *lk)
 {
@@ -115,3 +114,38 @@ popcli(void)
     sti();
 }

+void
+initsleeplock(struct sleeplock *l)
+{
+  l->locked = 0;
+}
+
+// Grab the sleeplock that is protected by spinl. Sleeplocks allow a process to lock
+// a data structure for long times, including across sleeps.  Other processes that try
+// to acquire a sleeplock will be put to sleep when another process hold the sleeplock.
+// To update status of the sleeplock atomically, the caller must hold spinl
+void
+acquire_sleeplock(struct sleeplock *sleepl, struct spinlock *spinl)
+{
+  while (sleepl->locked) {
+    sleep(sleepl, spinl);
+  }
+  sleepl->locked = 1;
+}
+
+// Release the sleeplock that is protected by a spin lock
+// Caller must hold the spinlock that protects the sleeplock
+void
+release_sleeplock(struct sleeplock *sleepl)
+{
+  sleepl->locked = 0;
+  wakeup(sleepl);
+}
+
+// Is the sleeplock acquired?
+// Caller must hold the spinlock that protects the sleeplock
+int
+acquired_sleeplock(struct sleeplock *sleepl)
+{
+  return sleepl->locked;
+}
diff --git a/spinlock.h b/spinlock.h
--- a/spinlock.h
+++ b/spinlock.h
@@ -1,4 +1,4 @@
-// Mutual exclusion lock.
+// Mutual exclusion lock for short code fragments
 struct spinlock {
   uint locked;       // Is the lock held?
@@ -9,3 +9,8 @@ struct spinlock {
                      // that locked the lock.
 };

+// Lock that maybe held across sleeps
+struct sleeplock {
+  uint locked;       // Is the lock held?
+};
+
diff --git a/sysfile.c b/sysfile.c
--- a/sysfile.c
+++ b/sysfile.c
@@ -5,6 +5,7 @@
 #include "mmu.h"
 #include "proc.h"
 #include "fs.h"
+#include "spinlock.h"
 #include "file.h"
 #include "fcntl.h"
```
