author | Mole Shang <[email protected]> | 2024-02-16 10:05:03 +0800
committer | Mole Shang <[email protected]> | 2024-02-16 10:05:03 +0800
commit | 99015f3a985b2fd051606636743a2a2969b216e8 (patch)
tree | e4b6336f4577ecc47b08165d40f081a810d0bda9
parent | 16f51058790ba5779cf46de18e6a266f2fac0a1d (diff)
download | xv6-labs-99015f3a985b2fd051606636743a2a2969b216e8.tar.gz, xv6-labs-99015f3a985b2fd051606636743a2a2969b216e8.tar.bz2, xv6-labs-99015f3a985b2fd051606636743a2a2969b216e8.zip
lab lock/bcache: finish
-rw-r--r-- | kernel/bio.c | 148
-rw-r--r-- | kernel/buf.h | 1
-rw-r--r-- | user/user.h | 2
3 files changed, 119 insertions, 32 deletions
diff --git a/kernel/bio.c b/kernel/bio.c
index 60d91a6..a1f4474 100644
--- a/kernel/bio.c
+++ b/kernel/bio.c
@@ -23,16 +23,26 @@
 #include "fs.h"
 #include "buf.h"
 
-struct {
-  struct spinlock lock;
-  struct buf buf[NBUF];
+#define N_BUCKETS 13
+struct bucket {
+  struct spinlock lock;
 
   // Linked list of all buffers, through prev/next.
   // Sorted by how recently the buffer was used.
   // head.next is most recent, head.prev is least.
+  // struct buf head;
   struct buf head;
+};
+struct {
+  struct spinlock lock;
+  struct buf buf[NBUF];
+  struct bucket buckets[N_BUCKETS];
 } bcache;
 
+static inline uint hash(uint blockno) {
+  return blockno % N_BUCKETS;
+}
+
 void
 binit(void)
 {
@@ -40,15 +50,24 @@ binit(void)
 
   initlock(&bcache.lock, "bcache");
 
+  // init each bucket
+  for (int i = 0; i < N_BUCKETS; i++) {
+    static char lock_name[16];
+    snprintf(lock_name, sizeof(lock_name), "bio.bucket.%d", i);
+    initlock(&bcache.buckets[i].lock, lock_name);
+    bcache.buckets[i].head.prev = &bcache.buckets[i].head;
+    bcache.buckets[i].head.next = &bcache.buckets[i].head;
+  }
+
   // Create linked list of buffers
-  bcache.head.prev = &bcache.head;
-  bcache.head.next = &bcache.head;
+  // since for now, blockno is all zero,
+  // they are all linked to bucket 0
   for(b = bcache.buf; b < bcache.buf+NBUF; b++){
-    b->next = bcache.head.next;
-    b->prev = &bcache.head;
+    b->next = bcache.buckets[0].head.next;
+    b->prev = &bcache.buckets[0].head;
     initsleeplock(&b->lock, "buffer");
-    bcache.head.next->prev = b;
-    bcache.head.next = b;
+    bcache.buckets[0].head.next->prev = b;
+    bcache.buckets[0].head.next = b;
   }
 }
 
@@ -59,32 +78,92 @@
 static struct buf*
 bget(uint dev, uint blockno)
 {
   struct buf *b;
+  uint bucket_index = hash(blockno);
 
-  acquire(&bcache.lock);
+  acquire(&bcache.buckets[bucket_index].lock);
 
   // Is the block already cached?
-  for(b = bcache.head.next; b != &bcache.head; b = b->next){
+  for(b = bcache.buckets[bucket_index].head.next; b != &bcache.buckets[bucket_index].head; b = b->next){
     if(b->dev == dev && b->blockno == blockno){
       b->refcnt++;
-      release(&bcache.lock);
+      release(&bcache.buckets[bucket_index].lock);
       acquiresleep(&b->lock);
       return b;
     }
   }
 
   // Not cached.
-  // Recycle the least recently used (LRU) unused buffer.
-  for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
+  // Recycle the least recently used (LRU) unused buffer
+  // of its own list first.
+  struct buf *unused = (struct buf *)0;
+  for(b = bcache.buckets[bucket_index].head.prev; b != &bcache.buckets[bucket_index].head; b = b->prev){
     if(b->refcnt == 0) {
-      b->dev = dev;
-      b->blockno = blockno;
-      b->valid = 0;
-      b->refcnt = 1;
-      release(&bcache.lock);
-      acquiresleep(&b->lock);
-      return b;
+      if (!unused || unused->ticks > b->ticks) {
+        unused = b;
+      }
     }
   }
+  if (unused) {
+    unused->dev = dev;
+    unused->blockno = blockno;
+    unused->valid = 0;
+    unused->refcnt = 1;
+    unused->ticks = ticks;
+    release(&bcache.buckets[bucket_index].lock);
+    acquiresleep(&unused->lock);
+    return unused;
+  }
+  // Sadly still no usable buf,
+  // Steal the least recently used (LRU) unused buffer in others list
+  // and move to our hash bucket.
+  // To avoid deadlocks, scan with order bucket 0 -> N_BUCKETS.
+
+  // release bigger bucket_index lock to acquire other locks later
+  release(&bcache.buckets[bucket_index].lock);
+  int i;
+  for (i = 0; i < N_BUCKETS; i++) {
+    if (i == bucket_index) {
+      acquire(&bcache.buckets[bucket_index].lock);
+    } else {
+      acquire(&bcache.buckets[i].lock);
+      for(b = bcache.buckets[i].head.prev; b != &bcache.buckets[i].head; b = b->prev){
+        if(b->refcnt == 0) {
+          if (!unused || unused->ticks > b->ticks) {
+            unused = b;
+          }
+        }
+      }
+      if (unused) {
+        if (i < bucket_index) {
+          acquire(&bcache.buckets[bucket_index].lock);
+        }
+        break;
+      } else {
+        release(&bcache.buckets[i].lock);
+      }
+    }
+  }
+  if (unused) {
+    // remove from old bucket
+    unused->prev->next = unused->next;
+    unused->next->prev = unused->prev;
+    release(&bcache.buckets[i].lock);
+
+    unused->dev = dev;
+    unused->blockno = blockno;
+    unused->valid = 0;
+    unused->refcnt = 1;
+    unused->ticks = ticks;
+    // add to our hash bucket, after head
+    unused->next = bcache.buckets[bucket_index].head.next;
+    unused->prev = &bcache.buckets[bucket_index].head;
+    bcache.buckets[bucket_index].head.next->prev = unused;
+    bcache.buckets[bucket_index].head.next = unused;
+    release(&bcache.buckets[bucket_index].lock);
+
+    acquiresleep(&unused->lock);
+    return unused;
+  }
   panic("bget: no buffers");
 }
@@ -121,33 +200,38 @@ brelse(struct buf *b)
   releasesleep(&b->lock);
 
-  acquire(&bcache.lock);
+  uint bucket_index = hash(b->blockno);
+  acquire(&bcache.buckets[bucket_index].lock);
+
   b->refcnt--;
   if (b->refcnt == 0) {
-    // no one is waiting for it.
+    // no one is waiting for it, move it to MRU
     b->next->prev = b->prev;
     b->prev->next = b->next;
-    b->next = bcache.head.next;
-    b->prev = &bcache.head;
-    bcache.head.next->prev = b;
-    bcache.head.next = b;
+    b->next = bcache.buckets[bucket_index].head.next;
+    b->prev = &bcache.buckets[bucket_index].head;
+    bcache.buckets[bucket_index].head.next->prev = b;
+    bcache.buckets[bucket_index].head.next = b;
+    b->ticks = ticks;
   }
 
-  release(&bcache.lock);
+  release(&bcache.buckets[bucket_index].lock);
 }
 
 void
 bpin(struct buf *b) {
-  acquire(&bcache.lock);
+  uint id = hash(b->blockno);
+  acquire(&bcache.buckets[id].lock);
   b->refcnt++;
-  release(&bcache.lock);
+  release(&bcache.buckets[id].lock);
 }
 
 void
 bunpin(struct buf *b) {
-  acquire(&bcache.lock);
+  uint id = hash(b->blockno);
+  acquire(&bcache.buckets[id].lock);
   b->refcnt--;
-  release(&bcache.lock);
+  release(&bcache.buckets[id].lock);
 }
diff --git a/kernel/buf.h b/kernel/buf.h
index 4616e9e..bbef407 100644
--- a/kernel/buf.h
+++ b/kernel/buf.h
@@ -8,5 +8,6 @@ struct buf {
   struct buf *prev; // LRU cache list
   struct buf *next;
   uchar data[BSIZE];
+  uint ticks; // derived from global variable ticks (trap.c), record ticks when last modified
 };
diff --git a/user/user.h b/user/user.h
index 31453f5..3544ac4 100644
--- a/user/user.h
+++ b/user/user.h
@@ -56,4 +56,6 @@ void free(void*);
 int atoi(const char*);
 int memcmp(const void *, const void *, uint);
 void *memcpy(void *, const void *, uint);
+
+// statistics.c
 int statistics(void*, int);
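For context, the patch replaces the single bcache.lock with one spinlock per hash bucket (13 buckets indexed by blockno % N_BUCKETS) and picks eviction victims by a per-buffer ticks timestamp instead of position in one global LRU list. Below is a minimal user-space sketch of that locking scheme, assuming pthread mutexes in place of xv6 spinlocks; the names struct entry, cache_get and cache_put are invented for illustration, and instead of parking every buffer in bucket 0 and stealing across buckets as the patch does, the sketch pre-distributes entries round-robin so the cross-bucket steal path can be left out.

/*
 * Sketch only: a fixed pool of entries spread over N_BUCKETS hash buckets,
 * one lock per bucket, and a timestamp-based LRU pick inside each bucket.
 * pthread mutexes stand in for xv6 spinlocks; none of these names are xv6 APIs.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_BUCKETS 13
#define N_ENTRIES 30

struct entry {
  int valid;                  // has this slot ever held a block?
  unsigned blockno;
  int refcnt;
  unsigned long ticks;        // last-release time, like buf->ticks in the patch
  struct entry *prev, *next;  // per-bucket doubly linked list
};

struct bucket {
  pthread_mutex_t lock;       // protects this bucket's list and refcnt fields
  struct entry head;          // head.next is the most recently used entry
};

static struct bucket buckets[N_BUCKETS];
static struct entry entries[N_ENTRIES];
static atomic_ulong now_ticks;  // stands in for the kernel's global ticks counter

static unsigned hash(unsigned blockno) { return blockno % N_BUCKETS; }

static void cache_init(void) {
  for (int i = 0; i < N_BUCKETS; i++) {
    pthread_mutex_init(&buckets[i].lock, NULL);
    buckets[i].head.prev = buckets[i].head.next = &buckets[i].head;
  }
  for (int i = 0; i < N_ENTRIES; i++) {   // round-robin instead of "all in bucket 0"
    struct bucket *bkt = &buckets[i % N_BUCKETS];
    struct entry *e = &entries[i];
    e->next = bkt->head.next;
    e->prev = &bkt->head;
    bkt->head.next->prev = e;
    bkt->head.next = e;
  }
}

// Return a referenced entry for blockno, recycling the bucket's
// least-recently-used free entry if the block is not cached yet.
static struct entry *cache_get(unsigned blockno) {
  struct bucket *bkt = &buckets[hash(blockno)];
  pthread_mutex_lock(&bkt->lock);
  for (struct entry *e = bkt->head.next; e != &bkt->head; e = e->next) {
    if (e->valid && e->blockno == blockno) {   // already cached: bump refcnt
      e->refcnt++;
      pthread_mutex_unlock(&bkt->lock);
      return e;
    }
  }
  struct entry *victim = NULL;               // smallest ticks == least recently used
  for (struct entry *e = bkt->head.prev; e != &bkt->head; e = e->prev)
    if (e->refcnt == 0 && (!victim || e->ticks < victim->ticks))
      victim = e;
  if (victim) {
    victim->valid = 1;
    victim->blockno = blockno;
    victim->refcnt = 1;
  }
  pthread_mutex_unlock(&bkt->lock);
  return victim;   // NULL here is where the real bget() starts stealing
}

// Drop a reference; on the last one, move the entry to the MRU position
// and stamp it, mirroring the patched brelse().
static void cache_put(struct entry *e) {
  struct bucket *bkt = &buckets[hash(e->blockno)];
  pthread_mutex_lock(&bkt->lock);
  if (--e->refcnt == 0) {
    e->prev->next = e->next;
    e->next->prev = e->prev;
    e->next = bkt->head.next;
    e->prev = &bkt->head;
    bkt->head.next->prev = e;
    bkt->head.next = e;
    e->ticks = ++now_ticks;   // atomic: different bucket locks do not serialize it
  }
  pthread_mutex_unlock(&bkt->lock);
}

int main(void) {
  cache_init();
  struct entry *a = cache_get(17);   // 17 % 13 == 4, so only bucket 4's lock is taken
  struct entry *b = cache_get(17);   // second lookup hits the cache, refcnt -> 2
  if (a && b)
    printf("block 17 in bucket %u, refcnt %d\n", hash(17), a->refcnt);
  cache_put(b);
  cache_put(a);
  return 0;
}

The sketch builds with cc -pthread. Two design points carry over from the patch itself: a prime bucket count such as 13 reduces clustering when block numbers share a common factor with the bucket count, and when a bucket has no free buffer the patch releases its own lock and scans the other buckets in ascending index order, so every CPU acquires bucket locks in the same order and cannot deadlock while stealing.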