path: root/kernel/bio.c
// Buffer cache.
//
// The buffer cache holds cached copies of disk block contents in
// buf structures, organized as a hash table of per-bucket linked
// lists indexed by block number.  Caching disk blocks in memory
// reduces the number of disk reads and also provides a
// synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
//     so do not keep them longer than necessary.
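//
// Example (a sketch, not kernel code): a typical read-modify-write of one
// block through this interface.  "dev", "blockno", and "src" are
// placeholders, and inside a file-system transaction log_write() from
// log.c is used instead of calling bwrite() directly.
//
//   struct buf *b = bread(dev, blockno);
//   memmove(b->data, src, BSIZE);   // modify the cached copy
//   bwrite(b);                      // write it back to disk
//   brelse(b);                      // release; b must not be used afterwards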


#include "types.h"
#include "param.h"
#include "spinlock.h"
#include "sleeplock.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
#include "buf.h"

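// Number of hash buckets for the buffer cache; 13 is prime, which helps
// spread block numbers evenly across the buckets.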
#define N_BUCKETS 13

struct bucket {
  struct spinlock lock;
  // Doubly linked list of this bucket's buffers, through prev/next.
  // head.next is the most recently used, head.prev the least.
  struct buf head;
};
struct {
  struct spinlock lock;
  struct buf buf[NBUF];
  struct bucket buckets[N_BUCKETS];
} bcache;

static inline uint hash(uint blockno) {
  return blockno % N_BUCKETS;
}

void
binit(void)
{
  struct buf *b;

  initlock(&bcache.lock, "bcache");

  // Initialize each bucket.  initlock() stores the name pointer rather
  // than copying the string, so each bucket needs its own name storage
  // instead of one shared static buffer.
  static char lock_names[N_BUCKETS][16];
  for (int i = 0; i < N_BUCKETS; i++) {
    snprintf(lock_names[i], sizeof(lock_names[i]), "bio.bucket.%d", i);
    initlock(&bcache.buckets[i].lock, lock_names[i]);
    bcache.buckets[i].head.prev = &bcache.buckets[i].head;
    bcache.buckets[i].head.next = &bcache.buckets[i].head;
  }

  // Create the linked list of buffers.  Every buf's blockno starts out
  // as zero, so all of them initially hash to bucket 0.
  for(b = bcache.buf; b < bcache.buf+NBUF; b++){
    b->next = bcache.buckets[0].head.next;
    b->prev = &bcache.buckets[0].head;
    initsleeplock(&b->lock, "buffer");
    bcache.buckets[0].head.next->prev = b;
    bcache.buckets[0].head.next = b;
  }
}

// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;
  uint bucket_index = hash(blockno);

  acquire(&bcache.buckets[bucket_index].lock);

  // Is the block already cached?
  for(b = bcache.buckets[bucket_index].head.next; b != &bcache.buckets[bucket_index].head; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.buckets[bucket_index].lock);
      acquiresleep(&b->lock);
      return b;
    }
  }

  // Not cached.
  // Try to recycle the least recently used (LRU) unused buffer
  // from this bucket's own list first.
  struct buf *unused = (struct buf *)0;
  for(b = bcache.buckets[bucket_index].head.prev; b != &bcache.buckets[bucket_index].head; b = b->prev){
    if(b->refcnt == 0) {
      if (!unused || unused->ticks > b->ticks) {
        unused = b;
      }
    }
  }
  if (unused) {
    unused->dev = dev;
    unused->blockno = blockno;
    unused->valid = 0;
    unused->refcnt = 1;
    unused->ticks = ticks;
    release(&bcache.buckets[bucket_index].lock);
    acquiresleep(&unused->lock);
    return unused;
  }
  // Still no free buffer in our own bucket.
  // Steal the least recently used (LRU) unused buffer from another
  // bucket and move it into our hash bucket.
  // To avoid deadlock, bucket locks are always acquired in increasing
  // index order (0 .. N_BUCKETS-1), holding at most two at a time.

  // Release our bucket's lock so the locks below can be taken in order.
  release(&bcache.buckets[bucket_index].lock);
  int i;
  for (i = 0; i < N_BUCKETS; i++) {
    if (i == bucket_index) {
      acquire(&bcache.buckets[bucket_index].lock);
    } else {
      acquire(&bcache.buckets[i].lock);
      for(b = bcache.buckets[i].head.prev; b != &bcache.buckets[i].head; b = b->prev){
        if(b->refcnt == 0) {
          if (!unused || unused->ticks > b->ticks) {
            unused = b;
          }
        }
      }
      if (unused) {
        if (i < bucket_index) {
          acquire(&bcache.buckets[bucket_index].lock);
        }
        break;
      } else {
        release(&bcache.buckets[i].lock);
      }
    }
  }
  if (unused) {
    // Our bucket's lock was dropped above, so another process may have
    // cached this block in the meantime.  Re-check under the lock before
    // installing the stolen buffer, to avoid two buffers for one block.
    for(b = bcache.buckets[bucket_index].head.next; b != &bcache.buckets[bucket_index].head; b = b->next){
      if(b->dev == dev && b->blockno == blockno){
        b->refcnt++;
        release(&bcache.buckets[i].lock);
        release(&bcache.buckets[bucket_index].lock);
        acquiresleep(&b->lock);
        return b;
      }
    }

    // Remove the stolen buffer from its old bucket.
    unused->prev->next = unused->next;
    unused->next->prev = unused->prev;
    release(&bcache.buckets[i].lock);

    unused->dev = dev;
    unused->blockno = blockno;
    unused->valid = 0;
    unused->refcnt = 1;
    unused->ticks = ticks;
    // Link it into our hash bucket, right after the head (MRU position).
    unused->next = bcache.buckets[bucket_index].head.next;
    unused->prev = &bcache.buckets[bucket_index].head;
    bcache.buckets[bucket_index].head.next->prev = unused;
    bcache.buckets[bucket_index].head.next = unused;
    release(&bcache.buckets[bucket_index].lock);

    acquiresleep(&unused->lock);
    return unused;
  }
  panic("bget: no buffers");
}

// Return a locked buf with the contents of the indicated block.
struct buf*
bread(uint dev, uint blockno)
{
  struct buf *b;

  b = bget(dev, blockno);
  if(!b->valid) {
    virtio_disk_rw(b, 0);
    b->valid = 1;
  }
  return b;
}

// Write b's contents to disk.  Must be locked.
void
bwrite(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("bwrite");
  virtio_disk_rw(b, 1);
}

// Release a locked buffer.
// Move to the head of the most-recently-used list.
void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");

  releasesleep(&b->lock);

  uint bucket_index = hash(b->blockno);
  acquire(&bcache.buckets[bucket_index].lock);

  b->refcnt--;
  if (b->refcnt == 0) {
    // No one is using it: move it to the MRU position of this bucket's
    // list and record the release time for LRU eviction.
    b->next->prev = b->prev;
    b->prev->next = b->next;
    b->next = bcache.buckets[bucket_index].head.next;
    b->prev = &bcache.buckets[bucket_index].head;
    bcache.buckets[bucket_index].head.next->prev = b;
    bcache.buckets[bucket_index].head.next = b;
    b->ticks = ticks;
  }
  
  release(&bcache.buckets[bucket_index].lock);
}

void
bpin(struct buf *b) {
  uint id = hash(b->blockno);
  acquire(&bcache.buckets[id].lock);
  b->refcnt++;
  release(&bcache.buckets[id].lock);
}

void
bunpin(struct buf *b) {
  uint id = hash(b->blockno);
  acquire(&bcache.buckets[id].lock);
  b->refcnt--;
  release(&bcache.buckets[id].lock);
}
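
// bpin/bunpin let the log (log.c) keep a buffer in the cache across a
// transaction by holding an extra reference without its sleep-lock.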