summaryrefslogtreecommitdiff
path: root/bio.c
blob: 2b17a52167a0022d40694b2a37ac670271641877 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
#include "types.h"
#include "param.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "defs.h"
#include "spinlock.h"
#include "buf.h"

struct buf buf[NBUF];            // the global buffer pool
struct spinlock buf_table_lock;  // guards the B_BUSY flags in buf[]

// Initialize the buffer cache: set up the spinlock that guards
// the buf[] table.  Called once at boot, before any block I/O.
void
binit(void)
{
  initlock(&buf_table_lock, "buf_table");
}

// Allocate a buffer from the global pool.
// Scans buf[] for an entry whose B_BUSY flag is clear, marks it
// busy under buf_table_lock, and returns it.  If every buffer is
// busy, sleeps on the buf channel (woken by brelse()) and rescans.
// Never returns 0: the outer loop spins until a buffer frees up.
// Fix: declare the parameter list as (void) -- the original empty
// () is a pre-standard "unspecified parameters" declaration.
struct buf *
getblk(void)
{
  int i;

  acquire(&buf_table_lock);

  // XXX need to lock the block even if not caching, to
  // avoid read modify write problems.

  for(;;){
    for(i = 0; i < NBUF; i++){
      if((buf[i].flags & B_BUSY) == 0){
        buf[i].flags |= B_BUSY;
        release(&buf_table_lock);
        return &buf[i];
      }
    }
    // All buffers busy: wait for a brelse() to wake us, then rescan.
    sleep(buf, &buf_table_lock);
  }
}

// Read one sector from dev into a freshly allocated buffer and
// return it.  Synchronous: sleeps until the disk request completes.
// Caller owns the returned buffer and must free it with brelse().
struct buf *
bread(uint dev, uint sector)
{
  struct buf *b;
  void *req;
  extern struct spinlock ide_lock;

  b = getblk();

  // Queue the read under ide_lock, then sleep on the request
  // handle until the disk side wakes us, and reap the request.
  acquire(&ide_lock);
  req = ide_start_rw(dev & 0xff, sector, b->data, 1, 1);
  sleep(req, &ide_lock);
  ide_finish(req);
  release(&ide_lock);

  return b;
}

// Write one sector of b->data to dev.  Synchronous: queues the
// request under ide_lock and sleeps until the disk completes it.
void
bwrite(uint dev, struct buf *b, uint sector)
{
  extern struct spinlock ide_lock;
  void *req;

  acquire(&ide_lock);
  req = ide_start_rw(dev & 0xff, sector, b->data, 1, 0);
  sleep(req, &ide_lock);
  ide_finish(req);
  release(&ide_lock);
}

// Release a buffer obtained from getblk()/bread(): clear its
// B_BUSY flag and wake any process waiting in getblk().
// Panics if the buffer was not marked busy (double release).
void
brelse(struct buf *b)
{
  if(!(b->flags & B_BUSY))
    panic("brelse");

  acquire(&buf_table_lock);
  b->flags &= ~B_BUSY;
  // Let sleepers in getblk() rescan the pool.
  wakeup(buf);
  release(&buf_table_lock);
}