xref: /xv6-public/bio.c (revision 4d39b633)
// Buffer cache.
//
// The buffer cache is a linked list of buf structures
// holding cached copies of disk block contents.
// Each buf has two state bits B_BUSY and B_VALID.
// If B_BUSY is set, it means that some code is currently
// using buf, so other code is not allowed to use it.
// To wait for a buffer that is B_BUSY, sleep on buf.
// (See bget below.)
//
// If B_VALID is set, it means that the sector in b->data is
// the same as on the disk. If B_VALID is not set, the contents
// of buf must be initialized, often by calling bread,
// before being used.
//
// After making changes to a buf's memory, call bwrite to flush
// the changes out to disk, to keep the disk and memory copies
// in sync.
//
// When finished with a buffer, call brelse to release the buffer
// (i.e., clear B_BUSY), so that others can access it.
//
// Bufs that are not B_BUSY are fair game for reuse for other
// disk blocks.  It is not allowed to use a buf after calling brelse.

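// A minimal usage sketch of the interface above (illustrative only, not
// part of this file): a caller reads a sector, edits the cached copy,
// writes it back, and releases the buffer.  The 512-byte copy assumes
// the usual xv6 sector size; `src` is a hypothetical caller buffer.
//
//   struct buf *b = bread(dev, sector);  // locked (B_BUSY) and valid (B_VALID)
//   memmove(b->data, src, 512);          // modify the in-memory copy
//   bwrite(b);                           // flush the change to disk
//   brelse(b);                           // unlock; b must not be used afterward
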
#include "types.h"
#include "param.h"
#include "x86.h"
#include "mmu.h"
#include "proc.h"
#include "defs.h"
#include "spinlock.h"
#include "buf.h"

struct buf buf[NBUF];
struct spinlock buf_table_lock;

// Linked list of all buffers, through prev/next.
// bufhead.next is most recently used.
// bufhead.prev is least recently used.
struct buf bufhead;

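// Illustrative picture of the list ordering (not part of this file):
// bufhead is a dummy head of a circular doubly-linked list, so
//
//   bufhead.next        -> most recently used buffer
//   bufhead.next->next  -> next most recently used
//   ...
//   bufhead.prev        -> least recently used buffer
//
// bget scans forward from bufhead.next when looking for a cache hit and
// backward from bufhead.prev when looking for a buffer to recycle.
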
void
binit(void)
{
  struct buf *b;

  initlock(&buf_table_lock, "buf_table");

  // Create linked list of buffers
  bufhead.prev = &bufhead;
  bufhead.next = &bufhead;
  for(b = buf; b < buf+NBUF; b++){
    b->next = bufhead.next;
    b->prev = &bufhead;
    bufhead.next->prev = b;
    bufhead.next = b;
  }
}

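// binit is expected to run exactly once at boot (xv6 calls it from
// main()), before the first use of bread.  After the loop above every
// buf is on the list; buf[NBUF-1], inserted last, sits at the MRU end.
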
// Look through buffer cache for sector on device dev.
// If not found, recycle an unused (not B_BUSY) buffer.
// In either case, return a locked (B_BUSY) buffer.
static struct buf*
bget(uint dev, uint sector)
{
  struct buf *b;

  acquire(&buf_table_lock);

 loop:
  // Try for cached block.
  for(b = bufhead.next; b != &bufhead; b = b->next){
    if((b->flags & (B_BUSY|B_VALID)) &&
       b->dev == dev && b->sector == sector){
      if(b->flags & B_BUSY){
        sleep(buf, &buf_table_lock);
        goto loop;
      }
      b->flags |= B_BUSY;
      release(&buf_table_lock);
      return b;
    }
  }

  // Allocate fresh block.
  for(b = bufhead.prev; b != &bufhead; b = b->prev){
    if((b->flags & B_BUSY) == 0){
      b->flags = B_BUSY;
      b->dev = dev;
      b->sector = sector;
      release(&buf_table_lock);
      return b;
    }
  }
  panic("bget: no buffers");
}

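// Note on the retry above: waiters sleep on the address of the global
// buf[] array, and brelse calls wakeup(buf) on that same channel, so
// every waiter wakes up and re-runs the scan from `loop` to recheck the
// cache while holding buf_table_lock.
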
// Return a B_BUSY buf with the contents of the indicated
// disk sector.
struct buf*
bread(uint dev, uint sector)
{
  struct buf *b;

  b = bget(dev, sector);
  if(b->flags & B_VALID)
    return b;

  b->flags &= ~B_WRITE;
  ide_rw(b);
  b->flags |= B_VALID;

  return b;
}

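// Illustrative read-only sketch (not part of this file): copying a
// structure out of a sector, in the style of xv6's readsb.  The `sb`
// variable and the sector number 1 are assumptions made for the example.
//
//   struct buf *bp;
//   bp = bread(dev, 1);                  // e.g., the on-disk superblock
//   memmove(&sb, bp->data, sizeof(sb));  // copy out while holding the buffer
//   brelse(bp);                          // release; bp->data is off limits now
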
// Write b's contents to disk.
// The buffer must be locked (B_BUSY set); bwrite panics otherwise.
void
bwrite(struct buf *b)
{
  if((b->flags & B_BUSY) == 0)
    panic("bwrite");
  b->flags |= B_WRITE;
  ide_rw(b);
  b->flags |= B_VALID;
}

// Release the buffer b: move it to the front of the MRU list,
// clear B_BUSY, and wake any processes waiting for a buffer.
void
brelse(struct buf *b)
{
  if((b->flags & B_BUSY) == 0)
    panic("brelse");

  acquire(&buf_table_lock);

  b->next->prev = b->prev;
  b->prev->next = b->next;
  b->next = bufhead.next;
  b->prev = &bufhead;
  bufhead.next->prev = b;
  bufhead.next = b;

  b->flags &= ~B_BUSY;
  wakeup(buf);

  release(&buf_table_lock);
}