/*-
 * Copyright (c) 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * Berkeley Software Design Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_bio.c	8.11 (Berkeley) 01/09/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
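
/*
 * Illustrative note (not part of the original file): the assumption
 * above is that, for the last element of a tail queue, the head's
 * tqh_last points at that element's tqe_next field.  Expanded, the
 * <sys/queue.h> structures look roughly like:
 *
 *	struct bqueues {
 *		struct buf *tqh_first;	 first element
 *		struct buf **tqh_last;	 addr of last element's tqe_next
 *	};
 *	struct {
 *		struct buf *tqe_next;	 next element
 *		struct buf **tqe_prev;	 addr of previous tqe_next
 *	} b_freelist;
 *
 * so scanning for dp->tqh_last == &bp->b_freelist.tqe_next finds the
 * queue whose tail is bp.
 */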

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
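
/*
 * Worked example (illustrative, with assumed numbers): if bufpages is
 * 250 and nbuf is 100, then base = 2 and residual = 50, so the first
 * 50 buffer headers start with 3 * CLBYTES of memory and the rest with
 * 2 * CLBYTES; 50 * 3 + 50 * 2 = 250 hands out every cluster.  A
 * header left with no memory (base == 0 and i >= residual) goes on
 * BQ_EMPTY rather than BQ_AGE.
 */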

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
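
/*
 * Illustrative sketch (not part of the original file): a typical
 * bread() caller in a filesystem read path.  The routine name and its
 * arguments are hypothetical; only the buffer-cache calls are real.
 */
#ifdef notdef
static int
example_read(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	/* bread() sleeps in biowait() until the I/O completes. */
	if (error = bread(vp, lbn, bsize, cred, &bp)) {
		brelse(bp);		/* buffer is returned even on error */
		return (error);
	}
	/* ... copy out of bp->b_data ... */
	brelse(bp);			/* hand the buffer back to the cache */
	return (0);
}
#endif /* notdef */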

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size, 0, 0);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else {
			trace(TR_BREADHIT, pack(vp, size), blkno);
		}
	}

	/*
	 * If there's read-ahead block(s), start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
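
/*
 * Illustrative sketch (not part of the original file): sequential
 * reads commonly use breadn() to overlap the next blocks' I/O with
 * the caller's use of the current block.  Names other than the
 * buffer-cache routines are hypothetical.
 */
#ifdef notdef
static int
example_readahead(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t rablks[2];
	int rasizes[2], error;

	rablks[0] = lbn + 1;	rasizes[0] = bsize;
	rablks[1] = lbn + 2;	rasizes[1] = bsize;
	/*
	 * Only the block named by lbn is waited for; the read-ahead
	 * buffers are started B_ASYNC and released from biodone().
	 */
	error = breadn(vp, lbn, bsize, rablks, rasizes, 2, cred, &bp);
	/* ... on success, use bp->b_data ... */
	brelse(bp);
	return (error);
}
#endif /* notdef */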

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	if ((bp->b_flags & B_ASYNC) == 0 &&
	    bp->b_vp && bp->b_vp->v_mount &&
	    (bp->b_vp->v_mount->mnt_flag & MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}
	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	bp->b_flags |= B_WRITEINPROG;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		if (bp->b_flags & B_EINTR) {
			bp->b_flags &= ~B_EINTR;
			error = EINTR;
		}
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
void
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}
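
/*
 * Illustrative sketch (not part of the original file): the classic
 * delayed-write pattern, used when a caller updates only part of a
 * block and expects to touch it again soon.  The routine name and
 * arguments are hypothetical.
 */
#ifdef notdef
static int
example_small_update(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
		brelse(bp);
		return (error);
	}
	/* ... modify a few bytes of bp->b_data ... */
	bdwrite(bp);		/* mark dirty; the disk write happens later */
	return (0);
}
#endif /* notdef */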

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
void
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
void
brelse(bp)
	register struct buf *bp;
{
	register struct bqueues *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (needbuffer) {
		needbuffer = 0;
		wakeup((caddr_t)&needbuffer);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bufqueues[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bufqueues[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bufqueues[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bufqueues[BQ_AGE];
		else
			flist = &bufqueues[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
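
/*
 * Illustrative usage note (not part of the original file): a caller
 * that knows it will not need a block again soon can set the B_AGE
 * hint before releasing, so the buffer lands on the AGE queue and is
 * reused ahead of the LRU buffers:
 *
 *	bp->b_flags |= B_AGE;
 *	brelse(bp);
 */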

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;

	for (bp = BUFHASH(vp, blkno)->lh_first; bp; bp = bp->b_hash.le_next)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (bp);
	return (NULL);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	register struct buf *bp;
	struct bufhashhdr *dp;
	int s, error;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"getblk", slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto loop;
		}
		/*
		 * The test for B_INVAL is moved down here, since there
		 * are cases where B_INVAL is set before VOP_BWRITE() is
		 * called and for NFS, the process cannot be allowed to
		 * allocate a new buffer for the same block until the write
		 * back to the server has been completed (i.e. B_BUSY clears).
		 */
		if (bp->b_flags & B_INVAL) {
			splx(s);
			continue;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			VOP_BWRITE(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/*
	 * The loop back to the top when getnewbuf() fails is because
	 * stateless filesystems like NFS have no node locks. Thus,
	 * there is a slight chance that more than one process will
	 * try and getnewbuf() for the same block concurrently when
	 * the first sleeps in getnewbuf(). So after a sleep, go back
	 * up to the top to check the hash lists again.
	 */
	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
		goto loop;
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
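
/*
 * Illustrative sketch (not part of the original file): creating a new
 * block with getblk() and writing it synchronously, the pattern for a
 * block whose previous contents do not matter.  The routine name and
 * arguments are hypothetical.
 */
#ifdef notdef
static int
example_new_block(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	/* The old contents are undefined unless the block was cached. */
	bzero(bp->b_data, bsize);
	/* ... fill in bp->b_data ... */
	return (bwrite(bp));	/* synchronous; bwrite() releases bp */
}
#endif /* notdef */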

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	while ((bp = getnewbuf(0, 0)) == NULL)
		/* void */;
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	binshash(bp, &invalhash);
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	allocbuf(bp, size);
	return (bp);
}
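
/*
 * Illustrative usage note (not part of the original file): geteblk()
 * returns an anonymous buffer associated with no vnode, useful as
 * scratch space for raw transfers; the caller frees it with brelse():
 *
 *	bp = geteblk(bsize);		(bsize at most MAXBSIZE)
 *	... use bp->b_data ...
 *	brelse(bp);
 */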

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		if ((ep = bufqueues[BQ_EMPTY].tqh_first) == NULL)
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		while ((bp = getnewbuf(0, 0)) == NULL)
			/* void */;
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
		    &((char *)tp->b_data)[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &invalhash);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
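
/*
 * Worked example (illustrative, with assumed numbers): shrinking a
 * buffer from 3 * CLBYTES to 1 * CLBYTES pagemove()s the trailing
 * 2 * CLBYTES into a header taken from BQ_EMPTY, which is then
 * released marked B_INVAL so brelse() queues it at the front of
 * BQ_AGE.  Growing works the other way: memory is stolen from
 * getnewbuf() victims a piece at a time until b_bufsize reaches the
 * rounded-up size, and any victim drained to zero bytes ends up back
 * on BQ_EMPTY.
 */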

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	register struct bqueues *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
		if (dp->tqh_first)
			break;
	if (dp == bufqueues) {		/* no free blocks */
		needbuffer = 1;
		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
			"getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}
	bp = dp->tqh_first;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
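
/*
 * Illustrative sketch (not part of the original file): an asynchronous
 * consumer can arrange a completion callback instead of sleeping in
 * biowait().  Note that with B_CALL set, biodone() does not release
 * the buffer; the callback must.  The handler names are hypothetical,
 * and bp is assumed to be already set up for the transfer.
 */
#ifdef notdef
static void
example_done(bp)
	register struct buf *bp;
{

	/* Called from biodone(); B_CALL has already been cleared. */
	brelse(bp);
}

static void
example_start(bp)
	register struct buf *bp;
{

	bp->b_flags |= B_CALL | B_ASYNC;
	bp->b_iodone = example_done;
	VOP_STRATEGY(bp);	/* example_done() runs at I/O completion */
}
#endif /* notdef */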

/*
 * Return a count of the buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int ret;

	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].tqh_first;
	    bp; bp = (struct buf *)bp->b_freelist.tqe_next)
		++ret;
	return (ret);
}

#ifdef DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */