xref: /original-bsd/sys/kern/vfs_bio.c (revision 95ecee29)
1 /*-
2  * Copyright (c) 1986, 1989, 1993 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Berkeley Software Design Inc.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vfs_bio.c	8.3 (Berkeley) 09/21/93
11  */
12 
13 #include <sys/param.h>
14 #include <sys/proc.h>
15 #include <sys/buf.h>
16 #include <sys/vnode.h>
17 #include <sys/mount.h>
18 #include <sys/trace.h>
19 #include <sys/malloc.h>
20 #include <sys/resourcevar.h>
21 #include <libkern/libkern.h>
22 
23 /*
24  * Definitions for the buffer hash lists.
25  */
26 #define	BUFHASH(dvp, lbn)	\
27 	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
28 struct	list_entry *bufhashtbl, invalhash;
29 u_long	bufhash;
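
/*
 * Note on BUFHASH: hashinit() (called from bufinit() below) sizes the
 * table to a power of two and stores the corresponding mask in bufhash,
 * so the expression above just folds the vnode pointer and logical block
 * number together and masks the result down to a chain index.  As an
 * illustrative example with assumed numbers, a 512-chain table gives
 * bufhash == 511 and BUFHASH(vp, lbn) selects
 * &bufhashtbl[((int)vp / sizeof(*vp) + (int)lbn) & 511].
 */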
30 
31 /*
32  * Insq/Remq for the buffer hash lists.
33  */
34 #define	binshash(bp, dp)	list_enter_head(dp, bp, struct buf *, b_hash)
35 #define	bremhash(bp)		list_remove(bp, struct buf *, b_hash)
36 
37 /*
38  * Definitions for the buffer free lists.
39  */
40 #define	BQUEUES		4		/* number of free buffer queues */
41 
42 #define	BQ_LOCKED	0		/* super-blocks &c */
43 #define	BQ_LRU		1		/* lru, useful buffers */
44 #define	BQ_AGE		2		/* rubbish */
45 #define	BQ_EMPTY	3		/* buffer headers with no memory */
46 
47 struct queue_entry bufqueues[BQUEUES];
48 int needbuffer;
49 
50 /*
51  * Insq/Remq for the buffer free lists.
52  */
53 #define	binsheadfree(bp, dp) \
54 	queue_enter_head(dp, bp, struct buf *, b_freelist)
55 #define	binstailfree(bp, dp) \
56 	queue_enter_tail(dp, bp, struct buf *, b_freelist)
57 
58 void
59 bremfree(bp)
60 	struct buf *bp;
61 {
62 	struct queue_entry *dp;
63 
64 	/*
65 	 * We only calculate the head of the freelist when removing
66 	 * the last element of the list as that is the only time that
67 	 * it is needed (e.g. to reset the tail pointer).
68 	 */
69 	if (bp->b_freelist.qe_next == NULL) {
70 		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
71 			if (dp->qe_prev == &bp->b_freelist.qe_next)
72 				break;
73 		if (dp == &bufqueues[BQUEUES])
74 			panic("bremfree: lost tail");
75 	}
76 	queue_remove(dp, bp, struct buf *, b_freelist);
77 }
78 
79 /*
80  * Initialize buffers and hash links for buffers.
81  */
82 void
83 bufinit()
84 {
85 	register struct buf *bp;
86 	struct queue_entry *dp;
87 	register int i;
88 	int base, residual;
89 
90 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
91 		queue_init(dp);
92 	bufhashtbl = (struct list_entry *)hashinit(nbuf, M_CACHE, &bufhash);
93 	base = bufpages / nbuf;
94 	residual = bufpages % nbuf;
95 	for (i = 0; i < nbuf; i++) {
96 		bp = &buf[i];
97 		bzero((char *)bp, sizeof *bp);
98 		bp->b_dev = NODEV;
99 		bp->b_rcred = NOCRED;
100 		bp->b_wcred = NOCRED;
101 		bp->b_data = buffers + i * MAXBSIZE;
102 		if (i < residual)
103 			bp->b_bufsize = (base + 1) * CLBYTES;
104 		else
105 			bp->b_bufsize = base * CLBYTES;
106 		bp->b_flags = B_INVAL;
107 		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
108 		binsheadfree(bp, dp);
109 		binshash(bp, &invalhash);
110 	}
111 }
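
/*
 * The base/residual arithmetic above spreads bufpages evenly over the
 * nbuf headers.  Worked example with assumed values: if bufpages is 100
 * and nbuf is 30, then base = 3 and residual = 10, so the first 10
 * buffers start with 4 * CLBYTES of memory, the remaining 20 with
 * 3 * CLBYTES, and all 100 clusters of buffer memory are accounted for.
 */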
112 
113 /*
114  * Find the block in the buffer pool.
115  * If the buffer is not present, allocate a new buffer and load
116  * its contents according to the filesystem fill routine.
117  */
118 bread(vp, blkno, size, cred, bpp)
119 	struct vnode *vp;
120 	daddr_t blkno;
121 	int size;
122 	struct ucred *cred;
123 	struct buf **bpp;
124 {
125 	struct proc *p = curproc;		/* XXX */
126 	register struct buf *bp;
127 
128 	if (size == 0)
129 		panic("bread: size 0");
130 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
131 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
132 		trace(TR_BREADHIT, pack(vp, size), blkno);
133 		return (0);
134 	}
135 	bp->b_flags |= B_READ;
136 	if (bp->b_bcount > bp->b_bufsize)
137 		panic("bread");
138 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
139 		crhold(cred);
140 		bp->b_rcred = cred;
141 	}
142 	VOP_STRATEGY(bp);
143 	trace(TR_BREADMISS, pack(vp, size), blkno);
144 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
145 	return (biowait(bp));
146 }
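
/*
 * Typical use of bread(), sketched with assumed names (vp, lbn and bsize
 * come from the calling filesystem):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy data out of bp->b_data ...
 *	brelse(bp);
 *
 * The buffer comes back busy; the caller must brelse() it (or write it)
 * when finished.
 */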
147 
148 /*
149  * Operates like bread, but also starts I/O on the N specified
150  * read-ahead blocks.
151  */
152 breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
153 	struct vnode *vp;
154 	daddr_t blkno; int size;
155 	daddr_t rablkno[]; int rabsize[];
156 	int num;
157 	struct ucred *cred;
158 	struct buf **bpp;
159 {
160 	struct proc *p = curproc;		/* XXX */
161 	register struct buf *bp, *rabp;
162 	register int i;
163 
164 	bp = NULL;
165 	/*
166 	 * If the block is not memory resident,
167 	 * allocate a buffer and start I/O.
168 	 */
169 	if (!incore(vp, blkno)) {
170 		*bpp = bp = getblk(vp, blkno, size, 0, 0);
171 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
172 			bp->b_flags |= B_READ;
173 			if (bp->b_bcount > bp->b_bufsize)
174 				panic("breadn");
175 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
176 				crhold(cred);
177 				bp->b_rcred = cred;
178 			}
179 			VOP_STRATEGY(bp);
180 			trace(TR_BREADMISS, pack(vp, size), blkno);
181 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
182 		} else {
183 			trace(TR_BREADHIT, pack(vp, size), blkno);
184 		}
185 	}
186 
187 	/*
188 	 * If there's read-ahead block(s), start I/O
189 	 * on them also (as above).
190 	 */
191 	for (i = 0; i < num; i++) {
192 		if (incore(vp, rablkno[i]))
193 			continue;
194 		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
195 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
196 			brelse(rabp);
197 			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
198 		} else {
199 			rabp->b_flags |= B_ASYNC | B_READ;
200 			if (rabp->b_bcount > rabp->b_bufsize)
201 				panic("breadrabp");
202 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
203 				crhold(cred);
204 				rabp->b_rcred = cred;
205 			}
206 			VOP_STRATEGY(rabp);
207 			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
208 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
209 		}
210 	}
211 
212 	/*
213 	 * If block was memory resident, let bread get it.
214 	 * If block was not memory resident, the read was
215 	 * started above, so just wait for the read to complete.
216 	 */
217 	if (bp == NULL)
218 		return (bread(vp, blkno, size, cred, bpp));
219 	return (biowait(bp));
220 }
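
/*
 * Example breadn() call, again with assumed names: read logical block
 * lbn and request one block of read-ahead at lbn + 1.
 *
 *	daddr_t ralbn = lbn + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &ralbn, &rasize, 1, NOCRED, &bp);
 *
 * Only the block at lbn is waited for; the read-ahead block is started
 * asynchronously and left in the cache for a later bread().
 */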
221 
222 /*
223  * Synchronous write.
224  * Release buffer on completion.
225  */
226 bwrite(bp)
227 	register struct buf *bp;
228 {
229 	struct proc *p = curproc;		/* XXX */
230 	register int flag;
231 	int s, error = 0;
232 
233 	flag = bp->b_flags;
234 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
235 	if (flag & B_ASYNC) {
236 		if ((flag & B_DELWRI) == 0)
237 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
238 		else
239 			reassignbuf(bp, bp->b_vp);
240 	}
241 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
242 	if (bp->b_bcount > bp->b_bufsize)
243 		panic("bwrite");
244 	s = splbio();
245 	bp->b_vp->v_numoutput++;
246 	bp->b_flags |= B_WRITEINPROG;
247 	splx(s);
248 	VOP_STRATEGY(bp);
249 
250 	/*
251 	 * If the write was synchronous, then await I/O completion.
252 	 * If the write was "delayed", then we put the buffer on
253 	 * the queue of blocks awaiting I/O completion status.
254 	 */
255 	if ((flag & B_ASYNC) == 0) {
256 		error = biowait(bp);
257 		if ((flag&B_DELWRI) == 0)
258 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
259 		else
260 			reassignbuf(bp, bp->b_vp);
261 		if (bp->b_flags & B_EINTR) {
262 			bp->b_flags &= ~B_EINTR;
263 			error = EINTR;
264 		}
265 		brelse(bp);
266 	} else if (flag & B_DELWRI) {
267 		s = splbio();
268 		bp->b_flags |= B_AGE;
269 		splx(s);
270 	}
271 	return (error);
272 }
273 
274 int
275 vn_bwrite(ap)
276 	struct vop_bwrite_args *ap;
277 {
278 	return (bwrite(ap->a_bp));
279 }
280 
281 
282 /*
283  * Delayed write.
284  *
285  * The buffer is marked dirty, but is not queued for I/O.
286  * This routine should be used when the buffer is expected
287  * to be modified again soon, typically a small write that
288  * partially fills a buffer.
289  *
290  * NB: magnetic tapes cannot be delayed; they must be
291  * written in the order that the writes are requested.
292  */
293 bdwrite(bp)
294 	register struct buf *bp;
295 {
296 	struct proc *p = curproc;		/* XXX */
297 
298 	if ((bp->b_flags & B_DELWRI) == 0) {
299 		bp->b_flags |= B_DELWRI;
300 		reassignbuf(bp, bp->b_vp);
301 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
302 	}
303 	/*
304 	 * If this is a tape drive, the write must be initiated.
305 	 */
306 	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
307 		bawrite(bp);
308 	} else {
309 		bp->b_flags |= (B_DONE | B_DELWRI);
310 		brelse(bp);
311 	}
312 }
313 
314 /*
315  * Asynchronous write.
316  * Start I/O on a buffer, but do not wait for it to complete.
317  * The buffer is released when the I/O completes.
318  */
319 bawrite(bp)
320 	register struct buf *bp;
321 {
322 
323 	/*
324 	 * Setting the ASYNC flag causes bwrite to return
325 	 * after starting the I/O.
326 	 */
327 	bp->b_flags |= B_ASYNC;
328 	(void) VOP_BWRITE(bp);
329 }
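
/*
 * Given a busy buffer bp whose contents have just been modified, the
 * choice among the three write interfaces is roughly (sketch only):
 *
 *	if (the data must reach stable storage before proceeding)
 *		error = bwrite(bp);	(start the I/O and wait for it)
 *	else if (the block is likely to be modified again soon)
 *		bdwrite(bp);		(just mark it dirty)
 *	else
 *		bawrite(bp);		(start the I/O, do not wait)
 */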
330 
331 /*
332  * Release a buffer.
333  * Even if the buffer is dirty, no I/O is started.
334  */
335 brelse(bp)
336 	register struct buf *bp;
337 {
338 	register struct queue_entry *flist;
339 	int s;
340 
341 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
342 	/*
343 	 * If a process is waiting for the buffer, or
344 	 * is waiting for a free buffer, awaken it.
345 	 */
346 	if (bp->b_flags & B_WANTED)
347 		wakeup((caddr_t)bp);
348 	if (needbuffer) {
349 		needbuffer = 0;
350 		wakeup((caddr_t)&needbuffer);
351 	}
352 	/*
353 	 * Retry I/O for locked buffers rather than invalidating them.
354 	 */
355 	s = splbio();
356 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
357 		bp->b_flags &= ~B_ERROR;
358 	/*
359 	 * Disassociate buffers that are no longer valid.
360 	 */
361 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
362 		bp->b_flags |= B_INVAL;
363 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
364 		if (bp->b_vp)
365 			brelvp(bp);
366 		bp->b_flags &= ~B_DELWRI;
367 	}
368 	/*
369 	 * Stick the buffer back on a free list.
370 	 */
371 	if (bp->b_bufsize <= 0) {
372 		/* block has no buffer ... put at front of unused buffer list */
373 		flist = &bufqueues[BQ_EMPTY];
374 		binsheadfree(bp, flist);
375 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
376 		/* block has no info ... put at front of most free list */
377 		flist = &bufqueues[BQ_AGE];
378 		binsheadfree(bp, flist);
379 	} else {
380 		if (bp->b_flags & B_LOCKED)
381 			flist = &bufqueues[BQ_LOCKED];
382 		else if (bp->b_flags & B_AGE)
383 			flist = &bufqueues[BQ_AGE];
384 		else
385 			flist = &bufqueues[BQ_LRU];
386 		binstailfree(bp, flist);
387 	}
388 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
389 	splx(s);
390 }
391 
392 /*
393  * Check to see if a block is currently memory resident.
394  */
395 struct buf *
396 incore(vp, blkno)
397 	struct vnode *vp;
398 	daddr_t blkno;
399 {
400 	register struct buf *bp;
401 
402 	for (bp = BUFHASH(vp, blkno)->le_next; bp; bp = bp->b_hash.qe_next)
403 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
404 		    (bp->b_flags & B_INVAL) == 0)
405 			return (bp);
406 	return (NULL);
407 }
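
/*
 * incore() only inspects the hash chain; it neither locks nor removes
 * the buffer it finds.  breadn() above is the typical caller: blocks
 * that are already resident are skipped rather than re-read.
 */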
408 
409 /*
410  * Check to see if a block is currently memory resident.
411  * If it is resident, return it. If it is not resident,
412  * allocate a new buffer and assign it to the block.
413  */
414 struct buf *
415 getblk(vp, blkno, size, slpflag, slptimeo)
416 	register struct vnode *vp;
417 	daddr_t blkno;
418 	int size, slpflag, slptimeo;
419 {
420 	register struct buf *bp;
421 	struct list_entry *dp;
422 	int s, error;
423 
424 	if (size > MAXBSIZE)
425 		panic("getblk: size too big");
426 	/*
427 	 * Search the cache for the block. If the buffer is found,
428 	 * but it is currently locked, then we must wait for it to
429 	 * become available.
430 	 */
431 	dp = BUFHASH(vp, blkno);
432 loop:
433 	for (bp = dp->le_next; bp; bp = bp->b_hash.qe_next) {
434 		if (bp->b_lblkno != blkno || bp->b_vp != vp)
435 			continue;
436 		s = splbio();
437 		if (bp->b_flags & B_BUSY) {
438 			bp->b_flags |= B_WANTED;
439 			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
440 				"getblk", slptimeo);
441 			splx(s);
442 			if (error)
443 				return (NULL);
444 			goto loop;
445 		}
446 		/*
447 		 * The test for B_INVAL is moved down here, since there
448 		 * are cases where B_INVAL is set before VOP_BWRITE() is
449 		 * called and for NFS, the process cannot be allowed to
450 		 * allocate a new buffer for the same block until the write
451 		 * back to the server has been completed (i.e., until B_BUSY clears).
452 		 */
453 		if (bp->b_flags & B_INVAL) {
454 			splx(s);
455 			continue;
456 		}
457 		bremfree(bp);
458 		bp->b_flags |= B_BUSY;
459 		splx(s);
460 		if (bp->b_bcount != size) {
461 			printf("getblk: stray size\n");
462 			bp->b_flags |= B_INVAL;
463 			VOP_BWRITE(bp);
464 			goto loop;
465 		}
466 		bp->b_flags |= B_CACHE;
467 		return (bp);
468 	}
469 	/*
470 	 * The loop back to the top when getnewbuf() fails is because
471 	 * stateless filesystems like NFS have no node locks. Thus,
472 	 * there is a slight chance that more than one process will
473 	 * call getnewbuf() for the same block concurrently when
474 	 * the first sleeps in getnewbuf(). So after a sleep, go back
475 	 * up to the top to check the hash lists again.
476 	 */
477 	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
478 		goto loop;
479 	bremhash(bp);
480 	bgetvp(vp, bp);
481 	bp->b_bcount = 0;
482 	bp->b_lblkno = blkno;
483 	bp->b_blkno = blkno;
484 	bp->b_error = 0;
485 	bp->b_resid = 0;
486 	binshash(bp, dp);
487 	allocbuf(bp, size);
488 	return (bp);
489 }
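
/*
 * A caller that intends to overwrite an entire block uses getblk()
 * directly so that no read I/O is issued.  Sketch with assumed names:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	bzero(bp->b_data, bsize);	(or fill it in completely)
 *	bdwrite(bp);
 *
 * If the block was already cached, B_CACHE is set on return; otherwise
 * the caller gets a buffer whose contents are undefined.
 */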
490 
491 /*
492  * Allocate a buffer.
493  * The caller will assign it to a block.
494  */
495 struct buf *
496 geteblk(size)
497 	int size;
498 {
499 	register struct buf *bp;
500 
501 	if (size > MAXBSIZE)
502 		panic("geteblk: size too big");
503 	while ((bp = getnewbuf(0, 0)) == NULL)
504 		/* void */;
505 	bp->b_flags |= B_INVAL;
506 	bremhash(bp);
507 	binshash(bp, &invalhash);
508 	bp->b_bcount = 0;
509 	bp->b_error = 0;
510 	bp->b_resid = 0;
511 	allocbuf(bp, size);
512 	return (bp);
513 }
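
/*
 * geteblk() hands out a buffer that is not associated with any vnode
 * block, e.g. as scratch space for I/O not tied to a cached file block.
 * Minimal sketch:
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_data ...
 *	brelse(bp);
 *
 * The buffer is marked B_INVAL, so brelse() puts it at the front of the
 * age queue instead of caching its contents.
 */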
514 
515 /*
516  * Expand or contract the actual memory allocated to a buffer.
517  * When growing, memory is taken from other buffers, waiting if necessary.
518  */
519 allocbuf(tp, size)
520 	register struct buf *tp;
521 	int size;
522 {
523 	register struct buf *bp, *ep;
524 	int sizealloc, take, s;
525 
526 	sizealloc = roundup(size, CLBYTES);
527 	/*
528 	 * Buffer size does not change
529 	 */
530 	if (sizealloc == tp->b_bufsize)
531 		goto out;
532 	/*
533 	 * Buffer size is shrinking.
534 	 * Place excess space in a buffer header taken from the
535 	 * BQ_EMPTY buffer list and placed on the "most free" list.
536 	 * If no extra buffer headers are available, leave the
537 	 * extra space in the present buffer.
538 	 */
539 	if (sizealloc < tp->b_bufsize) {
540 		if ((ep = bufqueues[BQ_EMPTY].qe_next) == NULL)
541 			goto out;
542 		s = splbio();
543 		bremfree(ep);
544 		ep->b_flags |= B_BUSY;
545 		splx(s);
546 		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
547 		    (int)tp->b_bufsize - sizealloc);
548 		ep->b_bufsize = tp->b_bufsize - sizealloc;
549 		tp->b_bufsize = sizealloc;
550 		ep->b_flags |= B_INVAL;
551 		ep->b_bcount = 0;
552 		brelse(ep);
553 		goto out;
554 	}
555 	/*
556 	 * More buffer space is needed. Get it out of buffers on
557 	 * the "most free" list, placing the empty headers on the
558 	 * BQ_EMPTY buffer header list.
559 	 */
560 	while (tp->b_bufsize < sizealloc) {
561 		take = sizealloc - tp->b_bufsize;
562 		while ((bp = getnewbuf(0, 0)) == NULL)
563 			/* void */;
564 		if (take >= bp->b_bufsize)
565 			take = bp->b_bufsize;
566 		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
567 		    &((char *)tp->b_data)[tp->b_bufsize], take);
568 		tp->b_bufsize += take;
569 		bp->b_bufsize = bp->b_bufsize - take;
570 		if (bp->b_bcount > bp->b_bufsize)
571 			bp->b_bcount = bp->b_bufsize;
572 		if (bp->b_bufsize <= 0) {
573 			bremhash(bp);
574 			binshash(bp, &invalhash);
575 			bp->b_dev = NODEV;
576 			bp->b_error = 0;
577 			bp->b_flags |= B_INVAL;
578 		}
579 		brelse(bp);
580 	}
581 out:
582 	tp->b_bcount = size;
583 	return (1);
584 }
585 
586 /*
587  * Find a buffer which is available for use.
588  * Select something from a free list.
589  * Preference is to AGE list, then LRU list.
590  */
591 struct buf *
592 getnewbuf(slpflag, slptimeo)
593 	int slpflag, slptimeo;
594 {
595 	register struct buf *bp;
596 	register struct queue_entry *dp;
597 	register struct ucred *cred;
598 	int s;
599 
600 loop:
601 	s = splbio();
602 	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
603 		if (dp->qe_next)
604 			break;
605 	if (dp == bufqueues) {		/* no free blocks */
606 		needbuffer = 1;
607 		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
608 			"getnewbuf", slptimeo);
609 		splx(s);
610 		return (NULL);
611 	}
612 	bp = dp->qe_next;
613 	bremfree(bp);
614 	bp->b_flags |= B_BUSY;
615 	splx(s);
616 	if (bp->b_flags & B_DELWRI) {
617 		(void) bawrite(bp);
618 		goto loop;
619 	}
620 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
621 	if (bp->b_vp)
622 		brelvp(bp);
623 	if (bp->b_rcred != NOCRED) {
624 		cred = bp->b_rcred;
625 		bp->b_rcred = NOCRED;
626 		crfree(cred);
627 	}
628 	if (bp->b_wcred != NOCRED) {
629 		cred = bp->b_wcred;
630 		bp->b_wcred = NOCRED;
631 		crfree(cred);
632 	}
633 	bp->b_flags = B_BUSY;
634 	bp->b_dirtyoff = bp->b_dirtyend = 0;
635 	bp->b_validoff = bp->b_validend = 0;
636 	return (bp);
637 }
638 
639 /*
640  * Wait for I/O to complete.
641  *
642  * Extract and return any errors associated with the I/O.
643  * If the error flag is set, but no specific error is
644  * given, return EIO.
645  */
646 biowait(bp)
647 	register struct buf *bp;
648 {
649 	int s;
650 
651 	s = splbio();
652 	while ((bp->b_flags & B_DONE) == 0)
653 		sleep((caddr_t)bp, PRIBIO);
654 	splx(s);
655 	if ((bp->b_flags & B_ERROR) == 0)
656 		return (0);
657 	if (bp->b_error)
658 		return (bp->b_error);
659 	return (EIO);
660 }
661 
662 /*
663  * Mark I/O complete on a buffer.
664  *
665  * If a callback has been requested, e.g. the pageout
666  * daemon, do so. Otherwise, awaken waiting processes.
667  */
668 void
669 biodone(bp)
670 	register struct buf *bp;
671 {
672 
673 	if (bp->b_flags & B_DONE)
674 		panic("dup biodone");
675 	bp->b_flags |= B_DONE;
676 	if ((bp->b_flags & B_READ) == 0)
677 		vwakeup(bp);
678 	if (bp->b_flags & B_CALL) {
679 		bp->b_flags &= ~B_CALL;
680 		(*bp->b_iodone)(bp);
681 		return;
682 	}
683 	if (bp->b_flags & B_ASYNC)
684 		brelse(bp);
685 	else {
686 		bp->b_flags &= ~B_WANTED;
687 		wakeup((caddr_t)bp);
688 	}
689 }
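
/*
 * biodone() is called by the device driver, typically at interrupt time,
 * once the transfer finishes.  A hypothetical completion path:
 *
 *	if (hard_error) {
 *		bp->b_flags |= B_ERROR;
 *		bp->b_error = EIO;
 *	}
 *	bp->b_resid = 0;
 *	biodone(bp);
 *
 * biowait() above then returns 0, bp->b_error, or EIO to the original
 * caller.
 */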
690 
691 int
692 count_lock_queue()
693 {
694 	register struct buf *bp;
695 	register int ret;
696 
697 	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].qe_next;
698 	    bp; bp = (struct buf *)bp->b_freelist.qe_next)
699 		++ret;
700 	return(ret);
701 }
702 
703 #ifdef DIAGNOSTIC
704 /*
705  * Print out statistics on the current allocation of the buffer pool.
706  * Can be enabled to print out on every ``sync'' by setting "syncprt"
707  * in vfs_syscalls.c using sysctl.
708  */
709 void
710 vfs_bufstats()
711 {
712 	int s, i, j, count;
713 	register struct buf *bp;
714 	register struct queue_entry *dp;
715 	int counts[MAXBSIZE/CLBYTES+1];
716 	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
717 
718 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
719 		count = 0;
720 		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
721 			counts[j] = 0;
722 		s = splbio();
723 		for (bp = dp->qe_next; bp; bp = bp->b_freelist.qe_next) {
724 			counts[bp->b_bufsize/CLBYTES]++;
725 			count++;
726 		}
727 		splx(s);
728 		printf("%s: total-%d", bname[i], count);
729 		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
730 			if (counts[j] != 0)
731 				printf(", %d-%d", j * CLBYTES, counts[j]);
732 		printf("\n");
733 	}
734 }
735 #endif /* DIAGNOSTIC */
736