/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This module is believed to contain source code proprietary to AT&T.
 * Use and redistribution is subject to the Berkeley Software License
 * Agreement and your Software Agreement with AT&T (Western Electric).
 *
 *	@(#)vfs_bio.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <libkern/libkern.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
struct	list_entry *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	list_enter_head(dp, bp, struct buf *, b_hash)
#define	bremhash(bp)		list_remove(bp, struct buf *, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

struct queue_entry bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp) \
	queue_enter_head(dp, bp, struct buf *, b_freelist)
#define	binstailfree(bp, dp) \
	queue_enter_tail(dp, bp, struct buf *, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct queue_entry *dp;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 */
	if (bp->b_freelist.qe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->qe_prev == &bp->b_freelist.qe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	queue_remove(dp, bp, struct buf *, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct queue_entry *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		queue_init(dp);
	bufhashtbl = (struct list_entry *)hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_un.b_addr = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}

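#ifdef notdef
/*
 * Illustrative sketch of a typical bread() consumer; not part of the
 * buffer cache itself.  The vnode, logical block number, block size,
 * and credentials are assumed to come from the caller (e.g. a
 * filesystem read routine), and the helper name is hypothetical.
 * Note that the buffer must be released even when the read fails.
 */
static int
example_read(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... examine or copy from bp->b_un.b_addr here ... */
	brelse(bp);		/* hand the buffer back to the free lists */
	return (0);
}
#endif /* notdef */
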
/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size, 0, 0);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else {
			trace(TR_BREADHIT, pack(vp, size), blkno);
		}
	}

	/*
	 * If there are read-ahead block(s), start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}

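#ifdef notdef
/*
 * Illustrative sketch: read a block and ask for read-ahead on the
 * two blocks that follow it.  Uniform block size and contiguous
 * logical block numbers are assumed for brevity; a real filesystem
 * derives both from the file mapping.  The helper is hypothetical.
 */
static int
example_readahead(vp, lbn, bsize, cred, bpp)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
	struct buf **bpp;
{
	daddr_t ralbn[2];
	int rasize[2], error;

	ralbn[0] = lbn + 1;
	ralbn[1] = lbn + 2;
	rasize[0] = rasize[1] = bsize;
	error = breadn(vp, lbn, bsize, ralbn, rasize, 2, cred, bpp);
	if (error) {
		brelse(*bpp);
		return (error);
	}
	return (0);		/* *bpp is busy; the caller releases it */
}
#endif /* notdef */
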
/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	bp->b_flags |= B_WRITEINPROG;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag&B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		if (bp->b_flags & B_EINTR) {
			bp->b_flags &= ~B_EINTR;
			error = EINTR;
		}
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

/*
 * The default bwrite operation for vnodes: simply pass the buffer
 * to bwrite().
 */
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}


/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

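#ifdef notdef
/*
 * Illustrative sketch: a small in-place update of a cached block.
 * The block is read, patched, and queued as a delayed write so that
 * neighboring updates can be batched into one physical write.  The
 * offset and length are assumed to lie within the block; the helper
 * is hypothetical.
 */
static int
example_patch_block(vp, lbn, bsize, off, src, len, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize, off, len;
	char *src;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	bcopy(src, bp->b_un.b_addr + off, (u_int)len);
	bdwrite(bp);		/* marked dirty; no I/O is started yet */
	return (0);
}
#endif /* notdef */
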
/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

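#ifdef notdef
/*
 * Illustrative sketch: rewriting a whole block whose old contents do
 * not matter.  The buffer is obtained with getblk() to avoid the read
 * that bread() would issue, filled, and pushed out with bawrite() so
 * the caller need not wait; bwrite() would be used instead when the
 * caller must know the outcome before proceeding.  The helper is
 * hypothetical.
 */
static void
example_rewrite_block(vp, lbn, bsize, src)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	char *src;
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	bcopy(src, bp->b_un.b_addr, (u_int)bsize);
	bawrite(bp);		/* buffer is released when the I/O completes */
}
#endif /* notdef */
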
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct queue_entry *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (needbuffer) {
		needbuffer = 0;
		wakeup((caddr_t)&needbuffer);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bufqueues[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bufqueues[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bufqueues[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bufqueues[BQ_AGE];
		else
			flist = &bufqueues[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;

	for (bp = BUFHASH(vp, blkno)->le_next; bp; bp = bp->b_hash.qe_next)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (bp);
	return (NULL);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	register struct buf *bp;
	struct list_entry *dp;
	int s, error;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->le_next; bp; bp = bp->b_hash.qe_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"getblk", slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto loop;
		}
		/*
		 * The test for B_INVAL is moved down here, since there
		 * are cases where B_INVAL is set before VOP_BWRITE() is
		 * called and, for NFS, the process cannot be allowed to
		 * allocate a new buffer for the same block until the write
		 * back to the server has completed (i.e. until B_BUSY clears).
		 */
		if (bp->b_flags & B_INVAL) {
			splx(s);
			continue;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			VOP_BWRITE(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/*
	 * The loop back to the top when getnewbuf() fails is because
	 * stateless filesystems like NFS have no node locks. Thus,
	 * there is a slight chance that more than one process will
	 * try and getnewbuf() for the same block concurrently when
	 * the first sleeps in getnewbuf(). So after a sleep, go back
	 * up to the top to check the hash lists again.
	 */
	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
		goto loop;
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}

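#ifdef notdef
/*
 * Illustrative sketch: the slpflag and slptimeo arguments let a
 * caller sleep interruptibly (and with a bound) when the buffer is
 * busy, as NFS does so that a signalled process is not hung on a
 * dead server.  A NULL return means the sleep was interrupted or
 * timed out, not that the block does not exist.  The helper is
 * hypothetical.
 */
static struct buf *
example_getblk_intr(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{

	/* PCATCH makes the wait for a busy buffer interruptible by signals */
	return (getblk(vp, lbn, bsize, PCATCH, 0));
}
#endif /* notdef */
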
/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	while ((bp = getnewbuf(0, 0)) == NULL)
		/* void */;
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	binshash(bp, &invalhash);
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	allocbuf(bp, size);
	return (bp);
}

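#ifdef notdef
/*
 * Illustrative sketch: geteblk() supplies a buffer that is not
 * associated with any vnode or block, which makes it useful as a
 * short-lived scratch area of up to MAXBSIZE bytes.  The helper is
 * hypothetical.
 */
static void
example_scratch()
{
	struct buf *bp;

	bp = geteblk(MAXBSIZE);
	/* ... use bp->b_un.b_addr as temporary storage ... */
	brelse(bp);		/* B_INVAL is set, so it lands on the AGE list */
}
#endif /* notdef */
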
/*
 * Expand or contract the actual memory allocated to a buffer.
 * When shrinking, the excess space is handed to a spare buffer
 * header if one is available; otherwise it is left with the buffer.
 * When growing, space is taken from buffers on the free lists.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		if ((ep = bufqueues[BQ_EMPTY].qe_next) == NULL)
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove(tp->b_un.b_addr + sizealloc, ep->b_un.b_addr,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		while ((bp = getnewbuf(0, 0)) == NULL)
			/* void */;
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&bp->b_un.b_addr[bp->b_bufsize - take],
		    &tp->b_un.b_addr[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &invalhash);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	register struct queue_entry *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
		if (dp->qe_next)
			break;
	if (dp == bufqueues) {		/* no free blocks */
		needbuffer = 1;
		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
			"getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}
	bp = dp->qe_next;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

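#ifdef notdef
/*
 * Illustrative sketch: asynchronous I/O with a completion callback,
 * in the style mentioned above for the pageout daemon.  B_CALL makes
 * biodone() invoke b_iodone instead of waking a sleeper, so the
 * callback is responsible for releasing the buffer.  Both routines
 * are hypothetical; the read is started directly via VOP_STRATEGY().
 */
static void
example_iodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_ERROR)
		printf("example_iodone: error %d\n", bp->b_error);
	brelse(bp);
}

static void
example_async_read(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		brelse(bp);			/* already valid in core */
		return;
	}
	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
	bp->b_iodone = example_iodone;
	VOP_STRATEGY(bp);
	/* control returns at once; example_iodone() runs from biodone() */
}
#endif /* notdef */
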
/*
 * Return a count of the buffers on the locked queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int ret;

	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].qe_next;
	    bp; bp = (struct buf *)bp->b_freelist.qe_next)
		++ret;
	return(ret);
}

#ifdef DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct queue_entry *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->qe_next; bp; bp = bp->b_freelist.qe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */