xref: /original-bsd/sys/kern/vfs_bio.c (revision 7a38d872)
1 /*-
2  * Copyright (c) 1986, 1989, 1993 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * This code is derived from software contributed to Berkeley by
6  * Berkeley Software Design Inc.
7  *
8  * %sccs.include.redist.c%
9  *
10  *	@(#)vfs_bio.c	8.6 (Berkeley) 01/11/94
11  */
12 
13 #include <sys/param.h>
14 #include <sys/systm.h>
15 #include <sys/proc.h>
16 #include <sys/buf.h>
17 #include <sys/vnode.h>
18 #include <sys/mount.h>
19 #include <sys/trace.h>
20 #include <sys/malloc.h>
21 #include <sys/resourcevar.h>
22 
23 /*
24  * Definitions for the buffer hash lists.
25  */
26 #define	BUFHASH(dvp, lbn)	\
27 	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
28 LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
29 u_long	bufhash;
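/*
 * BUFHASH folds the vnode address (scaled down by the size of a vnode)
 * into the logical block number and masks the sum with bufhash, the
 * power-of-two mask filled in by hashinit().  Buffers with no valid
 * identity are kept on invalhash.
 */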
30 
31 /*
32  * Insq/Remq for the buffer hash lists.
33  */
34 #define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
35 #define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
36 
37 /*
38  * Definitions for the buffer free lists.
39  */
40 #define	BQUEUES		4		/* number of free buffer queues */
41 
42 #define	BQ_LOCKED	0		/* super-blocks &c */
43 #define	BQ_LRU		1		/* lru, useful buffers */
44 #define	BQ_AGE		2		/* rubbish */
45 #define	BQ_EMPTY	3		/* buffer headers with no memory */
46 
47 TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
48 int needbuffer;
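/*
 * needbuffer is set by getnewbuf() when a process must sleep waiting
 * for a buffer header to be freed; brelse() clears it and wakes the
 * sleepers.
 */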
49 
50 /*
51  * Insq/Remq for the buffer free lists.
52  */
53 #define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
54 #define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)
55 
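/*
 * Remove a buffer from whichever free list it is currently on.
 * Callers are expected to be at splbio().
 */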
56 void
57 bremfree(bp)
58 	struct buf *bp;
59 {
60 	struct bqueues *dp = NULL;
61 
62 	/*
63 	 * We only calculate the head of the freelist when removing
64 	 * the last element of the list as that is the only time that
65 	 * it is needed (e.g. to reset the tail pointer).
66 	 *
67 	 * NB: This makes an assumption about how tailq's are implemented.
68 	 */
69 	if (bp->b_freelist.tqe_next == NULL) {
70 		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
71 			if (dp->tqh_last == &bp->b_freelist.tqe_next)
72 				break;
73 		if (dp == &bufqueues[BQUEUES])
74 			panic("bremfree: lost tail");
75 	}
76 	TAILQ_REMOVE(dp, bp, b_freelist);
77 }
78 
79 /*
80  * Initialize buffers and hash links for buffers.
81  */
82 void
83 bufinit()
84 {
85 	register struct buf *bp;
86 	struct bqueues *dp;
87 	register int i;
88 	int base, residual;
89 
90 	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
91 		TAILQ_INIT(dp);
92 	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
93 	base = bufpages / nbuf;
94 	residual = bufpages % nbuf;
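	/*
	 * Parcel the buffer memory out in CLBYTES units: each of the
	 * nbuf headers gets "base" units, and the first "residual"
	 * headers get one extra unit so the whole pool is used.
	 */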
95 	for (i = 0; i < nbuf; i++) {
96 		bp = &buf[i];
97 		bzero((char *)bp, sizeof *bp);
98 		bp->b_dev = NODEV;
99 		bp->b_rcred = NOCRED;
100 		bp->b_wcred = NOCRED;
101 		bp->b_vnbufs.le_next = NOLIST;
102 		bp->b_data = buffers + i * MAXBSIZE;
103 		if (i < residual)
104 			bp->b_bufsize = (base + 1) * CLBYTES;
105 		else
106 			bp->b_bufsize = base * CLBYTES;
107 		bp->b_flags = B_INVAL;
108 		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
109 		binsheadfree(bp, dp);
110 		binshash(bp, &invalhash);
111 	}
112 }
113 
114 /*
115  * Find the block in the buffer pool.
116  * If the buffer is not present, allocate a new buffer and load
117  * its contents according to the filesystem fill routine.
118  */
119 bread(vp, blkno, size, cred, bpp)
120 	struct vnode *vp;
121 	daddr_t blkno;
122 	int size;
123 	struct ucred *cred;
124 	struct buf **bpp;
125 {
126 	struct proc *p = curproc;		/* XXX */
127 	register struct buf *bp;
128 
129 	if (size == 0)
130 		panic("bread: size 0");
131 	*bpp = bp = getblk(vp, blkno, size, 0, 0);
132 	if (bp->b_flags & (B_DONE | B_DELWRI)) {
133 		trace(TR_BREADHIT, pack(vp, size), blkno);
134 		return (0);
135 	}
136 	bp->b_flags |= B_READ;
137 	if (bp->b_bcount > bp->b_bufsize)
138 		panic("bread");
139 	if (bp->b_rcred == NOCRED && cred != NOCRED) {
140 		crhold(cred);
141 		bp->b_rcred = cred;
142 	}
143 	VOP_STRATEGY(bp);
144 	trace(TR_BREADMISS, pack(vp, size), blkno);
145 	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
146 	return (biowait(bp));
147 }
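
/*
 * Typical calling sequence (an illustrative sketch only; "lbn" and
 * "bp" stand for whatever locals the caller uses):
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...use the data at bp->b_data...
 *	brelse(bp);	(or bdwrite/bawrite if the caller dirtied it)
 */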
148 
149 /*
150  * Operates like bread, but also starts I/O on the N specified
151  * read-ahead blocks.
152  */
153 breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
154 	struct vnode *vp;
155 	daddr_t blkno; int size;
156 	daddr_t rablkno[]; int rabsize[];
157 	int num;
158 	struct ucred *cred;
159 	struct buf **bpp;
160 {
161 	struct proc *p = curproc;		/* XXX */
162 	register struct buf *bp, *rabp;
163 	register int i;
164 
165 	bp = NULL;
166 	/*
167 	 * If the block is not memory resident,
168 	 * allocate a buffer and start I/O.
169 	 */
170 	if (!incore(vp, blkno)) {
171 		*bpp = bp = getblk(vp, blkno, size, 0, 0);
172 		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
173 			bp->b_flags |= B_READ;
174 			if (bp->b_bcount > bp->b_bufsize)
175 				panic("breadn");
176 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
177 				crhold(cred);
178 				bp->b_rcred = cred;
179 			}
180 			VOP_STRATEGY(bp);
181 			trace(TR_BREADMISS, pack(vp, size), blkno);
182 			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
183 		} else {
184 			trace(TR_BREADHIT, pack(vp, size), blkno);
185 		}
186 	}
187 
188 	/*
189 	 * If there are read-ahead blocks, start I/O
190 	 * on them as well (as above).
191 	 */
192 	for (i = 0; i < num; i++) {
193 		if (incore(vp, rablkno[i]))
194 			continue;
195 		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
196 		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
197 			brelse(rabp);
198 			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
199 		} else {
200 			rabp->b_flags |= B_ASYNC | B_READ;
201 			if (rabp->b_bcount > rabp->b_bufsize)
202 				panic("breadrabp");
203 			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
204 				crhold(cred);
205 				rabp->b_rcred = cred;
206 			}
207 			VOP_STRATEGY(rabp);
208 			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
209 			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
210 		}
211 	}
212 
213 	/*
214 	 * If the block was memory resident, let bread get it.
215 	 * If it was not, the read was
216 	 * started above, so just wait for the read to complete.
217 	 */
218 	if (bp == NULL)
219 		return (bread(vp, blkno, size, cred, bpp));
220 	return (biowait(bp));
221 }
222 
223 /*
224  * Synchronous write (converted to asynchronous if the mount is MNT_ASYNC).
225  * Release buffer on completion.
226  */
227 bwrite(bp)
228 	register struct buf *bp;
229 {
230 	struct proc *p = curproc;		/* XXX */
231 	register int flag;
232 	int s, error = 0;
233 
234 	if (bp->b_vp && (bp->b_vp->v_mount->mnt_flag & MNT_ASYNC))
235 		bp->b_flags |= B_ASYNC;
236 	flag = bp->b_flags;
237 	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
238 	if (flag & B_ASYNC) {
239 		if ((flag & B_DELWRI) == 0)
240 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
241 		else
242 			reassignbuf(bp, bp->b_vp);
243 	}
244 	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
245 	if (bp->b_bcount > bp->b_bufsize)
246 		panic("bwrite");
247 	s = splbio();
248 	bp->b_vp->v_numoutput++;
249 	bp->b_flags |= B_WRITEINPROG;
250 	splx(s);
251 	VOP_STRATEGY(bp);
252 
253 	/*
254 	 * If the write was synchronous, then await I/O completion.
255 	 * If the write was asynchronous but had been delayed, mark the
256 	 * buffer B_AGE so that it is reused promptly once the I/O completes.
257 	 */
258 	if ((flag & B_ASYNC) == 0) {
259 		error = biowait(bp);
260 		if ((flag&B_DELWRI) == 0)
261 			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
262 		else
263 			reassignbuf(bp, bp->b_vp);
264 		if (bp->b_flags & B_EINTR) {
265 			bp->b_flags &= ~B_EINTR;
266 			error = EINTR;
267 		}
268 		brelse(bp);
269 	} else if (flag & B_DELWRI) {
270 		s = splbio();
271 		bp->b_flags |= B_AGE;
272 		splx(s);
273 	}
274 	return (error);
275 }
276 
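/*
 * Generic vnode bwrite operation: simply pass the buffer to bwrite().
 */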
277 int
278 vn_bwrite(ap)
279 	struct vop_bwrite_args *ap;
280 {
281 
282 	return (bwrite(ap->a_bp));
283 }
284 
285 
286 /*
287  * Delayed write.
288  *
289  * The buffer is marked dirty, but is not queued for I/O.
290  * This routine should be used when the buffer is expected
291  * to be modified again soon, typically a small write that
292  * partially fills a buffer.
293  *
294  * NB: magnetic tapes cannot be delayed; they must be
295  * written in the order that the writes are requested.
296  */
297 bdwrite(bp)
298 	register struct buf *bp;
299 {
300 	struct proc *p = curproc;		/* XXX */
301 
302 	if ((bp->b_flags & B_DELWRI) == 0) {
303 		bp->b_flags |= B_DELWRI;
304 		reassignbuf(bp, bp->b_vp);
305 		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
306 	}
307 	/*
308 	 * If this is a tape drive, the write must be initiated.
309 	 */
310 	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
311 		bawrite(bp);
312 	} else {
313 		bp->b_flags |= (B_DONE | B_DELWRI);
314 		brelse(bp);
315 	}
316 }
317 
318 /*
319  * Asynchronous write.
320  * Start I/O on a buffer, but do not wait for it to complete.
321  * The buffer is released when the I/O completes.
322  */
323 bawrite(bp)
324 	register struct buf *bp;
325 {
326 
327 	/*
328 	 * Setting the ASYNC flag causes bwrite to return
329 	 * after starting the I/O.
330 	 */
331 	bp->b_flags |= B_ASYNC;
332 	(void) VOP_BWRITE(bp);
333 }
334 
335 /*
336  * Release a buffer.
337  * Even if the buffer is dirty, no I/O is started.
338  */
339 brelse(bp)
340 	register struct buf *bp;
341 {
342 	register struct bqueues *flist;
343 	int s;
344 
345 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
346 	/*
347 	 * If a process is waiting for the buffer, or
348 	 * is waiting for a free buffer, awaken it.
349 	 */
350 	if (bp->b_flags & B_WANTED)
351 		wakeup((caddr_t)bp);
352 	if (needbuffer) {
353 		needbuffer = 0;
354 		wakeup((caddr_t)&needbuffer);
355 	}
356 	/*
357 	 * Retry I/O for locked buffers rather than invalidating them.
358 	 */
359 	s = splbio();
360 	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
361 		bp->b_flags &= ~B_ERROR;
362 	/*
363 	 * Disassociate buffers that are no longer valid.
364 	 */
365 	if (bp->b_flags & (B_NOCACHE | B_ERROR))
366 		bp->b_flags |= B_INVAL;
367 	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
368 		if (bp->b_vp)
369 			brelvp(bp);
370 		bp->b_flags &= ~B_DELWRI;
371 	}
372 	/*
373 	 * Stick the buffer back on a free list.
374 	 */
375 	if (bp->b_bufsize <= 0) {
376 		/* block has no buffer ... put at front of unused buffer list */
377 		flist = &bufqueues[BQ_EMPTY];
378 		binsheadfree(bp, flist);
379 	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
380 		/* block has no info ... put at front of most free list */
381 		flist = &bufqueues[BQ_AGE];
382 		binsheadfree(bp, flist);
383 	} else {
384 		if (bp->b_flags & B_LOCKED)
385 			flist = &bufqueues[BQ_LOCKED];
386 		else if (bp->b_flags & B_AGE)
387 			flist = &bufqueues[BQ_AGE];
388 		else
389 			flist = &bufqueues[BQ_LRU];
390 		binstailfree(bp, flist);
391 	}
392 	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
393 	splx(s);
394 }
395 
396 /*
397  * Check to see if a block is currently memory resident.
398  */
399 struct buf *
400 incore(vp, blkno)
401 	struct vnode *vp;
402 	daddr_t blkno;
403 {
404 	register struct buf *bp;
405 
406 	for (bp = BUFHASH(vp, blkno)->lh_first; bp; bp = bp->b_hash.le_next)
407 		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
408 		    (bp->b_flags & B_INVAL) == 0)
409 			return (bp);
410 	return (NULL);
411 }
412 
413 /*
414  * Check to see if a block is currently memory resident.
415  * If it is resident, return it. If it is not resident,
416  * allocate a new buffer and assign it to the block.
417  */
418 struct buf *
419 getblk(vp, blkno, size, slpflag, slptimeo)
420 	register struct vnode *vp;
421 	daddr_t blkno;
422 	int size, slpflag, slptimeo;
423 {
424 	register struct buf *bp;
425 	struct bufhashhdr *dp;
426 	int s, error;
427 
428 	if (size > MAXBSIZE)
429 		panic("getblk: size too big");
430 	/*
431 	 * Search the cache for the block. If the buffer is found,
432 	 * but it is currently locked, then we must wait for it to
433 	 * become available.
434 	 */
435 	dp = BUFHASH(vp, blkno);
436 loop:
437 	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
438 		if (bp->b_lblkno != blkno || bp->b_vp != vp)
439 			continue;
440 		s = splbio();
441 		if (bp->b_flags & B_BUSY) {
442 			bp->b_flags |= B_WANTED;
443 			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
444 				"getblk", slptimeo);
445 			splx(s);
446 			if (error)
447 				return (NULL);
448 			goto loop;
449 		}
450 		/*
451 		 * The test for B_INVAL is moved down here, since there
452 		 * are cases where B_INVAL is set before VOP_BWRITE() is
453 		 * called and for NFS, the process cannot be allowed to
454 		 * allocate a new buffer for the same block until the write
455 	 * back to the server has been completed (i.e., B_BUSY clears).
456 		 */
457 		if (bp->b_flags & B_INVAL) {
458 			splx(s);
459 			continue;
460 		}
461 		bremfree(bp);
462 		bp->b_flags |= B_BUSY;
463 		splx(s);
464 		if (bp->b_bcount != size) {
465 			printf("getblk: stray size\n");
466 			bp->b_flags |= B_INVAL;
467 			VOP_BWRITE(bp);
468 			goto loop;
469 		}
470 		bp->b_flags |= B_CACHE;
471 		return (bp);
472 	}
473 	/*
474 	 * The loop back to the top when getnewbuf() fails is because
475 	 * stateless filesystems like NFS have no node locks. Thus,
476 	 * there is a slight chance that more than one process will
477 	 * try and getnewbuf() for the same block concurrently when
478 	 * the first sleeps in getnewbuf(). So after a sleep, go back
479 	 * up to the top to check the hash lists again.
480 	 */
481 	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
482 		goto loop;
483 	bremhash(bp);
484 	bgetvp(vp, bp);
485 	bp->b_bcount = 0;
486 	bp->b_lblkno = blkno;
487 	bp->b_blkno = blkno;
488 	bp->b_error = 0;
489 	bp->b_resid = 0;
490 	binshash(bp, dp);
491 	allocbuf(bp, size);
492 	return (bp);
493 }
494 
495 /*
496  * Allocate a buffer.
497  * The caller will assign it to a block.
498  */
499 struct buf *
500 geteblk(size)
501 	int size;
502 {
503 	register struct buf *bp;
504 
505 	if (size > MAXBSIZE)
506 		panic("geteblk: size too big");
507 	while ((bp = getnewbuf(0, 0)) == NULL)
508 		/* void */;
509 	bp->b_flags |= B_INVAL;
510 	bremhash(bp);
511 	binshash(bp, &invalhash);
512 	bp->b_bcount = 0;
513 	bp->b_error = 0;
514 	bp->b_resid = 0;
515 	allocbuf(bp, size);
516 	return (bp);
517 }
518 
519 /*
520  * Expand or contract the actual memory allocated to a buffer.
521  * Space is taken from or given to other buffers; this routine always succeeds.
522  */
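/*
 * Note: buffer memory is managed in CLBYTES units, and pagemove()
 * below is assumed to remap the underlying pages between buffers
 * rather than copy their contents.
 */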
523 allocbuf(tp, size)
524 	register struct buf *tp;
525 	int size;
526 {
527 	register struct buf *bp, *ep;
528 	int sizealloc, take, s;
529 
530 	sizealloc = roundup(size, CLBYTES);
531 	/*
532 	 * Buffer size does not change
533 	 */
534 	if (sizealloc == tp->b_bufsize)
535 		goto out;
536 	/*
537 	 * Buffer size is shrinking.
538 	 * Place excess space in a buffer header taken from the
539 	 * BQ_EMPTY buffer list and placed on the "most free" list.
540 	 * If no extra buffer headers are available, leave the
541 	 * extra space in the present buffer.
542 	 */
543 	if (sizealloc < tp->b_bufsize) {
544 		if ((ep = bufqueues[BQ_EMPTY].tqh_first) == NULL)
545 			goto out;
546 		s = splbio();
547 		bremfree(ep);
548 		ep->b_flags |= B_BUSY;
549 		splx(s);
550 		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
551 		    (int)tp->b_bufsize - sizealloc);
552 		ep->b_bufsize = tp->b_bufsize - sizealloc;
553 		tp->b_bufsize = sizealloc;
554 		ep->b_flags |= B_INVAL;
555 		ep->b_bcount = 0;
556 		brelse(ep);
557 		goto out;
558 	}
559 	/*
560 	 * More buffer space is needed. Get it out of buffers on
561 	 * the "most free" list, placing the empty headers on the
562 	 * BQ_EMPTY buffer header list.
563 	 */
564 	while (tp->b_bufsize < sizealloc) {
565 		take = sizealloc - tp->b_bufsize;
566 		while ((bp = getnewbuf(0, 0)) == NULL)
567 			/* void */;
568 		if (take >= bp->b_bufsize)
569 			take = bp->b_bufsize;
570 		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
571 		    &((char *)tp->b_data)[tp->b_bufsize], take);
572 		tp->b_bufsize += take;
573 		bp->b_bufsize = bp->b_bufsize - take;
574 		if (bp->b_bcount > bp->b_bufsize)
575 			bp->b_bcount = bp->b_bufsize;
576 		if (bp->b_bufsize <= 0) {
577 			bremhash(bp);
578 			binshash(bp, &invalhash);
579 			bp->b_dev = NODEV;
580 			bp->b_error = 0;
581 			bp->b_flags |= B_INVAL;
582 		}
583 		brelse(bp);
584 	}
585 out:
586 	tp->b_bcount = size;
587 	return (1);
588 }
589 
590 /*
591  * Find a buffer which is available for use.
592  * Select something from a free list.
593  * Preference is to AGE list, then LRU list.
594  */
595 struct buf *
596 getnewbuf(slpflag, slptimeo)
597 	int slpflag, slptimeo;
598 {
599 	register struct buf *bp;
600 	register struct bqueues *dp;
601 	register struct ucred *cred;
602 	int s;
603 
604 loop:
605 	s = splbio();
606 	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
607 		if (dp->tqh_first)
608 			break;
609 	if (dp == bufqueues) {		/* no free blocks */
610 		needbuffer = 1;
611 		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
612 			"getnewbuf", slptimeo);
613 		splx(s);
614 		return (NULL);
615 	}
616 	bp = dp->tqh_first;
617 	bremfree(bp);
618 	bp->b_flags |= B_BUSY;
619 	splx(s);
620 	if (bp->b_flags & B_DELWRI) {
621 		(void) bawrite(bp);
622 		goto loop;
623 	}
624 	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
625 	if (bp->b_vp)
626 		brelvp(bp);
627 	if (bp->b_rcred != NOCRED) {
628 		cred = bp->b_rcred;
629 		bp->b_rcred = NOCRED;
630 		crfree(cred);
631 	}
632 	if (bp->b_wcred != NOCRED) {
633 		cred = bp->b_wcred;
634 		bp->b_wcred = NOCRED;
635 		crfree(cred);
636 	}
637 	bp->b_flags = B_BUSY;
638 	bp->b_dirtyoff = bp->b_dirtyend = 0;
639 	bp->b_validoff = bp->b_validend = 0;
640 	return (bp);
641 }
642 
643 /*
644  * Wait for I/O to complete.
645  *
646  * Extract and return any errors associated with the I/O.
647  * If the error flag is set, but no specific error is
648  * given, return EIO.
649  */
650 biowait(bp)
651 	register struct buf *bp;
652 {
653 	int s;
654 
655 	s = splbio();
656 	while ((bp->b_flags & B_DONE) == 0)
657 		sleep((caddr_t)bp, PRIBIO);
658 	splx(s);
659 	if ((bp->b_flags & B_ERROR) == 0)
660 		return (0);
661 	if (bp->b_error)
662 		return (bp->b_error);
663 	return (EIO);
664 }
665 
666 /*
667  * Mark I/O complete on a buffer.
668  *
669  * If a callback has been requested, e.g. the pageout
670  * daemon, do so. Otherwise, awaken waiting processes.
671  */
672 void
673 biodone(bp)
674 	register struct buf *bp;
675 {
676 
677 	if (bp->b_flags & B_DONE)
678 		panic("dup biodone");
679 	bp->b_flags |= B_DONE;
680 	if ((bp->b_flags & B_READ) == 0)
681 		vwakeup(bp);
682 	if (bp->b_flags & B_CALL) {
683 		bp->b_flags &= ~B_CALL;
684 		(*bp->b_iodone)(bp);
685 		return;
686 	}
687 	if (bp->b_flags & B_ASYNC)
688 		brelse(bp);
689 	else {
690 		bp->b_flags &= ~B_WANTED;
691 		wakeup((caddr_t)bp);
692 	}
693 }
694 
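/*
 * Return the number of buffers on the BQ_LOCKED free list.
 */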
695 int
696 count_lock_queue()
697 {
698 	register struct buf *bp;
699 	register int ret;
700 
701 	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].tqh_first;
702 	    bp; bp = (struct buf *)bp->b_freelist.tqe_next)
703 		++ret;
704 	return(ret);
705 }
706 
707 #ifdef DIAGNOSTIC
708 /*
709  * Print out statistics on the current allocation of the buffer pool.
710  * Can be enabled to print out on every ``sync'' by setting "syncprt"
711  * in vfs_syscalls.c using sysctl.
712  */
713 void
714 vfs_bufstats()
715 {
716 	int s, i, j, count;
717 	register struct buf *bp;
718 	register struct bqueues *dp;
719 	int counts[MAXBSIZE/CLBYTES+1];
720 	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };
721 
722 	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
723 		count = 0;
724 		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
725 			counts[j] = 0;
726 		s = splbio();
727 		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
728 			counts[bp->b_bufsize/CLBYTES]++;
729 			count++;
730 		}
731 		splx(s);
732 		printf("%s: total-%d", bname[i], count);
733 		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
734 			if (counts[j] != 0)
735 				printf(", %d-%d", j * CLBYTES, counts[j]);
736 		printf("\n");
737 	}
738 }
739 #endif /* DIAGNOSTIC */
740