/*-
 * Copyright (c) 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * Berkeley Software Design Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_bio.c	8.7 (Berkeley) 01/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
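
/*
 * Added commentary (a sketch, not from the original source): BUFHASH
 * folds the vnode address and logical block number into a hash index.
 * Dividing the vnode pointer by sizeof(*dvp) discards the low-order
 * bits that pointer alignment forces to zero, and "bufhash", as set
 * up by hashinit(), is a power-of-two table size minus one, so the
 * mask keeps the index in range.  With made-up figures, assuming
 * sizeof(struct vnode) == 0x100:
 *
 *	dvp = (struct vnode *)0xf0123400, lbn = 12, bufhash = 0x3f
 *	index = (0xf01234 + 12) & 0x3f = 0xf01240 & 0x3f = 0
 */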

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
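
/*
 * Added commentary: a sketch of the 4.4BSD <sys/queue.h> tail queue
 * layout that the search above relies on:
 *
 *	TAILQ_HEAD:  tqh_first - first element, NULL if empty
 *	             tqh_last  - address of the last element's tqe_next
 *	TAILQ_ENTRY: tqe_next  - next element, NULL only at the tail
 *	             tqe_prev  - address of previous element's tqe_next
 *
 * TAILQ_REMOVE() consults the head only when tqe_next is NULL (to
 * repair tqh_last), so leaving dp NULL for a non-tail element is
 * harmless; that is the implementation assumption the NB refers to.
 */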

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
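
/*
 * Worked example of the distribution above (illustrative figures
 * only): with nbuf = 100 headers and bufpages = 250 clusters,
 * base = 2 and residual = 50, so buffers 0-49 start with
 * (base + 1) * CLBYTES = 3 clusters apiece and buffers 50-99 with 2,
 * accounting for all 250 clusters while keeping any two buffers
 * within one cluster of each other.
 */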

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
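
/*
 * Added commentary: a typical calling sequence for bread() (a sketch;
 * the surrounding names, such as "bsize", are hypothetical, not from
 * this file):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (error = bread(vp, lbn, bsize, NOCRED, &bp)) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(use bp->b_data, then brelse(bp))
 *
 * On success the buffer is returned B_BUSY; on error it is still
 * returned and the caller is expected to brelse() it.
 */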

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size, 0, 0);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else {
			trace(TR_BREADHIT, pack(vp, size), blkno);
		}
	}

	/*
	 * If there are read-ahead blocks, start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
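
/*
 * Added commentary: an illustrative breadn() call requesting one
 * read-ahead block behind the named one (hypothetical names):
 *
 *	daddr_t rablk[1];
 *	int rasize[1];
 *
 *	rablk[0] = lbn + 1;
 *	rasize[0] = bsize;
 *	error = breadn(vp, lbn, bsize, rablk, rasize, 1, NOCRED, &bp);
 *
 * Only the named block is awaited; the read-ahead buffers are marked
 * B_ASYNC and are released by biodone() when their I/O completes.
 */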

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	if (bp->b_vp && (bp->b_vp->v_mount->mnt_flag & MNT_ASYNC))
		bp->b_flags |= B_ASYNC;
	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	bp->b_flags |= B_WRITEINPROG;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		if (bp->b_flags & B_EINTR) {
			bp->b_flags &= ~B_EINTR;
			error = EINTR;
		}
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}
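
/*
 * Added commentary: the usual delayed-write pattern (a sketch with
 * hypothetical surrounding code):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	(modify bp->b_data)
 *	bdwrite(bp);
 *
 * The buffer is released dirty with no I/O started; it is written
 * out later by the periodic sync or when getnewbuf() reclaims it.
 */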

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct bqueues *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (needbuffer) {
		needbuffer = 0;
		wakeup((caddr_t)&needbuffer);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bufqueues[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bufqueues[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bufqueues[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bufqueues[BQ_AGE];
		else
			flist = &bufqueues[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}
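
/*
 * Added commentary, summarizing the queue selection above:
 *
 *	b_bufsize <= 0        -> head of BQ_EMPTY (no memory to reuse)
 *	B_ERROR or B_INVAL    -> head of BQ_AGE (contents worthless)
 *	B_LOCKED              -> tail of BQ_LOCKED
 *	B_AGE                 -> tail of BQ_AGE
 *	otherwise             -> tail of BQ_LRU
 */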

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;

	for (bp = BUFHASH(vp, blkno)->lh_first; bp; bp = bp->b_hash.le_next)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (bp);
	return (NULL);
}

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	register struct buf *bp;
	struct bufhashhdr *dp;
	int s, error;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"getblk", slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto loop;
		}
		/*
		 * The test for B_INVAL is moved down here, since there
		 * are cases where B_INVAL is set before VOP_BWRITE() is
		 * called and for NFS, the process cannot be allowed to
		 * allocate a new buffer for the same block until the write
		 * back to the server has been completed (i.e., until
		 * B_BUSY clears).
		 */
		if (bp->b_flags & B_INVAL) {
			splx(s);
			continue;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			VOP_BWRITE(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/*
	 * The loop back to the top when getnewbuf() fails is because
	 * stateless filesystems like NFS have no node locks. Thus,
	 * there is a slight chance that more than one process will
	 * try and getnewbuf() for the same block concurrently when
	 * the first sleeps in getnewbuf(). So after a sleep, go back
	 * up to the top to check the hash lists again.
	 */
	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
		goto loop;
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
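
/*
 * Added commentary: the usual caller protocol for getblk() (a sketch;
 * names are hypothetical):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
 *		(cache miss: the caller must fill the buffer,
 *		 e.g. by setting B_READ and calling VOP_STRATEGY,
 *		 as bread() does)
 *	}
 *
 * Either way the buffer comes back B_BUSY and must eventually be
 * released via brelse(), bdwrite(), bawrite(), or bwrite().
 */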

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	while ((bp = getnewbuf(0, 0)) == NULL)
		/* void */;
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	binshash(bp, &invalhash);
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		if ((ep = bufqueues[BQ_EMPTY].tqh_first) == NULL)
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		while ((bp = getnewbuf(0, 0)) == NULL)
			/* void */;
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
		    &((char *)tp->b_data)[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &invalhash);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
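
/*
 * Worked example (illustrative, assuming CLBYTES = 4096): a request
 * of size = 6144 rounds up to sizealloc = 8192.  A buffer holding
 * 12288 bytes sheds one 4096-byte cluster into an empty header (the
 * shrink case); one holding 4096 steals 4096 more from a free buffer
 * (the grow case).  In both cases b_bcount is finally set to the
 * caller's unrounded 6144.
 */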

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	register struct bqueues *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
		if (dp->tqh_first)
			break;
	if (dp == bufqueues) {		/* no free blocks */
		needbuffer = 1;
		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
			"getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}
	bp = dp->tqh_first;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}
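
/*
 * Added commentary: the scan starts at bufqueues[BQ_AGE] and walks
 * backward while dp > bufqueues, so only BQ_AGE and then BQ_LRU are
 * examined.  BQ_LOCKED terminates the loop before being inspected,
 * and BQ_EMPTY is never reached, since its headers have no memory
 * worth recycling.  A B_DELWRI victim is flushed with bawrite() and
 * the scan is retried from the top.
 */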

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}

/*
 * Return the number of buffers on the BQ_LOCKED free list.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int ret;

	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].tqh_first;
	    bp; bp = (struct buf *)bp->b_freelist.tqe_next)
		++ret;
	return (ret);
}

#ifdef DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */