/*-
 * Copyright (c) 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * This code is derived from software contributed to Berkeley by
 * Berkeley Software Design Inc.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vfs_bio.c	8.10 (Berkeley) 02/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((int)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
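
/*
 * Illustrative note (not part of the original file): the <sys/queue.h>
 * layout assumed above is that, for the last element of a TAILQ, the
 * head's tail pointer addresses that element's tqe_next field:
 *
 *	bp->b_freelist.tqe_next == NULL
 *	dp->tqh_last == &bp->b_freelist.tqe_next
 *
 * so scanning each queue head for a matching tqh_last identifies which
 * queue a tail element is on without any back pointer to the head.
 */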

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
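
/*
 * Worked example (illustrative numbers, not from this file): with
 * bufpages == 205 and nbuf == 100, base == 2 and residual == 5, so
 * the first 5 buffer headers start with 3 * CLBYTES of memory, the
 * other 95 with 2 * CLBYTES, and every page is accounted for:
 * 5 * 3 + 95 * 2 == 205.
 */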

/*
 * Find the block in the buffer pool.
 * If the buffer is not present, allocate a new buffer and load
 * its contents according to the filesystem fill routine.
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp;

	if (size == 0)
		panic("bread: size 0");
	*bpp = bp = getblk(vp, blkno, size, 0, 0);
	if (bp->b_flags & (B_DONE | B_DELWRI)) {
		trace(TR_BREADHIT, pack(vp, size), blkno);
		return (0);
	}
	bp->b_flags |= B_READ;
	if (bp->b_bcount > bp->b_bufsize)
		panic("bread");
	if (bp->b_rcred == NOCRED && cred != NOCRED) {
		crhold(cred);
		bp->b_rcred = cred;
	}
	VOP_STRATEGY(bp);
	trace(TR_BREADMISS, pack(vp, size), blkno);
	p->p_stats->p_ru.ru_inblock++;		/* pay for read */
	return (biowait(bp));
}
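
#ifdef notdef
/*
 * Hypothetical caller (a sketch, not part of the original file): a
 * typical filesystem read path fetches a block with bread(), uses the
 * data, and releases the buffer; on error the buffer must still be
 * released.  The names example_bread, lbn and bsize are illustrative.
 */
static int
example_bread(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	int error;

	if (error = bread(vp, lbn, bsize, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	/* consume bp->b_data here */
	brelse(bp);
	return (0);
}
#endif /* notdef */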

/*
 * Operates like bread, but also starts I/O on the N specified
 * read-ahead blocks.
 */
breadn(vp, blkno, size, rablkno, rabsize, num, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno[]; int rabsize[];
	int num;
	struct ucred *cred;
	struct buf **bpp;
{
	struct proc *p = curproc;		/* XXX */
	register struct buf *bp, *rabp;
	register int i;

	bp = NULL;
	/*
	 * If the block is not memory resident,
	 * allocate a buffer and start I/O.
	 */
	if (!incore(vp, blkno)) {
		*bpp = bp = getblk(vp, blkno, size, 0, 0);
		if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
			bp->b_flags |= B_READ;
			if (bp->b_bcount > bp->b_bufsize)
				panic("breadn");
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
			VOP_STRATEGY(bp);
			trace(TR_BREADMISS, pack(vp, size), blkno);
			p->p_stats->p_ru.ru_inblock++;	/* pay for read */
		} else {
			trace(TR_BREADHIT, pack(vp, size), blkno);
		}
	}

	/*
	 * If there are read-ahead blocks, start I/O
	 * on them also (as above).
	 */
	for (i = 0; i < num; i++) {
		if (incore(vp, rablkno[i]))
			continue;
		rabp = getblk(vp, rablkno[i], rabsize[i], 0, 0);
		if (rabp->b_flags & (B_DONE | B_DELWRI)) {
			brelse(rabp);
			trace(TR_BREADHITRA, pack(vp, rabsize[i]), rablkno[i]);
		} else {
			rabp->b_flags |= B_ASYNC | B_READ;
			if (rabp->b_bcount > rabp->b_bufsize)
				panic("breadrabp");
			if (rabp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				rabp->b_rcred = cred;
			}
			VOP_STRATEGY(rabp);
			trace(TR_BREADMISSRA, pack(vp, rabsize[i]), rablkno[i]);
			p->p_stats->p_ru.ru_inblock++;	/* pay in advance */
		}
	}

	/*
	 * If block was memory resident, let bread get it.
	 * If block was not memory resident, the read was
	 * started above, so just wait for the read to complete.
	 */
	if (bp == NULL)
		return (bread(vp, blkno, size, cred, bpp));
	return (biowait(bp));
}
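
#ifdef notdef
/*
 * Hypothetical caller (a sketch, not part of the original file):
 * sequential readers pass the next logical block(s) as read-ahead so
 * the data is in transit before it is asked for.  One-entry arrays
 * suffice for a single read-ahead block; num counts the entries used.
 */
static int
example_breadn(vp, lbn, bsize, cred)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
	struct ucred *cred;
{
	struct buf *bp;
	daddr_t rablkno[1];
	int rabsize[1], error;

	rablkno[0] = lbn + 1;		/* read ahead the next block */
	rabsize[0] = bsize;
	if (error = breadn(vp, lbn, bsize, rablkno, rabsize, 1, cred, &bp)) {
		brelse(bp);
		return (error);
	}
	brelse(bp);
	return (0);
}
#endif /* notdef */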

/*
 * Synchronous write.
 * Release buffer on completion.
 */
bwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */
	register int flag;
	int s, error = 0;

	if ((bp->b_flags & B_ASYNC) == 0 &&
	    bp->b_vp && bp->b_vp->v_mount &&
	    (bp->b_vp->v_mount->mnt_flag & MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}
	flag = bp->b_flags;
	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	if (flag & B_ASYNC) {
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
	}
	trace(TR_BWRITE, pack(bp->b_vp, bp->b_bcount), bp->b_lblkno);
	if (bp->b_bcount > bp->b_bufsize)
		panic("bwrite");
	s = splbio();
	bp->b_vp->v_numoutput++;
	bp->b_flags |= B_WRITEINPROG;
	splx(s);
	VOP_STRATEGY(bp);

	/*
	 * If the write was synchronous, then await I/O completion.
	 * If the write was "delayed", then we put the buffer on
	 * the queue of blocks awaiting I/O completion status.
	 */
	if ((flag & B_ASYNC) == 0) {
		error = biowait(bp);
		if ((flag & B_DELWRI) == 0)
			p->p_stats->p_ru.ru_oublock++;	/* no one paid yet */
		else
			reassignbuf(bp, bp->b_vp);
		if (bp->b_flags & B_EINTR) {
			bp->b_flags &= ~B_EINTR;
			error = EINTR;
		}
		brelse(bp);
	} else if (flag & B_DELWRI) {
		s = splbio();
		bp->b_flags |= B_AGE;
		splx(s);
	}
	return (error);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
bdwrite(bp)
	register struct buf *bp;
{
	struct proc *p = curproc;		/* XXX */

	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		p->p_stats->p_ru.ru_oublock++;		/* no one paid yet */
	}
	/*
	 * If this is a tape drive, the write must be initiated.
	 */
	if (VOP_IOCTL(bp->b_vp, 0, (caddr_t)B_TAPE, 0, NOCRED, p) == 0) {
		bawrite(bp);
	} else {
		bp->b_flags |= (B_DONE | B_DELWRI);
		brelse(bp);
	}
}

/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
bawrite(bp)
	register struct buf *bp;
{

	/*
	 * Setting the ASYNC flag causes bwrite to return
	 * after starting the I/O.
	 */
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}
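
#ifdef notdef
/*
 * Hypothetical sketch (not part of the original file) contrasting the
 * three write flavors on a busy, modified buffer: bwrite() writes
 * synchronously and reports the I/O status, bdwrite() only marks the
 * buffer dirty in the expectation of further modification, and
 * bawrite() starts the write immediately without waiting for it.
 */
static int
example_write(bp, how)
	struct buf *bp;
	int how;
{

	switch (how) {
	case 0:
		return (bwrite(bp));	/* synchronous; waits in biowait */
	case 1:
		bdwrite(bp);		/* delayed; no I/O started yet */
		return (0);
	default:
		bawrite(bp);		/* asynchronous; released on done */
		return (0);
	}
}
#endif /* notdef */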

/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
brelse(bp)
	register struct buf *bp;
{
	register struct bqueues *flist;
	int s;

	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	/*
	 * If a process is waiting for the buffer, or
	 * is waiting for a free buffer, awaken it.
	 */
	if (bp->b_flags & B_WANTED)
		wakeup((caddr_t)bp);
	if (needbuffer) {
		needbuffer = 0;
		wakeup((caddr_t)&needbuffer);
	}
	/*
	 * Retry I/O for locked buffers rather than invalidating them.
	 */
	s = splbio();
	if ((bp->b_flags & B_ERROR) && (bp->b_flags & B_LOCKED))
		bp->b_flags &= ~B_ERROR;
	/*
	 * Disassociate buffers that are no longer valid.
	 */
	if (bp->b_flags & (B_NOCACHE | B_ERROR))
		bp->b_flags |= B_INVAL;
	if ((bp->b_bufsize <= 0) || (bp->b_flags & (B_ERROR | B_INVAL))) {
		if (bp->b_vp)
			brelvp(bp);
		bp->b_flags &= ~B_DELWRI;
	}
	/*
	 * Stick the buffer back on a free list.
	 */
	if (bp->b_bufsize <= 0) {
		/* block has no buffer ... put at front of unused buffer list */
		flist = &bufqueues[BQ_EMPTY];
		binsheadfree(bp, flist);
	} else if (bp->b_flags & (B_ERROR | B_INVAL)) {
		/* block has no info ... put at front of most free list */
		flist = &bufqueues[BQ_AGE];
		binsheadfree(bp, flist);
	} else {
		if (bp->b_flags & B_LOCKED)
			flist = &bufqueues[BQ_LOCKED];
		else if (bp->b_flags & B_AGE)
			flist = &bufqueues[BQ_AGE];
		else
			flist = &bufqueues[BQ_LRU];
		binstailfree(bp, flist);
	}
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_AGE | B_NOCACHE);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	register struct buf *bp;

	for (bp = BUFHASH(vp, blkno)->lh_first; bp; bp = bp->b_hash.le_next)
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0)
			return (bp);
	return (NULL);
}
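
/*
 * Illustrative note (not part of the original file): incore() never
 * blocks and never marks the buffer B_BUSY, so it only reports whether
 * a valid buffer exists at this instant.  breadn() above uses it this
 * way to avoid starting read-ahead on blocks that are already cached:
 *
 *	if (incore(vp, rablkno[i]))
 *		continue;
 */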

/*
 * Check to see if a block is currently memory resident.
 * If it is resident, return it. If it is not resident,
 * allocate a new buffer and assign it to the block.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	register struct buf *bp;
	struct bufhashhdr *dp;
	int s, error;

	if (size > MAXBSIZE)
		panic("getblk: size too big");
	/*
	 * Search the cache for the block. If the buffer is found,
	 * but it is currently locked, then we must wait for it to
	 * become available.
	 */
	dp = BUFHASH(vp, blkno);
loop:
	for (bp = dp->lh_first; bp; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;
		s = splbio();
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			error = tsleep((caddr_t)bp, slpflag | (PRIBIO + 1),
				"getblk", slptimeo);
			splx(s);
			if (error)
				return (NULL);
			goto loop;
		}
		/*
		 * The test for B_INVAL is moved down here, since there
		 * are cases where B_INVAL is set before VOP_BWRITE() is
		 * called and for NFS, the process cannot be allowed to
		 * allocate a new buffer for the same block until the write
		 * back to the server has been completed (i.e., until
		 * B_BUSY clears).
		 */
		if (bp->b_flags & B_INVAL) {
			splx(s);
			continue;
		}
		bremfree(bp);
		bp->b_flags |= B_BUSY;
		splx(s);
		if (bp->b_bcount != size) {
			printf("getblk: stray size\n");
			bp->b_flags |= B_INVAL;
			VOP_BWRITE(bp);
			goto loop;
		}
		bp->b_flags |= B_CACHE;
		return (bp);
	}
	/*
	 * The loop back to the top when getnewbuf() fails is because
	 * stateless filesystems like NFS have no node locks. Thus,
	 * there is a slight chance that more than one process will
	 * try to getnewbuf() for the same block concurrently when
	 * the first sleeps in getnewbuf(). So after a sleep, go back
	 * up to the top to check the hash lists again.
	 */
	if ((bp = getnewbuf(slpflag, slptimeo)) == 0)
		goto loop;
	bremhash(bp);
	bgetvp(vp, bp);
	bp->b_bcount = 0;
	bp->b_lblkno = blkno;
	bp->b_blkno = blkno;
	bp->b_error = 0;
	bp->b_resid = 0;
	binshash(bp, dp);
	allocbuf(bp, size);
	return (bp);
}
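
#ifdef notdef
/*
 * Hypothetical caller (a sketch, not part of the original file): a
 * writer that may modify only part of a block checks whether getblk()
 * returned valid contents, as bread() does, and must fill or read in
 * the block first when it did not.
 */
static struct buf *
example_getblk(vp, lbn, bsize)
	struct vnode *vp;
	daddr_t lbn;
	int bsize;
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
		/* contents invalid: fill bp->b_data or start a read */
	}
	return (bp);
}
#endif /* notdef */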

/*
 * Allocate a buffer.
 * The caller will assign it to a block.
 */
struct buf *
geteblk(size)
	int size;
{
	register struct buf *bp;

	if (size > MAXBSIZE)
		panic("geteblk: size too big");
	while ((bp = getnewbuf(0, 0)) == NULL)
		/* void */;
	bp->b_flags |= B_INVAL;
	bremhash(bp);
	binshash(bp, &invalhash);
	bp->b_bcount = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	allocbuf(bp, size);
	return (bp);
}
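
#ifdef notdef
/*
 * Hypothetical use (a sketch, not part of the original file): geteblk()
 * yields an anonymous buffer for scratch storage; because it is hashed
 * on invalhash and marked B_INVAL, neither incore() nor getblk() will
 * find it, and brelse() simply returns it to the free lists.
 */
static void
example_geteblk()
{
	struct buf *bp;

	bp = geteblk(MAXBSIZE);
	/* use bp->b_data as temporary space */
	brelse(bp);
}
#endif /* notdef */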

/*
 * Expand or contract the actual memory allocated to a buffer.
 * If no memory is available, release buffer and take error exit.
 */
allocbuf(tp, size)
	register struct buf *tp;
	int size;
{
	register struct buf *bp, *ep;
	int sizealloc, take, s;

	sizealloc = roundup(size, CLBYTES);
	/*
	 * Buffer size does not change.
	 */
	if (sizealloc == tp->b_bufsize)
		goto out;
	/*
	 * Buffer size is shrinking.
	 * Place excess space in a buffer header taken from the
	 * BQ_EMPTY buffer list and placed on the "most free" list.
	 * If no extra buffer headers are available, leave the
	 * extra space in the present buffer.
	 */
	if (sizealloc < tp->b_bufsize) {
		if ((ep = bufqueues[BQ_EMPTY].tqh_first) == NULL)
			goto out;
		s = splbio();
		bremfree(ep);
		ep->b_flags |= B_BUSY;
		splx(s);
		pagemove((char *)tp->b_data + sizealloc, ep->b_data,
		    (int)tp->b_bufsize - sizealloc);
		ep->b_bufsize = tp->b_bufsize - sizealloc;
		tp->b_bufsize = sizealloc;
		ep->b_flags |= B_INVAL;
		ep->b_bcount = 0;
		brelse(ep);
		goto out;
	}
	/*
	 * More buffer space is needed. Get it out of buffers on
	 * the "most free" list, placing the empty headers on the
	 * BQ_EMPTY buffer header list.
	 */
	while (tp->b_bufsize < sizealloc) {
		take = sizealloc - tp->b_bufsize;
		while ((bp = getnewbuf(0, 0)) == NULL)
			/* void */;
		if (take >= bp->b_bufsize)
			take = bp->b_bufsize;
		pagemove(&((char *)bp->b_data)[bp->b_bufsize - take],
		    &((char *)tp->b_data)[tp->b_bufsize], take);
		tp->b_bufsize += take;
		bp->b_bufsize = bp->b_bufsize - take;
		if (bp->b_bcount > bp->b_bufsize)
			bp->b_bcount = bp->b_bufsize;
		if (bp->b_bufsize <= 0) {
			bremhash(bp);
			binshash(bp, &invalhash);
			bp->b_dev = NODEV;
			bp->b_error = 0;
			bp->b_flags |= B_INVAL;
		}
		brelse(bp);
	}
out:
	tp->b_bcount = size;
	return (1);
}
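
/*
 * Worked example (illustrative numbers, not from this file): shrinking
 * a buffer from 4 * CLBYTES to 2 * CLBYTES hands the trailing
 * 2 * CLBYTES to an empty header via pagemove() and releases that
 * header onto the BQ_AGE list.  Growing from 2 * CLBYTES to
 * 4 * CLBYTES may loop: each donor from getnewbuf() contributes at
 * most its own b_bufsize, so two 1 * CLBYTES donors would be consumed
 * (their emptied headers moving to BQ_EMPTY) to supply the missing
 * 2 * CLBYTES.
 */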

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	register struct bqueues *dp;
	register struct ucred *cred;
	int s;

loop:
	s = splbio();
	for (dp = &bufqueues[BQ_AGE]; dp > bufqueues; dp--)
		if (dp->tqh_first)
			break;
	if (dp == bufqueues) {		/* no free blocks */
		needbuffer = 1;
		(void) tsleep((caddr_t)&needbuffer, slpflag | (PRIBIO + 1),
			"getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}
	bp = dp->tqh_first;
	bremfree(bp);
	bp->b_flags |= B_BUSY;
	splx(s);
	if (bp->b_flags & B_DELWRI) {
		(void) bawrite(bp);
		goto loop;
	}
	trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
	if (bp->b_vp)
		brelvp(bp);
	if (bp->b_rcred != NOCRED) {
		cred = bp->b_rcred;
		bp->b_rcred = NOCRED;
		crfree(cred);
	}
	if (bp->b_wcred != NOCRED) {
		cred = bp->b_wcred;
		bp->b_wcred = NOCRED;
		crfree(cred);
	}
	bp->b_flags = B_BUSY;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	return (bp);
}

/*
 * Wait for I/O to complete.
 *
 * Extract and return any errors associated with the I/O.
 * If the error flag is set, but no specific error is
 * given, return EIO.
 */
biowait(bp)
	register struct buf *bp;
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		sleep((caddr_t)bp, PRIBIO);
	splx(s);
	if ((bp->b_flags & B_ERROR) == 0)
		return (0);
	if (bp->b_error)
		return (bp->b_error);
	return (EIO);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 */
void
biodone(bp)
	register struct buf *bp;
{

	if (bp->b_flags & B_DONE)
		panic("dup biodone");
	bp->b_flags |= B_DONE;
	if ((bp->b_flags & B_READ) == 0)
		vwakeup(bp);
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone)(bp);
		return;
	}
	if (bp->b_flags & B_ASYNC)
		brelse(bp);
	else {
		bp->b_flags &= ~B_WANTED;
		wakeup((caddr_t)bp);
	}
}
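
#ifdef notdef
/*
 * Hypothetical async completion (a sketch, not part of the original
 * file): a caller such as the pageout daemon sets B_CALL and b_iodone
 * before starting the I/O; biodone() then invokes the callback instead
 * of waking a sleeping process.
 */
static void	example_done();

static void
example_start(bp)
	struct buf *bp;
{

	bp->b_flags |= B_ASYNC | B_CALL;
	bp->b_iodone = example_done;
	VOP_STRATEGY(bp);
}

static void
example_done(bp)
	struct buf *bp;
{

	/* B_CALL has been cleared by biodone(); dispose of the buffer */
	brelse(bp);
}
#endif /* notdef */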

int
count_lock_queue()
{
	register struct buf *bp;
	register int ret;

	for (ret = 0, bp = (struct buf *)bufqueues[BQ_LOCKED].tqh_first;
	    bp; bp = (struct buf *)bp->b_freelist.tqe_next)
		++ret;
	return (ret);
}

#ifdef DIAGNOSTIC
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DIAGNOSTIC */
749