xref: /openbsd/sys/kern/vfs_bio.c (revision d25d28bf)
1 /*	$OpenBSD: vfs_bio.c,v 1.178 2016/09/16 02:54:51 dlg Exp $	*/
2 /*	$NetBSD: vfs_bio.c,v 1.44 1996/06/11 11:15:36 pk Exp $	*/
3 
4 /*
5  * Copyright (c) 1994 Christopher G. Demetriou
6  * Copyright (c) 1982, 1986, 1989, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  * (c) UNIX System Laboratories, Inc.
9  * All or some portions of this file are derived from material licensed
10  * to the University of California by American Telephone and Telegraph
11  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
12  * the permission of UNIX System Laboratories, Inc.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
39  */
40 
41 /*
42  * Some references:
43  *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
44  *	Leffler, et al.: The Design and Implementation of the 4.3BSD
45  *		UNIX Operating System (Addison-Wesley, 1989)
46  */
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/proc.h>
51 #include <sys/buf.h>
52 #include <sys/vnode.h>
53 #include <sys/mount.h>
54 #include <sys/malloc.h>
55 #include <sys/pool.h>
56 #include <sys/resourcevar.h>
57 #include <sys/conf.h>
58 #include <sys/kernel.h>
59 #include <sys/specdev.h>
60 #include <uvm/uvm_extern.h>
61 
62 int nobuffers;
63 int needbuffer;
64 struct bio_ops bioops;
65 
66 /* private bufcache functions */
67 void bufcache_init(void);
68 void bufcache_adjust(void);
69 
70 /*
71  * Buffer pool for I/O buffers.
72  */
73 struct pool bufpool;
74 struct bufhead bufhead = LIST_HEAD_INITIALIZER(bufhead);
75 void buf_put(struct buf *);
76 
77 struct buf *bio_doread(struct vnode *, daddr_t, int, int);
78 struct buf *buf_get(struct vnode *, daddr_t, size_t);
79 void bread_cluster_callback(struct buf *);
80 
81 struct bcachestats bcstats;  /* counters */
82 long lodirtypages;      /* dirty page count low water mark */
83 long hidirtypages;      /* dirty page count high water mark */
84 long targetpages;   	/* target number of pages for cache size */
85 long buflowpages;	/* smallest size cache allowed */
86 long bufhighpages; 	/* largest size cache allowed */
87 long bufbackpages; 	/* minimum number of pages we shrink when asked to */
88 
89 vsize_t bufkvm;
90 
91 struct proc *cleanerproc;
92 int bd_req;			/* Sleep point for cleaner daemon. */
93 
94 #define NUM_CACHES 2
95 #define DMA_CACHE 0
96 struct bufcache cleancache[NUM_CACHES];
97 struct bufqueue dirtyqueue;
98 
99 void
100 buf_put(struct buf *bp)
101 {
102 	splassert(IPL_BIO);
103 
104 #ifdef DIAGNOSTIC
105 	if (bp->b_pobj != NULL)
106 		KASSERT(bp->b_bufsize > 0);
107 	if (ISSET(bp->b_flags, B_DELWRI))
108 		panic("buf_put: releasing dirty buffer");
109 	if (bp->b_freelist.tqe_next != NOLIST &&
110 	    bp->b_freelist.tqe_next != (void *)-1)
111 		panic("buf_put: still on the free list");
112 	if (bp->b_vnbufs.le_next != NOLIST &&
113 	    bp->b_vnbufs.le_next != (void *)-1)
114 		panic("buf_put: still on the vnode list");
115 	if (!LIST_EMPTY(&bp->b_dep))
116 		panic("buf_put: b_dep is not empty");
117 #endif
118 
119 	LIST_REMOVE(bp, b_list);
120 	bcstats.numbufs--;
121 
122 	if (buf_dealloc_mem(bp) != 0)
123 		return;
124 	pool_put(&bufpool, bp);
125 }
126 
127 /*
128  * Initialize buffers and hash links for buffers.
129  */
130 void
131 bufinit(void)
132 {
133 	u_int64_t dmapages;
134 
135 	dmapages = uvm_pagecount(&dma_constraint);
136 	/* take away a guess at how much of this the kernel will consume */
137 	dmapages -= (atop(physmem) - atop(uvmexp.free));
138 
139 	/*
140 	 * If MD code doesn't say otherwise, use up to 10% of DMA'able
141 	 * memory for buffers.
142 	 */
143 	if (bufcachepercent == 0)
144 		bufcachepercent = 10;
145 
146 	/*
147 	 * XXX these values, and their identical use in kern_sysctl,
148 	 * need to move into buf.h
149 	 */
150 	KASSERT(bufcachepercent <= 90);
151 	KASSERT(bufcachepercent >= 5);
152 	if (bufpages == 0)
153 		bufpages = dmapages * bufcachepercent / 100;
154 	if (bufpages < BCACHE_MIN)
155 		bufpages = BCACHE_MIN;
156 	KASSERT(bufpages < dmapages);
157 
158 	bufhighpages = bufpages;
159 
160 	/*
161 	 * Set the base backoff level for the buffer cache.  We will
162 	 * not allow uvm to steal back more than this number of pages.
163 	 */
164 	buflowpages = dmapages * 5 / 100;
165 	if (buflowpages < BCACHE_MIN)
166 		buflowpages = BCACHE_MIN;
167 
168 	/*
169 	 * Set bufbackpages to 10 percent of the low water mark, capped
170 	 * at 100 pages.
171 	 */
172 
173 	bufbackpages = buflowpages * 10 / 100;
174 	if (bufbackpages > 100)
175 		bufbackpages = 100;
176 
177 	/*
178 	 * If the MD code does not say otherwise, reserve 10% of kva
179 	 * space for mapping buffers.
180 	 */
181 	if (bufkvm == 0)
182 		bufkvm = VM_KERNEL_SPACE_SIZE / 10;
183 
184 	/*
185 	 * Don't use more than twice the amount of bufpages for mappings.
186 	 * It's twice since we map things sparsely.
187 	 */
188 	if (bufkvm > bufpages * PAGE_SIZE)
189 		bufkvm = bufpages * PAGE_SIZE;
190 	/*
191 	 * Round bufkvm down to a multiple of MAXPHYS because we allocate
192 	 * va space in MAXPHYS chunks.
193 	 */
194 	bufkvm &= ~(MAXPHYS - 1);
195 
196 	pool_init(&bufpool, sizeof(struct buf), 0, IPL_BIO, 0, "bufpl", NULL);
197 
198 	bufcache_init();
199 
200 	/*
201 	 * bufkvm is passed in as an argument because it is static, while
202 	 * bufpages is a global because it can change while running.
203 	 */
204 	buf_mem_init(bufkvm);
205 
206 	/*
207 	 * Set the dirty page high water mark to be less than the low
208 	 * water mark for pages in the buffer cache. This ensures we
209 	 * can always back off by throwing away clean pages, and give
210 	 * ourselves a chance to write out the dirty pages eventually.
211 	 */
212 	hidirtypages = (buflowpages / 4) * 3;
213 	lodirtypages = buflowpages / 2;
214 
215 	/*
216 	 * We are allowed to use up to the reserve.
217 	 */
218 	targetpages = bufpages - RESERVE_PAGES;
219 }
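
/*
 * Worked example of the sizing above (hypothetical machine, for
 * illustration only): assume 4 KB pages and roughly 1,000,000 DMA'able
 * pages left over after the kernel's share (about 4 GB).  With the
 * default bufcachepercent of 10:
 *
 *	bufpages     = 1,000,000 * 10 / 100 = 100,000	(about 400 MB)
 *	buflowpages  = 1,000,000 *  5 / 100 =  50,000	(about 200 MB)
 *	bufbackpages = min(50,000 * 10 / 100, 100) = 100
 *	hidirtypages = (50,000 / 4) * 3 = 37,500
 *	lodirtypages =  50,000 / 2      = 25,000
 *	targetpages  = 100,000 - RESERVE_PAGES
 *
 * The real values depend on the machine and on any MD overrides of
 * bufcachepercent and bufkvm.
 */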
220 
221 /*
222  * Change the size of the buffer cache to newbufpages pages.
223  */
224 void
225 bufadjust(int newbufpages)
226 {
227 	struct buf *bp;
228 	int s;
229 
230 	if (newbufpages < buflowpages)
231 		newbufpages = buflowpages;
232 
233 	s = splbio();
234 	bufpages = newbufpages;
235 
236 	/*
237 	 * We are allowed to use up to the reserve
238 	 */
239 	targetpages = bufpages - RESERVE_PAGES;
240 
241 	/*
242 	 * Shrinking the cache happens here only if someone has manually
243 	 * adjusted bufcachepercent - or the pagedaemon has told us
244 	 * to give back memory *now* - so we give it all back.
245 	 */
246 	while ((bp = bufcache_getanycleanbuf()) &&
247 	    (bcstats.numbufpages > targetpages)) {
248 		bufcache_take(bp);
249 		if (bp->b_vp) {
250 			RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
251 			brelvp(bp);
252 		}
253 		buf_put(bp);
254 	}
255 	bufcache_adjust();
256 
257 	/*
258 	 * Wake up the cleaner if we have lots of dirty pages,
259 	 * or if we are getting low on buffer cache kva.
260 	 */
261 	if ((UNCLEAN_PAGES >= hidirtypages) ||
262 	    bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
263 		wakeup(&bd_req);
264 
265 	splx(s);
266 }
267 
268 /*
269  * Make the buffer cache back off from cachepct.
270  */
271 int
272 bufbackoff(struct uvm_constraint_range *range, long size)
273 {
274 	/*
275 	 * Back off "size" buffer cache pages. Called by the page
276 	 * daemon to consume buffer cache pages rather than scanning.
277 	 *
278 	 * It returns 0 to the pagedaemon to indicate that it has
279 	 * succeeded in freeing enough pages. It returns -1 to
280 	 * indicate that it could not and the pagedaemon should take
281 	 * other measures.
282 	 *
283 	 */
284 	long pdelta, oldbufpages;
285 
286 	/*
287 	 * Back off by at least bufbackpages. If the page daemon gave us
288 	 * a larger size, back off by that much.
289 	 */
290 	pdelta = (size > bufbackpages) ? size : bufbackpages;
291 
292 	if (bufpages <= buflowpages)
293 		return (-1);
294 	if (bufpages - pdelta < buflowpages)
295 		pdelta = bufpages - buflowpages;
296 	oldbufpages = bufpages;
297 	bufadjust(bufpages - pdelta);
298 	if (oldbufpages - bufpages < size)
299 		return (-1); /* we did not free as much as we were asked to */
300 	else
301 		return (0);
302 }
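
/*
 * Hedged caller sketch (not from this file): the page daemon is the
 * expected caller and only needs the return value.  "shortage" is a
 * hypothetical page count standing in for the pagedaemon's own
 * bookkeeping; note that the range argument is accepted but not
 * otherwise examined in this revision.
 *
 *	if (bufbackoff(&dma_constraint, shortage) == 0)
 *		return;		(the cache freed enough pages)
 *	(otherwise fall through and reclaim pages some other way)
 */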
303 
304 void
305 buf_flip_high(struct buf *bp)
306 {
307 	KASSERT(ISSET(bp->b_flags, B_BC));
308 	KASSERT(ISSET(bp->b_flags, B_DMA));
309 	KASSERT(bp->cache == DMA_CACHE);
310 	CLR(bp->b_flags, B_DMA);
311 	/* XXX does nothing to buffer for now */
312 }
313 
314 void
315 buf_flip_dma(struct buf *bp)
316 {
317 	KASSERT(ISSET(bp->b_flags, B_BC));
318 	KASSERT(ISSET(bp->b_flags, B_BUSY));
319 	if (!ISSET(bp->b_flags, B_DMA)) {
320 		KASSERT(bp->cache > DMA_CACHE);
321 		KASSERT(bp->cache < NUM_CACHES);
322 		/* XXX does not flip buffer for now */
323 		/* make buffer hot, in DMA_CACHE, once it gets released. */
324 		CLR(bp->b_flags, B_COLD);
325 		CLR(bp->b_flags, B_WARM);
326 		SET(bp->b_flags, B_DMA);
327 		bp->cache = DMA_CACHE;
328 	}
329 }
330 
331 struct buf *
332 bio_doread(struct vnode *vp, daddr_t blkno, int size, int async)
333 {
334 	struct buf *bp;
335 	struct mount *mp;
336 
337 	bp = getblk(vp, blkno, size, 0, 0);
338 
339 	/*
340 	 * If buffer does not have valid data, start a read.
341 	 * Note that if buffer is B_INVAL, getblk() won't return it.
342 	 * Therefore, it's valid if its I/O has completed or been delayed.
343 	 */
344 	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
345 		SET(bp->b_flags, B_READ | async);
346 		bcstats.pendingreads++;
347 		bcstats.numreads++;
348 		VOP_STRATEGY(bp);
349 		/* Pay for the read. */
350 		curproc->p_ru.ru_inblock++;			/* XXX */
351 	} else if (async) {
352 		brelse(bp);
353 	}
354 
355 	mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
356 
357 	/*
358 	 * Collect statistics on synchronous and asynchronous reads.
359 	 * Reads from block devices are charged to their associated
360 	 * filesystem (if any).
361 	 */
362 	if (mp != NULL) {
363 		if (async == 0)
364 			mp->mnt_stat.f_syncreads++;
365 		else
366 			mp->mnt_stat.f_asyncreads++;
367 	}
368 
369 	return (bp);
370 }
371 
372 /*
373  * Read a disk block.
374  * This algorithm described in Bach (p.54).
375  */
376 int
377 bread(struct vnode *vp, daddr_t blkno, int size, struct buf **bpp)
378 {
379 	struct buf *bp;
380 
381 	/* Get buffer for block. */
382 	bp = *bpp = bio_doread(vp, blkno, size, 0);
383 
384 	/* Wait for the read to complete, and return result. */
385 	return (biowait(bp));
386 }
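
/*
 * Typical use (illustrative sketch, not taken from a real caller): a
 * filesystem reads one logical block and must brelse() the buffer when
 * it is done with it.  "lblkno" and "bsize" are placeholders for the
 * caller's block number and block size.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(inspect bp->b_data here)
 *	brelse(bp);
 */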
387 
388 /*
389  * Read-ahead multiple disk blocks. The first is sync, the rest async.
390  * Trivial modification to the breada algorithm presented in Bach (p.55).
391  */
392 int
393 breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t rablks[],
394     int rasizes[], int nrablks, struct buf **bpp)
395 {
396 	struct buf *bp;
397 	int i;
398 
399 	bp = *bpp = bio_doread(vp, blkno, size, 0);
400 
401 	/*
402 	 * For each of the read-ahead blocks, start a read, if necessary.
403 	 */
404 	for (i = 0; i < nrablks; i++) {
405 		/* If it's in the cache, just go on to next one. */
406 		if (incore(vp, rablks[i]))
407 			continue;
408 
409 		/* Get a buffer for the read-ahead block */
410 		(void) bio_doread(vp, rablks[i], rasizes[i], B_ASYNC);
411 	}
412 
413 	/* Otherwise, we had to start a read for it; wait until it's valid. */
414 	return (biowait(bp));
415 }
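
/*
 * Illustrative sketch (hypothetical caller): read block lblkno
 * synchronously and hint that the next two blocks will probably be
 * wanted soon.  Only the first block is waited for; the read-ahead
 * buffers complete asynchronously and simply warm the cache.
 *
 *	daddr_t rablks[2] = { lblkno + 1, lblkno + 2 };
 *	int rasizes[2] = { bsize, bsize };
 *	struct buf *bp;
 *	int error;
 *
 *	error = breadn(vp, lblkno, bsize, rablks, rasizes, 2, &bp);
 */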
416 
417 /*
418  * Called from interrupt context.
419  */
420 void
421 bread_cluster_callback(struct buf *bp)
422 {
423 	struct buf **xbpp = bp->b_saveaddr;
424 	int i;
425 
426 	if (xbpp[1] != NULL) {
427 		size_t newsize = xbpp[1]->b_bufsize;
428 
429 		/*
430 		 * Shrink this buffer's mapping to only cover its part of
431 		 * the total I/O.
432 		 */
433 		buf_fix_mapping(bp, newsize);
434 		bp->b_bcount = newsize;
435 	}
436 
437 	for (i = 1; xbpp[i] != NULL; i++) {
438 		if (ISSET(bp->b_flags, B_ERROR))
439 			SET(xbpp[i]->b_flags, B_INVAL | B_ERROR);
440 		biodone(xbpp[i]);
441 	}
442 
443 	free(xbpp, M_TEMP, 0);
444 
445 	if (ISSET(bp->b_flags, B_ASYNC)) {
446 		brelse(bp);
447 	} else {
448 		CLR(bp->b_flags, B_WANTED);
449 		wakeup(bp);
450 	}
451 }
452 
453 int
454 bread_cluster(struct vnode *vp, daddr_t blkno, int size, struct buf **rbpp)
455 {
456 	struct buf *bp, **xbpp;
457 	int howmany, maxra, i, inc;
458 	daddr_t sblkno;
459 
460 	*rbpp = bio_doread(vp, blkno, size, 0);
461 
462 	/*
463 	 * If the buffer is in the cache skip any I/O operation.
464 	 */
465 	if (ISSET((*rbpp)->b_flags, B_CACHE))
466 		goto out;
467 
468 	if (size != round_page(size))
469 		goto out;
470 
471 	if (VOP_BMAP(vp, blkno + 1, NULL, &sblkno, &maxra))
472 		goto out;
473 
474 	maxra++;
475 	if (sblkno == -1 || maxra < 2)
476 		goto out;
477 
478 	howmany = MAXPHYS / size;
479 	if (howmany > maxra)
480 		howmany = maxra;
481 
482 	xbpp = mallocarray(howmany + 1, sizeof(struct buf *), M_TEMP, M_NOWAIT);
483 	if (xbpp == NULL)
484 		goto out;
485 
486 	for (i = howmany - 1; i >= 0; i--) {
487 		size_t sz;
488 
489 		/*
490 		 * The first buffer is allocated large enough to cover what
491 		 * all the other buffers need.
492 		 */
493 		sz = i == 0 ? howmany * size : 0;
494 
495 		xbpp[i] = buf_get(vp, blkno + i + 1, sz);
496 		if (xbpp[i] == NULL) {
497 			for (++i; i < howmany; i++) {
498 				SET(xbpp[i]->b_flags, B_INVAL);
499 				brelse(xbpp[i]);
500 			}
501 			free(xbpp, M_TEMP, 0);
502 			goto out;
503 		}
504 	}
505 
506 	bp = xbpp[0];
507 
508 	xbpp[howmany] = NULL;
509 
510 	inc = btodb(size);
511 
512 	for (i = 1; i < howmany; i++) {
513 		bcstats.pendingreads++;
514 		bcstats.numreads++;
515 		/*
516 		 * We set B_DMA here because bp above will be B_DMA,
517 		 * and we are playing buffer slice-n-dice games from
518 		 * the memory allocated in bp.
519 		 */
520 		SET(xbpp[i]->b_flags, B_DMA | B_READ | B_ASYNC);
521 		xbpp[i]->b_blkno = sblkno + (i * inc);
522 		xbpp[i]->b_bufsize = xbpp[i]->b_bcount = size;
523 		xbpp[i]->b_data = NULL;
524 		xbpp[i]->b_pobj = bp->b_pobj;
525 		xbpp[i]->b_poffs = bp->b_poffs + (i * size);
526 	}
527 
528 	KASSERT(bp->b_lblkno == blkno + 1);
529 	KASSERT(bp->b_vp == vp);
530 
531 	bp->b_blkno = sblkno;
532 	SET(bp->b_flags, B_READ | B_ASYNC | B_CALL);
533 
534 	bp->b_saveaddr = (void *)xbpp;
535 	bp->b_iodone = bread_cluster_callback;
536 
537 	bcstats.pendingreads++;
538 	bcstats.numreads++;
539 	VOP_STRATEGY(bp);
540 	curproc->p_ru.ru_inblock++;
541 
542 out:
543 	return (biowait(*rbpp));
544 }
545 
546 /*
547  * Block write.  Described in Bach (p.56)
548  */
549 int
550 bwrite(struct buf *bp)
551 {
552 	int rv, async, wasdelayed, s;
553 	struct vnode *vp;
554 	struct mount *mp;
555 
556 	vp = bp->b_vp;
557 	if (vp != NULL)
558 		mp = vp->v_type == VBLK? vp->v_specmountpoint : vp->v_mount;
559 	else
560 		mp = NULL;
561 
562 	/*
563 	 * Remember buffer type, to switch on it later.  If the write was
564 	 * synchronous, but the file system was mounted with MNT_ASYNC,
565 	 * convert it to a delayed write.
566 	 * XXX note that this relies on delayed tape writes being converted
567 	 * to async, not sync writes (which is safe, but ugly).
568 	 */
569 	async = ISSET(bp->b_flags, B_ASYNC);
570 	if (!async && mp && ISSET(mp->mnt_flag, MNT_ASYNC)) {
571 		bdwrite(bp);
572 		return (0);
573 	}
574 
575 	/*
576 	 * Collect statistics on synchronous and asynchronous writes.
577 	 * Writes to block devices are charged to their associated
578 	 * filesystem (if any).
579 	 */
580 	if (mp != NULL) {
581 		if (async)
582 			mp->mnt_stat.f_asyncwrites++;
583 		else
584 			mp->mnt_stat.f_syncwrites++;
585 	}
586 	bcstats.pendingwrites++;
587 	bcstats.numwrites++;
588 
589 	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
590 	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
591 
592 	s = splbio();
593 
594 	/*
595 	 * If not synchronous, pay for the I/O operation and make
596 	 * sure the buf is on the correct vnode queue.  We have
597 	 * to do this now, because if we don't, the vnode may not
598 	 * be properly notified that its I/O has completed.
599 	 */
600 	if (wasdelayed) {
601 		reassignbuf(bp);
602 	} else
603 		curproc->p_ru.ru_oublock++;
604 
605 
606 	/* Initiate disk write.  Make sure the appropriate party is charged. */
607 	bp->b_vp->v_numoutput++;
608 	splx(s);
609 	buf_flip_dma(bp);
610 	SET(bp->b_flags, B_WRITEINPROG);
611 	VOP_STRATEGY(bp);
612 
613 	/*
614 	 * If the queue is above the high water mark, wait till
615 	 * the number of outstanding write bufs drops below the low
616 	 * water mark.
617 	 */
618 	if (bp->b_bq)
619 		bufq_wait(bp->b_bq);
620 
621 	if (async)
622 		return (0);
623 
624 	/*
625 	 * If I/O was synchronous, wait for it to complete.
626 	 */
627 	rv = biowait(bp);
628 
629 	/* Release the buffer. */
630 	brelse(bp);
631 
632 	return (rv);
633 }
634 
635 
636 /*
637  * Delayed write.
638  *
639  * The buffer is marked dirty, but is not queued for I/O.
640  * This routine should be used when the buffer is expected
641  * to be modified again soon, typically a small write that
642  * partially fills a buffer.
643  *
644  * NB: magnetic tapes cannot be delayed; they must be
645  * written in the order that the writes are requested.
646  *
647  * Described in Leffler, et al. (pp. 208-213).
648  */
649 void
650 bdwrite(struct buf *bp)
651 {
652 	int s;
653 
654 	/*
655 	 * If the block hasn't been seen before:
656 	 *	(1) Mark it as having been seen,
657 	 *	(2) Charge for the write,
658 	 *	(3) Make sure it's on its vnode's correct block list,
659 	 *	(4) If a buffer is rewritten, move it to the end of the dirty list.
660 	 */
661 	if (!ISSET(bp->b_flags, B_DELWRI)) {
662 		SET(bp->b_flags, B_DELWRI);
663 		s = splbio();
664 		buf_flip_dma(bp);
665 		reassignbuf(bp);
666 		splx(s);
667 		curproc->p_ru.ru_oublock++;		/* XXX */
668 	}
669 
670 	/* The "write" is done, so mark and release the buffer. */
671 	CLR(bp->b_flags, B_NEEDCOMMIT);
672 	SET(bp->b_flags, B_DONE);
673 	brelse(bp);
674 }
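
/*
 * Illustrative read-modify-write sketch (hypothetical caller): a small
 * update that touches only part of a block is the typical bdwrite()
 * user.  The buffer stays cached and dirty until the cleaner or syncer
 * pushes it out, or until a later bwrite()/bawrite() forces the issue.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lblkno, bsize, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	(modify a few bytes of bp->b_data here)
 *	bdwrite(bp);
 */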
675 
676 /*
677  * Asynchronous block write; just an asynchronous bwrite().
678  */
679 void
680 bawrite(struct buf *bp)
681 {
682 
683 	SET(bp->b_flags, B_ASYNC);
684 	VOP_BWRITE(bp);
685 }
686 
687 /*
688  * Must be called at splbio()
689  */
690 void
691 buf_dirty(struct buf *bp)
692 {
693 	splassert(IPL_BIO);
694 
695 #ifdef DIAGNOSTIC
696 	if (!ISSET(bp->b_flags, B_BUSY))
697 		panic("Trying to dirty buffer on freelist!");
698 #endif
699 
700 	if (ISSET(bp->b_flags, B_DELWRI) == 0) {
701 		SET(bp->b_flags, B_DELWRI);
702 		buf_flip_dma(bp);
703 		reassignbuf(bp);
704 	}
705 }
706 
707 /*
708  * Must be called at splbio()
709  */
710 void
711 buf_undirty(struct buf *bp)
712 {
713 	splassert(IPL_BIO);
714 
715 #ifdef DIAGNOSTIC
716 	if (!ISSET(bp->b_flags, B_BUSY))
717 		panic("Trying to undirty buffer on freelist!");
718 #endif
719 	if (ISSET(bp->b_flags, B_DELWRI)) {
720 		CLR(bp->b_flags, B_DELWRI);
721 		reassignbuf(bp);
722 	}
723 }
724 
725 /*
726  * Release a buffer on to the free lists.
727  * Described in Bach (p. 46).
728  */
729 void
730 brelse(struct buf *bp)
731 {
732 	int s;
733 
734 	s = splbio();
735 
736 	if (bp->b_data != NULL)
737 		KASSERT(bp->b_bufsize > 0);
738 
739 	/*
740 	 * Determine which queue the buffer should be on, then put it there.
741 	 */
742 
743 	/* If it's not cacheable, or an error, mark it invalid. */
744 	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
745 		SET(bp->b_flags, B_INVAL);
746 
747 	if (ISSET(bp->b_flags, B_INVAL)) {
748 		/*
749 		 * If the buffer is invalid, free it now rather than leaving
750 		 * it in a queue and wasting memory.
751 		 */
752 		if (LIST_FIRST(&bp->b_dep) != NULL)
753 			buf_deallocate(bp);
754 
755 		if (ISSET(bp->b_flags, B_DELWRI)) {
756 			CLR(bp->b_flags, B_DELWRI);
757 		}
758 
759 		if (bp->b_vp) {
760 			RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
761 			brelvp(bp);
762 		}
763 		bp->b_vp = NULL;
764 
765 		/*
766 		 * Wake up any processes waiting for _this_ buffer to
767 		 * become free. They are not allowed to grab it
768 		 * since it will be freed. But the only sleeper is
769 		 * getblk and it will restart the operation after
770 		 * sleep.
771 		 */
772 		if (ISSET(bp->b_flags, B_WANTED)) {
773 			CLR(bp->b_flags, B_WANTED);
774 			wakeup(bp);
775 		}
776 		buf_put(bp);
777 	} else {
778 		/*
779 		 * It has valid data.  Put it on the end of the appropriate
780 		 * queue, so that it'll stick around for as long as possible.
781 		 */
782 		bufcache_release(bp);
783 
784 		/* Unlock the buffer. */
785 		CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
786 		buf_release(bp);
787 
788 		/* Wake up any processes waiting for _this_ buffer to
789 		 * become free. */
790 		if (ISSET(bp->b_flags, B_WANTED)) {
791 			CLR(bp->b_flags, B_WANTED);
792 			wakeup(bp);
793 		}
794 	}
795 
796 	/* Wake up syncer and cleaner processes waiting for buffers. */
797 	if (nobuffers) {
798 		nobuffers = 0;
799 		wakeup(&nobuffers);
800 	}
801 
802 	/* Wake up any processes waiting for any buffer to become free. */
803 	if (needbuffer && bcstats.numbufpages < targetpages &&
804 	    bcstats.kvaslots_avail > RESERVE_SLOTS) {
805 		needbuffer = 0;
806 		wakeup(&needbuffer);
807 	}
808 
809 	splx(s);
810 }
811 
812 /*
813  * Determine if a block is in the cache. Just look on what would be its hash
814  * chain. If it's there, return a pointer to it, unless it's marked invalid.
815  */
816 struct buf *
817 incore(struct vnode *vp, daddr_t blkno)
818 {
819 	struct buf *bp;
820 	struct buf b;
821 	int s;
822 
823 	s = splbio();
824 
825 	/* Search buf lookup tree */
826 	b.b_lblkno = blkno;
827 	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
828 	if (bp != NULL && ISSET(bp->b_flags, B_INVAL))
829 		bp = NULL;
830 
831 	splx(s);
832 	return (bp);
833 }
834 
835 /*
836  * Get a block of requested size that is associated with
837  * a given vnode and block offset. If it is found in the
838  * block cache, mark it as having been found, make it busy
839  * and return it. Otherwise, return an empty block of the
840  * correct size. It is up to the caller to ensure that the
841  * cached blocks are of the correct size.
842  */
843 struct buf *
844 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
845 {
846 	struct buf *bp;
847 	struct buf b;
848 	int s, error;
849 
850 	/*
851 	 * XXX
852 	 * The following is an inlined version of 'incore()', but with
853 	 * the 'invalid' test moved to after the 'busy' test.  It's
854 	 * necessary because there are some cases in which the NFS
855 	 * code sets B_INVAL prior to writing data to the server, but
856 	 * in which the buffers actually contain valid data.  In this
857 	 * case, we can't allow the system to allocate a new buffer for
858 	 * the block until the write is finished.
859 	 */
860 start:
861 	s = splbio();
862 	b.b_lblkno = blkno;
863 	bp = RBT_FIND(buf_rb_bufs, &vp->v_bufs_tree, &b);
864 	if (bp != NULL) {
865 		if (ISSET(bp->b_flags, B_BUSY)) {
866 			SET(bp->b_flags, B_WANTED);
867 			error = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
868 			    slptimeo);
869 			splx(s);
870 			if (error)
871 				return (NULL);
872 			goto start;
873 		}
874 
875 		if (!ISSET(bp->b_flags, B_INVAL)) {
876 			bcstats.cachehits++;
877 			SET(bp->b_flags, B_CACHE);
878 			bufcache_take(bp);
879 			buf_acquire(bp);
880 			splx(s);
881 			return (bp);
882 		}
883 	}
884 	splx(s);
885 
886 	if ((bp = buf_get(vp, blkno, size)) == NULL)
887 		goto start;
888 
889 	return (bp);
890 }
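
/*
 * Illustrative sketch (hypothetical caller): when a block is about to
 * be completely overwritten there is no need to read it first, so the
 * caller uses getblk() instead of bread() and then writes the buffer
 * out.  With slpflag and slptimeo both 0 this call does not return
 * NULL; with a catchable slpflag the sleep may be interrupted and NULL
 * returned instead.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	bp = getblk(vp, lblkno, bsize, 0, 0);
 *	(fill all of bp->b_data here)
 *	error = bwrite(bp);		(or bdwrite(bp) / bawrite(bp))
 */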
891 
892 /*
893  * Get an empty, disassociated buffer of given size.
894  */
895 struct buf *
896 geteblk(int size)
897 {
898 	struct buf *bp;
899 
900 	while ((bp = buf_get(NULL, 0, size)) == NULL)
901 		continue;
902 
903 	return (bp);
904 }
905 
906 /*
907  * Allocate a buffer.
908  */
909 struct buf *
910 buf_get(struct vnode *vp, daddr_t blkno, size_t size)
911 {
912 	struct buf *bp;
913 	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
914 	int npages;
915 	int s;
916 
917 	s = splbio();
918 	if (size) {
919 		/*
920 		 * Wake up the cleaner if we have lots of dirty pages,
921 		 * or if we are getting low on buffer cache kva.
922 		 */
923 		if (UNCLEAN_PAGES >= hidirtypages ||
924 			bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
925 			wakeup(&bd_req);
926 
927 		npages = atop(round_page(size));
928 
929 		/*
930 		 * If our cache has previously been shrunk, allow it to
931 		 * grow again with use, up to bufhighpages (the size implied
932 		 * by bufcachepercent).
933 		 */
934 		if (bufpages < bufhighpages)
935 			bufadjust(bufhighpages);
936 
937 		/*
938 		 * If we would go over the page target with our
939 		 * new allocation, free enough buffers first
940 		 * to stay at the target with our new allocation.
941 		 */
942 		while ((bcstats.numbufpages + npages > targetpages) &&
943 		    (bp = bufcache_getanycleanbuf())) {
944 			bufcache_take(bp);
945 			if (bp->b_vp) {
946 				RBT_REMOVE(buf_rb_bufs,
947 				    &bp->b_vp->v_bufs_tree, bp);
948 				brelvp(bp);
949 			}
950 			buf_put(bp);
951 		}
952 
953 		/*
954 		 * If we get here, we tried to free buffers above and still
955 		 * could not get below the target. Wake the cleaner and
956 		 * wait for it to push some buffers out.
957 		 */
958 		if ((bcstats.numbufpages + npages > targetpages ||
959 		    bcstats.kvaslots_avail <= RESERVE_SLOTS) &&
960 		    curproc != syncerproc && curproc != cleanerproc) {
961 			wakeup(&bd_req);
962 			needbuffer++;
963 			tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
964 			splx(s);
965 			return (NULL);
966 		}
967 		if (bcstats.numbufpages + npages > bufpages) {
968 			/* cleaner or syncer */
969 			nobuffers = 1;
970 			tsleep(&nobuffers, PRIBIO, "nobuffers", 0);
971 			splx(s);
972 			return (NULL);
973 		}
974 	}
975 
976 	bp = pool_get(&bufpool, poolwait|PR_ZERO);
977 
978 	if (bp == NULL) {
979 		splx(s);
980 		return (NULL);
981 	}
982 
983 	bp->b_freelist.tqe_next = NOLIST;
984 	bp->b_dev = NODEV;
985 	LIST_INIT(&bp->b_dep);
986 	bp->b_bcount = size;
987 
988 	buf_acquire_nomap(bp);
989 
990 	if (vp != NULL) {
991 		/*
992 		 * We insert the buffer into the hash with B_BUSY set
993 		 * while we allocate pages for it. This way any getblk
994 		 * that happens while we allocate pages will wait for
995 		 * this buffer instead of starting its own buf_get.
996 		 *
997 		 * But first, we check if someone beat us to it.
998 		 */
999 		if (incore(vp, blkno)) {
1000 			pool_put(&bufpool, bp);
1001 			splx(s);
1002 			return (NULL);
1003 		}
1004 
1005 		bp->b_blkno = bp->b_lblkno = blkno;
1006 		bgetvp(vp, bp);
1007 		if (RBT_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
1008 			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
1009 	} else {
1010 		bp->b_vnbufs.le_next = NOLIST;
1011 		SET(bp->b_flags, B_INVAL);
1012 		bp->b_vp = NULL;
1013 	}
1014 
1015 	LIST_INSERT_HEAD(&bufhead, bp, b_list);
1016 	bcstats.numbufs++;
1017 
1018 	if (size) {
1019 		buf_alloc_pages(bp, round_page(size));
1020 		KASSERT(ISSET(bp->b_flags, B_DMA));
1021 		buf_map(bp);
1022 	}
1023 
1024 	SET(bp->b_flags, B_BC);
1025 	splx(s);
1026 
1027 	return (bp);
1028 }
1029 
1030 /*
1031  * Buffer cleaning daemon.
1032  */
1033 void
1034 buf_daemon(struct proc *p)
1035 {
1036 	struct buf *bp = NULL;
1037 	int s, pushed = 0;
1038 
1039 	cleanerproc = curproc;
1040 
1041 	s = splbio();
1042 	for (;;) {
1043 		if (bp == NULL || (pushed >= 16 &&
1044 		    UNCLEAN_PAGES < hidirtypages &&
1045 		    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS)){
1046 			pushed = 0;
1047 			/*
1048 			 * Wake up anyone who was waiting for buffers
1049 			 * to be released.
1050 			 */
1051 			if (needbuffer) {
1052 				needbuffer = 0;
1053 				wakeup(&needbuffer);
1054 			}
1055 			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
1056 		}
1057 
1058 		while ((bp = bufcache_getdirtybuf())) {
1059 
1060 			if (UNCLEAN_PAGES < lodirtypages &&
1061 			    bcstats.kvaslots_avail > 2 * RESERVE_SLOTS &&
1062 			    pushed >= 16)
1063 				break;
1064 
1065 			bufcache_take(bp);
1066 			buf_acquire(bp);
1067 			splx(s);
1068 
1069 			if (ISSET(bp->b_flags, B_INVAL)) {
1070 				brelse(bp);
1071 				s = splbio();
1072 				continue;
1073 			}
1074 #ifdef DIAGNOSTIC
1075 			if (!ISSET(bp->b_flags, B_DELWRI))
1076 				panic("Clean buffer on dirty queue");
1077 #endif
1078 			if (LIST_FIRST(&bp->b_dep) != NULL &&
1079 			    !ISSET(bp->b_flags, B_DEFERRED) &&
1080 			    buf_countdeps(bp, 0, 0)) {
1081 				SET(bp->b_flags, B_DEFERRED);
1082 				s = splbio();
1083 				bufcache_release(bp);
1084 				buf_release(bp);
1085 				continue;
1086 			}
1087 
1088 			bawrite(bp);
1089 			pushed++;
1090 
1091 			sched_pause();
1092 
1093 			s = splbio();
1094 		}
1095 	}
1096 }
1097 
1098 /*
1099  * Wait for operations on the buffer to complete.
1100  * When they do, extract and return the I/O's error value.
1101  */
1102 int
1103 biowait(struct buf *bp)
1104 {
1105 	int s;
1106 
1107 	KASSERT(!(bp->b_flags & B_ASYNC));
1108 
1109 	s = splbio();
1110 	while (!ISSET(bp->b_flags, B_DONE))
1111 		tsleep(bp, PRIBIO + 1, "biowait", 0);
1112 	splx(s);
1113 
1114 	/* check for interruption of I/O (e.g. via NFS), then errors. */
1115 	if (ISSET(bp->b_flags, B_EINTR)) {
1116 		CLR(bp->b_flags, B_EINTR);
1117 		return (EINTR);
1118 	}
1119 
1120 	if (ISSET(bp->b_flags, B_ERROR))
1121 		return (bp->b_error ? bp->b_error : EIO);
1122 	else
1123 		return (0);
1124 }
1125 
1126 /*
1127  * Mark I/O complete on a buffer.
1128  *
1129  * If a callback has been requested, e.g. the pageout
1130  * daemon, do so. Otherwise, awaken waiting processes.
1131  *
1132  * [ Leffler, et al., says on p.247:
1133  *	"This routine wakes up the blocked process, frees the buffer
1134  *	for an asynchronous write, or, for a request by the pagedaemon
1135  *	process, invokes a procedure specified in the buffer structure" ]
1136  *
1137  * In real life, the pagedaemon (or other system processes) wants
1138  * to do async stuff too, and doesn't want the buffer brelse()'d.
1139  * (for swap pager, that puts swap buffers on the free lists (!!!),
1140  * for the vn device, that puts malloc'd buffers on the free lists!)
1141  *
1142  * Must be called at splbio().
1143  */
1144 void
1145 biodone(struct buf *bp)
1146 {
1147 	splassert(IPL_BIO);
1148 
1149 	if (ISSET(bp->b_flags, B_DONE))
1150 		panic("biodone already");
1151 	SET(bp->b_flags, B_DONE);		/* note that it's done */
1152 
1153 	if (bp->b_bq)
1154 		bufq_done(bp->b_bq, bp);
1155 
1156 	if (LIST_FIRST(&bp->b_dep) != NULL)
1157 		buf_complete(bp);
1158 
1159 	if (!ISSET(bp->b_flags, B_READ)) {
1160 		CLR(bp->b_flags, B_WRITEINPROG);
1161 		vwakeup(bp->b_vp);
1162 	}
1163 	if (bcstats.numbufs &&
1164 	    (!(ISSET(bp->b_flags, B_RAW) || ISSET(bp->b_flags, B_PHYS)))) {
1165 		if (!ISSET(bp->b_flags, B_READ)) {
1166 			bcstats.pendingwrites--;
1167 		} else
1168 			bcstats.pendingreads--;
1169 	}
1170 	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
1171 		CLR(bp->b_flags, B_CALL);	/* but note callout done */
1172 		(*bp->b_iodone)(bp);
1173 	} else {
1174 		if (ISSET(bp->b_flags, B_ASYNC)) {/* if async, release it */
1175 			brelse(bp);
1176 		} else {			/* or just wakeup the buffer */
1177 			CLR(bp->b_flags, B_WANTED);
1178 			wakeup(bp);
1179 		}
1180 	}
1181 }
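
/*
 * Illustrative device-side sketch (hypothetical driver code): once the
 * transfer finishes, the driver records any error on the buffer and
 * calls biodone() at IPL_BIO.  "error" here is the driver's own status
 * value, not something provided by this file.
 *
 *	s = splbio();
 *	if (error) {
 *		bp->b_error = error;
 *		SET(bp->b_flags, B_ERROR);
 *	}
 *	biodone(bp);
 *	splx(s);
 */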
1182 
1183 #ifdef DDB
1184 void	bcstats_print(int (*)(const char *, ...)
1185     __attribute__((__format__(__kprintf__,1,2))));
1186 /*
1187  * bcstats_print: ddb hook to print interesting buffer cache counters
1188  */
1189 void
1190 bcstats_print(
1191     int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
1192 {
1193 	(*pr)("Current Buffer Cache status:\n");
1194 	(*pr)("numbufs %lld busymapped %lld, delwri %lld\n",
1195 	    bcstats.numbufs, bcstats.busymapped, bcstats.delwribufs);
1196 	(*pr)("kvaslots %lld avail kva slots %lld\n",
1197 	    bcstats.kvaslots, bcstats.kvaslots_avail);
1198 	(*pr)("bufpages %lld, dirtypages %lld\n",
1199 	    bcstats.numbufpages, bcstats.numdirtypages);
1200 	(*pr)("pendingreads %lld, pendingwrites %lld\n",
1201 	    bcstats.pendingreads, bcstats.pendingwrites);
1202 }
1203 #endif
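
/*
 * Usage note (not part of the code above): bcstats_print() is meant to
 * be driven from the kernel debugger, where it typically backs a
 * "show bcstats" style command on kernels that wire it up.
 */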
1204 
1205 void
1206 buf_adjcnt(struct buf *bp, long ncount)
1207 {
1208 	KASSERT(ncount <= bp->b_bufsize);
1209 	bp->b_bcount = ncount;
1210 }
1211 
1212 /* bufcache freelist code below */
1213 /*
1214  * Copyright (c) 2014 Ted Unangst <tedu@openbsd.org>
1215  *
1216  * Permission to use, copy, modify, and distribute this software for any
1217  * purpose with or without fee is hereby granted, provided that the above
1218  * copyright notice and this permission notice appear in all copies.
1219  *
1220  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
1221  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
1222  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
1223  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
1224  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
1225  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1226  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
1227  */
1228 
1229 /*
1230  * The code below implements a variant of the 2Q buffer cache algorithm by
1231  * Johnson and Shasha.
1232  *
1233  * General Outline
1234  * We divide the buffer cache into three working sets: current, previous,
1235  * and long term. Each list is itself LRU and buffers get promoted and moved
1236  * around between them. A buffer starts its life in the current working set.
1237  * As time passes and newer buffers push it out, it will turn into the previous
1238  * working set and is subject to recycling. But if it's accessed again from
1239  * the previous working set, that's an indication that it's actually in the
1240  * long term working set, so we promote it there. The separation of current
1241  * and previous working sets prevents us from promoting a buffer that's only
1242  * temporarily hot to the long term cache.
1243  *
1244  * The objective is to provide scan resistance by making the long term
1245  * working set ineligible for immediate recycling, even as the current
1246  * working set is rapidly turned over.
1247  *
1248  * Implementation
1249  * The code below identifies the current, previous, and long term sets as
1250  * hotqueue, coldqueue, and warmqueue. The hot and warm queues are capped at
1251  * 1/3 of the total clean pages, after which point they start pushing their
1252  * oldest buffers into coldqueue.
1253  * A buf always starts out with neither WARM nor COLD flags set (implying HOT).
1254  * When released, it will be returned to the tail of the hotqueue list.
1255  * When the hotqueue gets too large, the oldest hot buf will be moved to the
1256  * coldqueue, with the B_COLD flag set. When a cold buf is released, we set
1257  * the B_WARM flag and put it onto the warmqueue. Warm bufs are also
1258  * directly returned to the end of the warmqueue. As with the hotqueue, when
1259  * the warmqueue grows too large, B_WARM bufs are moved onto the coldqueue.
1260  *
1261  * Note that this design does still support large working sets, greater
1262  * than the cap of hotqueue or warmqueue would imply. The coldqueue is still
1263  * cached and has no maximum length. The hot and warm queues form a Y feeding
1264  * into the coldqueue. Moving bufs between queues is constant time, so this
1265  * design decays to one long warm->cold queue.
1266  *
1267  * In the 2Q paper, hotqueue and coldqueue are A1in and A1out. The warmqueue
1268  * is Am. We always cache pages, as opposed to pointers to pages for A1.
1269  *
1270  * This implementation adds support for multiple 2q caches.
1271  *
1272  * If we have more than one 2q cache, as bufs fall off the cold queue
1273  * for recycling, bufs that have been warm before (which retain the
1274  * B_WARM flag in addition to B_COLD) can be put into the hot queue of
1275  * a second level 2Q cache. Buffers which are only B_COLD are
1276  * recycled. Bufs falling off the last cache's cold queue are always
1277  * recycled.
1278  *
1279  */
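
/*
 * Illustrative lifecycle of a single clean buffer under the scheme
 * described above (a sketch of the existing rules, not an additional
 * mechanism):
 *
 *	1. buf_get()/brelse(): neither B_WARM nor B_COLD is set, so the
 *	   buf is released to the tail of DMA_CACHE's hotqueue.
 *	2. The hotqueue grows past its share; chillbufs() sets B_COLD
 *	   and moves the buf to the coldqueue.
 *	3. The buf is hit again via getblk() and released: a buf with
 *	   B_COLD (or B_WARM|B_COLD) set is released onto the warmqueue
 *	   as B_WARM.
 *	4. The warmqueue overflows; chillbufs() sets B_COLD again but
 *	   keeps B_WARM.
 *	5. When the buf reaches the head of the coldqueue it is either
 *	   promoted into the next cache's hotqueue (it carries B_WARM)
 *	   or, had it only been B_COLD, recycled.
 */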
1280 
1281 /*
1282  * This function is called when a hot or warm queue may have exceeded its
1283  * size limit. It will move a buf to the coldqueue.
1284  */
1285 int chillbufs(struct bufcache *cache, struct bufqueue *queue,
1286     int64_t *queuepages);
1287 
1288 void
1289 bufcache_init(void)
1290 {
1291 	int i;
1292 	for (i=0; i < NUM_CACHES; i++) {
1293 		TAILQ_INIT(&cleancache[i].hotqueue);
1294 		TAILQ_INIT(&cleancache[i].coldqueue);
1295 		TAILQ_INIT(&cleancache[i].warmqueue);
1296 	}
1297 	TAILQ_INIT(&dirtyqueue);
1298 }
1299 
1300 /*
1301  * If the buffer caches have shrunk, we may need to rebalance our queues.
1302  */
1303 void
1304 bufcache_adjust(void)
1305 {
1306 	int i;
1307 	for (i=0; i < NUM_CACHES; i++) {
1308 		while (chillbufs(&cleancache[i], &cleancache[i].warmqueue,
1309 		    &cleancache[i].warmbufpages) ||
1310 		    chillbufs(&cleancache[i], &cleancache[i].hotqueue,
1311 		    &cleancache[i].hotbufpages))
1312 			continue;
1313 	}
1314 }
1315 
1316 /*
1317  * Get a clean buffer from the cache. If "discard" is set, do not promote
1318  * previously warm buffers as normal, because we are tossing everything
1319  * away, as during hibernation.
1320  */
1321 struct buf *
1322 bufcache_getcleanbuf(int cachenum, int discard)
1323 {
1324 	struct buf *bp = NULL;
1325 	struct bufcache *cache = &cleancache[cachenum];
1326 
1327 	splassert(IPL_BIO);
1328 
1329 	/* try cold queue */
1330 	while ((bp = TAILQ_FIRST(&cache->coldqueue))) {
1331 		if ((!discard) &&
1332 		    cachenum < NUM_CACHES - 1 && ISSET(bp->b_flags, B_WARM)) {
1333 			/*
1334 			 * If this buffer was warm before, move it to
1335 			 *  the hot queue in the next cache
1336 			 */
1337 			TAILQ_REMOVE(&cache->coldqueue, bp, b_freelist);
1338 			CLR(bp->b_flags, B_WARM);
1339 			CLR(bp->b_flags, B_COLD);
1340 			int64_t pages = atop(bp->b_bufsize);
1341 			KASSERT(bp->cache == cachenum);
1342 			if (bp->cache == 0)
1343 				buf_flip_high(bp);
1344 			bp->cache++;
1345 			struct bufcache *newcache = &cleancache[bp->cache];
1346 			newcache->cachepages += pages;
1347 			newcache->hotbufpages += pages;
1348 			chillbufs(newcache, &newcache->hotqueue,
1349 			    &newcache->hotbufpages);
1350 			TAILQ_INSERT_TAIL(&newcache->hotqueue, bp, b_freelist);
1351 		}
1352 		else
1353 			/* buffer is cold - give it up */
1354 			return bp;
1355 	}
1356 	if ((bp = TAILQ_FIRST(&cache->warmqueue)))
1357 		return bp;
1358 	if ((bp = TAILQ_FIRST(&cache->hotqueue)))
1359 		return bp;
1360 	return bp;
1361 }
1362 
1363 struct buf *
1364 bufcache_getcleanbuf_range(int start, int end, int discard)
1365 {
1366 	int i, j = start, q = end;
1367 	struct buf *bp = NULL;
1368 
1369 	/*
1370 	 * XXX in theory we could promote warm buffers into a previous queue
1371 	 * so in the pathological case where we go through all the caches
1372 	 * without getting a buffer we have to start at the beginning again.
1373 	 */
1374 	while (j <= q)	{
1375 		for (i = q; i >= j; i--)
1376 			if ((bp = bufcache_getcleanbuf(i, discard)))
1377 				return (bp);
1378 		j++;
1379 	}
1380 	return bp;
1381 }
1382 
1383 struct buf *
1384 bufcache_getanycleanbuf(void)
1385 {
1386 	return bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 0);
1387 }
1388 
1389 
1390 struct buf *
1391 bufcache_getdirtybuf(void)
1392 {
1393 	return TAILQ_FIRST(&dirtyqueue);
1394 }
1395 
1396 void
1397 bufcache_take(struct buf *bp)
1398 {
1399 	struct bufqueue *queue;
1400 	int64_t pages;
1401 
1402 	splassert(IPL_BIO);
1403 
1404 	KASSERT(ISSET(bp->b_flags, B_BC));
1405 	KASSERT(bp->cache >= DMA_CACHE);
1406 	KASSERT((bp->cache < NUM_CACHES));
1407 	pages = atop(bp->b_bufsize);
1408 	struct bufcache *cache = &cleancache[bp->cache];
1409 	if (!ISSET(bp->b_flags, B_DELWRI)) {
1410 		if (ISSET(bp->b_flags, B_COLD)) {
1411 			queue = &cache->coldqueue;
1412 		} else if (ISSET(bp->b_flags, B_WARM)) {
1413 			queue = &cache->warmqueue;
1414 			cache->warmbufpages -= pages;
1415 		} else {
1416 			queue = &cache->hotqueue;
1417 			cache->hotbufpages -= pages;
1418 		}
1419 		bcstats.numcleanpages -= pages;
1420 		cache->cachepages -= pages;
1421 	} else {
1422 		queue = &dirtyqueue;
1423 		bcstats.numdirtypages -= pages;
1424 		bcstats.delwribufs--;
1425 	}
1426 	TAILQ_REMOVE(queue, bp, b_freelist);
1427 }
1428 
1429 /* move buffers from a hot or warm queue to a cold queue in a cache */
1430 int
1431 chillbufs(struct bufcache *cache, struct bufqueue *queue, int64_t *queuepages)
1432 {
1433 	struct buf *bp;
1434 	int64_t limit, pages;
1435 
1436 	/*
1437 	 * The warm and hot queues may each hold up to one third of the
1438 	 * cache's clean pages; a floor of 96 pages prevents too much "wobbling".
1439 	 */
1440 	limit = cache->cachepages / 3;
1441 	if (*queuepages > 96 && *queuepages > limit) {
1442 		bp = TAILQ_FIRST(queue);
1443 		if (!bp)
1444 			panic("inconsistent bufpage counts");
1445 		pages = atop(bp->b_bufsize);
1446 		*queuepages -= pages;
1447 		TAILQ_REMOVE(queue, bp, b_freelist);
1448 		/* we do not clear B_WARM */
1449 		SET(bp->b_flags, B_COLD);
1450 		TAILQ_INSERT_TAIL(&cache->coldqueue, bp, b_freelist);
1451 		return 1;
1452 	}
1453 	return 0;
1454 }
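
/*
 * Worked example of the limit above (hypothetical numbers): with 3,000
 * clean pages in a cache, its hot and warm queues may each hold up to
 * 1,000 pages before chillbufs() starts pushing their oldest bufs to
 * the coldqueue.  With only 150 clean pages the one-third limit would
 * be 50, but the 96-page floor means nothing is chilled until a queue
 * exceeds 96 pages.
 */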
1455 
1456 void
1457 bufcache_release(struct buf *bp)
1458 {
1459 	struct bufqueue *queue;
1460 	int64_t pages;
1461 	struct bufcache *cache = &cleancache[bp->cache];
1462 	pages = atop(bp->b_bufsize);
1463 	KASSERT(ISSET(bp->b_flags, B_BC));
1464 	KASSERT((ISSET(bp->b_flags, B_DMA) && bp->cache == 0)
1465 	    || ((!ISSET(bp->b_flags, B_DMA)) && bp->cache > 0));
1466 	if (!ISSET(bp->b_flags, B_DELWRI)) {
1467 		int64_t *queuepages;
1468 		if (ISSET(bp->b_flags, B_WARM | B_COLD)) {
1469 			SET(bp->b_flags, B_WARM);
1470 			CLR(bp->b_flags, B_COLD);
1471 			queue = &cache->warmqueue;
1472 			queuepages = &cache->warmbufpages;
1473 		} else {
1474 			queue = &cache->hotqueue;
1475 			queuepages = &cache->hotbufpages;
1476 		}
1477 		*queuepages += pages;
1478 		bcstats.numcleanpages += pages;
1479 		cache->cachepages += pages;
1480 		chillbufs(cache, queue, queuepages);
1481 	} else {
1482 		queue = &dirtyqueue;
1483 		bcstats.numdirtypages += pages;
1484 		bcstats.delwribufs++;
1485 	}
1486 	TAILQ_INSERT_TAIL(queue, bp, b_freelist);
1487 }
1488 
1489 #ifdef HIBERNATE
1490 /*
1491  * Nuke the buffer cache from orbit when hibernating. We do not want to save
1492  * any clean cache pages to swap and read them back; the original disk files
1493  * are just as good.
1494  */
1495 void
1496 hibernate_suspend_bufcache(void)
1497 {
1498 	struct buf *bp;
1499 	int s;
1500 
1501 	s = splbio();
1502 	/* Chuck away all the cache pages; discard bufs, do not promote */
1503 	while ((bp = bufcache_getcleanbuf_range(DMA_CACHE, NUM_CACHES - 1, 1))) {
1504 		bufcache_take(bp);
1505 		if (bp->b_vp) {
1506 			RBT_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree, bp);
1507 			brelvp(bp);
1508 		}
1509 		buf_put(bp);
1510 	}
1511 	splx(s);
1512 }
1513 
1514 void
1515 hibernate_resume_bufcache(void)
1516 {
1517 	/* XXX Nothing needed here for now */
1518 }
1519 #endif /* HIBERNATE */
1520