xref: /freebsd/sys/kern/vfs_bio.c (revision 4e78addb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2004 Poul-Henning Kamp
5  * Copyright (c) 1994,1997 John S. Dyson
6  * Copyright (c) 2013 The FreeBSD Foundation
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Konstantin Belousov
10  * under sponsorship from the FreeBSD Foundation.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  */
33 
34 /*
35  * This file contains a new buffer I/O scheme implementing a coherent
36  * VM object and buffer cache.  Pains have been taken to make
37  * sure that the performance degradation associated with schemes
38  * such as this is not realized.
39  *
40  * Author:  John S. Dyson
41  * Significant help during the development and debugging phases
42  * was provided by David Greenman, also of the FreeBSD core team.
43  *
44  * See buf(9) for more information.
45  */
46 
47 #include <sys/cdefs.h>
48 __FBSDID("$FreeBSD$");
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/asan.h>
53 #include <sys/bio.h>
54 #include <sys/bitset.h>
55 #include <sys/boottrace.h>
56 #include <sys/buf.h>
57 #include <sys/conf.h>
58 #include <sys/counter.h>
59 #include <sys/devicestat.h>
60 #include <sys/eventhandler.h>
61 #include <sys/fail.h>
62 #include <sys/ktr.h>
63 #include <sys/limits.h>
64 #include <sys/lock.h>
65 #include <sys/malloc.h>
66 #include <sys/mount.h>
67 #include <sys/mutex.h>
68 #include <sys/kernel.h>
69 #include <sys/kthread.h>
70 #include <sys/proc.h>
71 #include <sys/racct.h>
72 #include <sys/refcount.h>
73 #include <sys/resourcevar.h>
74 #include <sys/rwlock.h>
75 #include <sys/sched.h>
76 #include <sys/smp.h>
77 #include <sys/sysctl.h>
78 #include <sys/syscallsubr.h>
79 #include <sys/vmem.h>
80 #include <sys/vmmeter.h>
81 #include <sys/vnode.h>
82 #include <sys/watchdog.h>
83 #include <geom/geom.h>
84 #include <vm/vm.h>
85 #include <vm/vm_param.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_object.h>
88 #include <vm/vm_page.h>
89 #include <vm/vm_pageout.h>
90 #include <vm/vm_pager.h>
91 #include <vm/vm_extern.h>
92 #include <vm/vm_map.h>
93 #include <vm/swap_pager.h>
94 
95 static MALLOC_DEFINE(M_BIOBUF, "biobuf", "BIO buffer");
96 
97 struct	bio_ops bioops;		/* I/O operation notification */
98 
99 struct	buf_ops buf_ops_bio = {
100 	.bop_name	=	"buf_ops_bio",
101 	.bop_write	=	bufwrite,
102 	.bop_strategy	=	bufstrategy,
103 	.bop_sync	=	bufsync,
104 	.bop_bdflush	=	bufbdflush,
105 };
106 
107 struct bufqueue {
108 	struct mtx_padalign	bq_lock;
109 	TAILQ_HEAD(, buf)	bq_queue;
110 	uint8_t			bq_index;
111 	uint16_t		bq_subqueue;
112 	int			bq_len;
113 } __aligned(CACHE_LINE_SIZE);
114 
115 #define	BQ_LOCKPTR(bq)		(&(bq)->bq_lock)
116 #define	BQ_LOCK(bq)		mtx_lock(BQ_LOCKPTR((bq)))
117 #define	BQ_UNLOCK(bq)		mtx_unlock(BQ_LOCKPTR((bq)))
118 #define	BQ_ASSERT_LOCKED(bq)	mtx_assert(BQ_LOCKPTR((bq)), MA_OWNED)
119 
120 struct bufdomain {
121 	struct bufqueue	*bd_subq;
122 	struct bufqueue bd_dirtyq;
123 	struct bufqueue	*bd_cleanq;
124 	struct mtx_padalign bd_run_lock;
125 	/* Constants */
126 	long		bd_maxbufspace;
127 	long		bd_hibufspace;
128 	long 		bd_lobufspace;
129 	long 		bd_bufspacethresh;
130 	int		bd_hifreebuffers;
131 	int		bd_lofreebuffers;
132 	int		bd_hidirtybuffers;
133 	int		bd_lodirtybuffers;
134 	int		bd_dirtybufthresh;
135 	int		bd_lim;
136 	/* atomics */
137 	int		bd_wanted;
138 	bool		bd_shutdown;
139 	int __aligned(CACHE_LINE_SIZE)	bd_numdirtybuffers;
140 	int __aligned(CACHE_LINE_SIZE)	bd_running;
141 	long __aligned(CACHE_LINE_SIZE) bd_bufspace;
142 	int __aligned(CACHE_LINE_SIZE)	bd_freebuffers;
143 } __aligned(CACHE_LINE_SIZE);
144 
145 #define	BD_LOCKPTR(bd)		(&(bd)->bd_cleanq->bq_lock)
146 #define	BD_LOCK(bd)		mtx_lock(BD_LOCKPTR((bd)))
147 #define	BD_UNLOCK(bd)		mtx_unlock(BD_LOCKPTR((bd)))
148 #define	BD_ASSERT_LOCKED(bd)	mtx_assert(BD_LOCKPTR((bd)), MA_OWNED)
149 #define	BD_RUN_LOCKPTR(bd)	(&(bd)->bd_run_lock)
150 #define	BD_RUN_LOCK(bd)		mtx_lock(BD_RUN_LOCKPTR((bd)))
151 #define	BD_RUN_UNLOCK(bd)	mtx_unlock(BD_RUN_LOCKPTR((bd)))
152 #define	BD_DOMAIN(bd)		(bd - bdomain)
153 
154 static char *buf;		/* buffer header pool */
155 static struct buf *
156 nbufp(unsigned i)
157 {
158 	return ((struct buf *)(buf + (sizeof(struct buf) +
159 	    sizeof(vm_page_t) * atop(maxbcachebuf)) * i));
160 }
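/*
 * Illustrative slot arithmetic (figures are assumptions, not taken from
 * this file): with 4 KB pages and the default maxbcachebuf of 64 KB,
 * atop(maxbcachebuf) is 16, so each slot in the pool is sizeof(struct buf)
 * followed by room for 16 vm_page_t pointers, and nbufp(i) simply scales
 * that fixed per-slot stride by i.
 */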
161 
162 caddr_t __read_mostly unmapped_buf;
163 
164 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
165 struct proc *bufdaemonproc;
166 
167 static void vm_hold_free_pages(struct buf *bp, int newbsize);
168 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
169 		vm_offset_t to);
170 static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m);
171 static void vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off,
172 		vm_page_t m);
173 static void vfs_clean_pages_dirty_buf(struct buf *bp);
174 static void vfs_setdirty_range(struct buf *bp);
175 static void vfs_vmio_invalidate(struct buf *bp);
176 static void vfs_vmio_truncate(struct buf *bp, int npages);
177 static void vfs_vmio_extend(struct buf *bp, int npages, int size);
178 static int vfs_bio_clcheck(struct vnode *vp, int size,
179 		daddr_t lblkno, daddr_t blkno);
180 static void breada(struct vnode *, daddr_t *, int *, int, struct ucred *, int,
181 		void (*)(struct buf *));
182 static int buf_flush(struct vnode *vp, struct bufdomain *, int);
183 static int flushbufqueues(struct vnode *, struct bufdomain *, int, int);
184 static void buf_daemon(void);
185 static __inline void bd_wakeup(void);
186 static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
187 static void bufkva_reclaim(vmem_t *, int);
188 static void bufkva_free(struct buf *);
189 static int buf_import(void *, void **, int, int, int);
190 static void buf_release(void *, void **, int);
191 static void maxbcachebuf_adjust(void);
192 static inline struct bufdomain *bufdomain(struct buf *);
193 static void bq_remove(struct bufqueue *bq, struct buf *bp);
194 static void bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock);
195 static int buf_recycle(struct bufdomain *, bool kva);
196 static void bq_init(struct bufqueue *bq, int qindex, int cpu,
197 	    const char *lockname);
198 static void bd_init(struct bufdomain *bd);
199 static int bd_flushall(struct bufdomain *bd);
200 static int sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS);
201 static int sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS);
202 
203 static int sysctl_bufspace(SYSCTL_HANDLER_ARGS);
204 int vmiodirenable = TRUE;
205 SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
206     "Use the VM system for directory writes");
207 long runningbufspace;
208 SYSCTL_LONG(_vfs, OID_AUTO, runningbufspace, CTLFLAG_RD, &runningbufspace, 0,
209     "Amount of presently outstanding async buffer I/O");
210 SYSCTL_PROC(_vfs, OID_AUTO, bufspace, CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RD,
211     NULL, 0, sysctl_bufspace, "L", "Physical memory used for buffers");
212 static counter_u64_t bufkvaspace;
213 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufkvaspace, CTLFLAG_RD, &bufkvaspace,
214     "Kernel virtual memory used for buffers");
215 static long maxbufspace;
216 SYSCTL_PROC(_vfs, OID_AUTO, maxbufspace,
217     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &maxbufspace,
218     __offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
219     "Maximum allowed value of bufspace (including metadata)");
220 static long bufmallocspace;
221 SYSCTL_LONG(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, &bufmallocspace, 0,
222     "Amount of malloced memory for buffers");
223 static long maxbufmallocspace;
224 SYSCTL_LONG(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, &maxbufmallocspace,
225     0, "Maximum amount of malloced memory for buffers");
226 static long lobufspace;
227 SYSCTL_PROC(_vfs, OID_AUTO, lobufspace,
228     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &lobufspace,
229     __offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
230     "Minimum amount of buffer space we want to have");
231 long hibufspace;
232 SYSCTL_PROC(_vfs, OID_AUTO, hibufspace,
233     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &hibufspace,
234     __offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
235     "Maximum allowed value of bufspace (excluding metadata)");
236 long bufspacethresh;
237 SYSCTL_PROC(_vfs, OID_AUTO, bufspacethresh,
238     CTLTYPE_LONG|CTLFLAG_MPSAFE|CTLFLAG_RW, &bufspacethresh,
239     __offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
240     "Bufspace consumed before waking the daemon to free some");
241 static counter_u64_t buffreekvacnt;
242 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, buffreekvacnt, CTLFLAG_RW, &buffreekvacnt,
243     "Number of times we have freed the KVA space from some buffer");
244 static counter_u64_t bufdefragcnt;
245 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, bufdefragcnt, CTLFLAG_RW, &bufdefragcnt,
246     "Number of times we have had to repeat buffer allocation to defragment");
247 static long lorunningspace;
248 SYSCTL_PROC(_vfs, OID_AUTO, lorunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
249     CTLFLAG_RW, &lorunningspace, 0, sysctl_runningspace, "L",
250     "Minimum preferred space used for in-progress I/O");
251 static long hirunningspace;
252 SYSCTL_PROC(_vfs, OID_AUTO, hirunningspace, CTLTYPE_LONG | CTLFLAG_MPSAFE |
253     CTLFLAG_RW, &hirunningspace, 0, sysctl_runningspace, "L",
254     "Maximum amount of space to use for in-progress I/O");
255 int dirtybufferflushes;
256 SYSCTL_INT(_vfs, OID_AUTO, dirtybufferflushes, CTLFLAG_RW, &dirtybufferflushes,
257     0, "Number of bdwrite to bawrite conversions to limit dirty buffers");
258 int bdwriteskip;
259 SYSCTL_INT(_vfs, OID_AUTO, bdwriteskip, CTLFLAG_RW, &bdwriteskip,
260     0, "Number of buffers supplied to bdwrite with snapshot deadlock risk");
261 int altbufferflushes;
262 SYSCTL_INT(_vfs, OID_AUTO, altbufferflushes, CTLFLAG_RW | CTLFLAG_STATS,
263     &altbufferflushes, 0, "Number of fsync flushes to limit dirty buffers");
264 static int recursiveflushes;
265 SYSCTL_INT(_vfs, OID_AUTO, recursiveflushes, CTLFLAG_RW | CTLFLAG_STATS,
266     &recursiveflushes, 0, "Number of flushes skipped due to being recursive");
267 static int sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS);
268 SYSCTL_PROC(_vfs, OID_AUTO, numdirtybuffers,
269     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RD, NULL, 0, sysctl_numdirtybuffers, "I",
270     "Number of buffers that are dirty (have unwritten changes) at the moment");
271 static int lodirtybuffers;
272 SYSCTL_PROC(_vfs, OID_AUTO, lodirtybuffers,
273     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lodirtybuffers,
274     __offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
275     "How many buffers we want to have free before bufdaemon can sleep");
276 static int hidirtybuffers;
277 SYSCTL_PROC(_vfs, OID_AUTO, hidirtybuffers,
278     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hidirtybuffers,
279     __offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
280     "When the number of dirty buffers is considered severe");
281 int dirtybufthresh;
282 SYSCTL_PROC(_vfs, OID_AUTO, dirtybufthresh,
283     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &dirtybufthresh,
284     __offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
285     "Number of bdwrite to bawrite conversions to clear dirty buffers");
286 static int numfreebuffers;
287 SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, &numfreebuffers, 0,
288     "Number of free buffers");
289 static int lofreebuffers;
290 SYSCTL_PROC(_vfs, OID_AUTO, lofreebuffers,
291     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &lofreebuffers,
292     __offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
293    "Target number of free buffers");
294 static int hifreebuffers;
295 SYSCTL_PROC(_vfs, OID_AUTO, hifreebuffers,
296     CTLTYPE_INT|CTLFLAG_MPSAFE|CTLFLAG_RW, &hifreebuffers,
297     __offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
298    "Threshold for clean buffer recycling");
299 static counter_u64_t getnewbufcalls;
300 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufcalls, CTLFLAG_RD,
301    &getnewbufcalls, "Number of calls to getnewbuf");
302 static counter_u64_t getnewbufrestarts;
303 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, getnewbufrestarts, CTLFLAG_RD,
304     &getnewbufrestarts,
305     "Number of times getnewbuf has had to restart a buffer acquisition");
306 static counter_u64_t mappingrestarts;
307 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, mappingrestarts, CTLFLAG_RD,
308     &mappingrestarts,
309     "Number of times getblk has had to restart a buffer mapping for "
310     "an unmapped buffer");
311 static counter_u64_t numbufallocfails;
312 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, numbufallocfails, CTLFLAG_RW,
313     &numbufallocfails, "Number of times buffer allocations failed");
314 static int flushbufqtarget = 100;
315 SYSCTL_INT(_vfs, OID_AUTO, flushbufqtarget, CTLFLAG_RW, &flushbufqtarget, 0,
316     "Amount of work to do in flushbufqueues when helping bufdaemon");
317 static counter_u64_t notbufdflushes;
318 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, notbufdflushes, CTLFLAG_RD, &notbufdflushes,
319     "Number of dirty buffer flushes done by the bufdaemon helpers");
320 static long barrierwrites;
321 SYSCTL_LONG(_vfs, OID_AUTO, barrierwrites, CTLFLAG_RW | CTLFLAG_STATS,
322     &barrierwrites, 0, "Number of barrier writes");
323 SYSCTL_INT(_vfs, OID_AUTO, unmapped_buf_allowed, CTLFLAG_RD,
324     &unmapped_buf_allowed, 0,
325     "Permit the use of unmapped I/O");
326 int maxbcachebuf = MAXBCACHEBUF;
327 SYSCTL_INT(_vfs, OID_AUTO, maxbcachebuf, CTLFLAG_RDTUN, &maxbcachebuf, 0,
328     "Maximum size of a buffer cache block");
329 
330 /*
331  * This lock synchronizes access to bd_request.
332  */
333 static struct mtx_padalign __exclusive_cache_line bdlock;
334 
335 /*
336  * This lock protects the runningbufreq and synchronizes runningbufwakeup and
337  * waitrunningbufspace().
338  */
339 static struct mtx_padalign __exclusive_cache_line rbreqlock;
340 
341 /*
342  * Lock that protects bdirtywait.
343  */
344 static struct mtx_padalign __exclusive_cache_line bdirtylock;
345 
346 /*
347  * bufdaemon shutdown request and sleep channel.
348  */
349 static bool bd_shutdown;
350 
351 /*
352  * Wakeup point for bufdaemon, as well as indicator of whether it is already
353  * active.  Set to 1 when the bufdaemon is already "on" the queue, 0 when it
354  * is idling.
355  */
356 static int bd_request;
357 
358 /*
359  * Request for the buf daemon to write more buffers than is indicated by
360  * lodirtybuffers.  This may be necessary to push out excess dependencies or
361  * defragment the address space where a simple count of the number of dirty
362  * buffers is insufficient to characterize the demand for flushing them.
363  */
364 static int bd_speedupreq;
365 
366 /*
367  * Synchronization (sleep/wakeup) variable for active buffer space requests.
368  * Set when wait starts, cleared prior to wakeup().
369  * Used in runningbufwakeup() and waitrunningbufspace().
370  */
371 static int runningbufreq;
372 
373 /*
374  * Synchronization for bwillwrite() waiters.
375  */
376 static int bdirtywait;
377 
378 /*
379  * Definitions for the buffer free lists.
380  */
381 #define QUEUE_NONE	0	/* on no queue */
382 #define QUEUE_EMPTY	1	/* empty buffer headers */
383 #define QUEUE_DIRTY	2	/* B_DELWRI buffers */
384 #define QUEUE_CLEAN	3	/* non-B_DELWRI buffers */
385 #define QUEUE_SENTINEL	4	/* not a queue index, but a sentinel marker */
386 
387 /* Maximum number of buffer domains. */
388 #define	BUF_DOMAINS	8
389 
390 struct bufdomainset bdlodirty;		/* Domains > lodirty */
391 struct bufdomainset bdhidirty;		/* Domains > hidirty */
392 
393 /* Configured number of clean queues. */
394 static int __read_mostly buf_domains;
395 
396 BITSET_DEFINE(bufdomainset, BUF_DOMAINS);
397 struct bufdomain __exclusive_cache_line bdomain[BUF_DOMAINS];
398 struct bufqueue __exclusive_cache_line bqempty;
399 
400 /*
401  * per-cpu empty buffer cache.
402  */
403 uma_zone_t buf_zone;
404 
405 static int
406 sysctl_runningspace(SYSCTL_HANDLER_ARGS)
407 {
408 	long value;
409 	int error;
410 
411 	value = *(long *)arg1;
412 	error = sysctl_handle_long(oidp, &value, 0, req);
413 	if (error != 0 || req->newptr == NULL)
414 		return (error);
415 	mtx_lock(&rbreqlock);
416 	if (arg1 == &hirunningspace) {
417 		if (value < lorunningspace)
418 			error = EINVAL;
419 		else
420 			hirunningspace = value;
421 	} else {
422 		KASSERT(arg1 == &lorunningspace,
423 		    ("%s: unknown arg1", __func__));
424 		if (value > hirunningspace)
425 			error = EINVAL;
426 		else
427 			lorunningspace = value;
428 	}
429 	mtx_unlock(&rbreqlock);
430 	return (error);
431 }
432 
433 static int
434 sysctl_bufdomain_int(SYSCTL_HANDLER_ARGS)
435 {
436 	int error;
437 	int value;
438 	int i;
439 
440 	value = *(int *)arg1;
441 	error = sysctl_handle_int(oidp, &value, 0, req);
442 	if (error != 0 || req->newptr == NULL)
443 		return (error);
444 	*(int *)arg1 = value;
445 	for (i = 0; i < buf_domains; i++)
446 		*(int *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
447 		    value / buf_domains;
448 
449 	return (error);
450 }
451 
452 static int
453 sysctl_bufdomain_long(SYSCTL_HANDLER_ARGS)
454 {
455 	long value;
456 	int error;
457 	int i;
458 
459 	value = *(long *)arg1;
460 	error = sysctl_handle_long(oidp, &value, 0, req);
461 	if (error != 0 || req->newptr == NULL)
462 		return (error);
463 	*(long *)arg1 = value;
464 	for (i = 0; i < buf_domains; i++)
465 		*(long *)(uintptr_t)(((uintptr_t)&bdomain[i]) + arg2) =
466 		    value / buf_domains;
467 
468 	return (error);
469 }
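/*
 * Behavioral sketch of the two handlers above, with made-up numbers:
 * if buf_domains is 4, writing 1000 to a per-domain tunable such as
 * vfs.lodirtybuffers stores 1000 in the global variable but writes
 * 1000 / 4 == 250 into each bdomain[i] field at the given offset, so
 * the domains split the requested total evenly.
 */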
470 
471 #if defined(COMPAT_FREEBSD4) || defined(COMPAT_FREEBSD5) || \
472     defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD7)
473 static int
474 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
475 {
476 	long lvalue;
477 	int ivalue;
478 	int i;
479 
480 	lvalue = 0;
481 	for (i = 0; i < buf_domains; i++)
482 		lvalue += bdomain[i].bd_bufspace;
483 	if (sizeof(int) == sizeof(long) || req->oldlen >= sizeof(long))
484 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
485 	if (lvalue > INT_MAX)
486 		/* On overflow, still write out a long to trigger ENOMEM. */
487 		return (sysctl_handle_long(oidp, &lvalue, 0, req));
488 	ivalue = lvalue;
489 	return (sysctl_handle_int(oidp, &ivalue, 0, req));
490 }
491 #else
492 static int
493 sysctl_bufspace(SYSCTL_HANDLER_ARGS)
494 {
495 	long lvalue;
496 	int i;
497 
498 	lvalue = 0;
499 	for (i = 0; i < buf_domains; i++)
500 		lvalue += bdomain[i].bd_bufspace;
501 	return (sysctl_handle_long(oidp, &lvalue, 0, req));
502 }
503 #endif
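/*
 * Userspace consumers read the aggregate through the vfs.bufspace MIB;
 * a minimal sketch (not part of this file) might be:
 *
 *	long bufspace;
 *	size_t len = sizeof(bufspace);
 *
 *	if (sysctlbyname("vfs.bufspace", &bufspace, &len, NULL, 0) == 0)
 *		printf("%ld bytes of buffer space in use\n", bufspace);
 *
 * The compat handler above keeps old binaries that pass an int-sized
 * buffer working, and returns ENOMEM once the value no longer fits.
 */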
504 
505 static int
506 sysctl_numdirtybuffers(SYSCTL_HANDLER_ARGS)
507 {
508 	int value;
509 	int i;
510 
511 	value = 0;
512 	for (i = 0; i < buf_domains; i++)
513 		value += bdomain[i].bd_numdirtybuffers;
514 	return (sysctl_handle_int(oidp, &value, 0, req));
515 }
516 
517 /*
518  *	bdirtywakeup:
519  *
520  *	Wakeup any bwillwrite() waiters.
521  */
522 static void
523 bdirtywakeup(void)
524 {
525 	mtx_lock(&bdirtylock);
526 	if (bdirtywait) {
527 		bdirtywait = 0;
528 		wakeup(&bdirtywait);
529 	}
530 	mtx_unlock(&bdirtylock);
531 }
532 
533 /*
534  *	bd_clear:
535  *
536  *	Clear a domain from the appropriate bitsets when dirtybuffers
537  *	is decremented.
538  */
539 static void
540 bd_clear(struct bufdomain *bd)
541 {
542 
543 	mtx_lock(&bdirtylock);
544 	if (bd->bd_numdirtybuffers <= bd->bd_lodirtybuffers)
545 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
546 	if (bd->bd_numdirtybuffers <= bd->bd_hidirtybuffers)
547 		BIT_CLR(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
548 	mtx_unlock(&bdirtylock);
549 }
550 
551 /*
552  *	bd_set:
553  *
554  *	Set a domain in the appropriate bitsets when dirtybuffers
555  *	is incremented.
556  */
557 static void
558 bd_set(struct bufdomain *bd)
559 {
560 
561 	mtx_lock(&bdirtylock);
562 	if (bd->bd_numdirtybuffers > bd->bd_lodirtybuffers)
563 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdlodirty);
564 	if (bd->bd_numdirtybuffers > bd->bd_hidirtybuffers)
565 		BIT_SET(BUF_DOMAINS, BD_DOMAIN(bd), &bdhidirty);
566 	mtx_unlock(&bdirtylock);
567 }
568 
569 /*
570  *	bdirtysub:
571  *
572  *	Decrement the numdirtybuffers count by one and wakeup any
573  *	threads blocked in bwillwrite().
574  */
575 static void
576 bdirtysub(struct buf *bp)
577 {
578 	struct bufdomain *bd;
579 	int num;
580 
581 	bd = bufdomain(bp);
582 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, -1);
583 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
584 		bdirtywakeup();
585 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
586 		bd_clear(bd);
587 }
588 
589 /*
590  *	bdirtyadd:
591  *
592  *	Increment the numdirtybuffers count by one and wakeup the buf
593  *	daemon if needed.
594  */
595 static void
596 bdirtyadd(struct buf *bp)
597 {
598 	struct bufdomain *bd;
599 	int num;
600 
601 	/*
602 	 * Only do the wakeup once as we cross the boundary.  The
603 	 * buf daemon will keep running until the condition clears.
604 	 */
605 	bd = bufdomain(bp);
606 	num = atomic_fetchadd_int(&bd->bd_numdirtybuffers, 1);
607 	if (num == (bd->bd_lodirtybuffers + bd->bd_hidirtybuffers) / 2)
608 		bd_wakeup();
609 	if (num == bd->bd_lodirtybuffers || num == bd->bd_hidirtybuffers)
610 		bd_set(bd);
611 }
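/*
 * Worked example for the transition checks above (numbers are
 * hypothetical): with bd_lodirtybuffers == 250 and bd_hidirtybuffers
 * == 500, bdirtyadd() wakes the buf daemon only on the increment that
 * crosses (250 + 500) / 2 == 375, and bdirtysub() wakes bwillwrite()
 * waiters on the matching decrement; bd_set() and bd_clear() are only
 * invoked when the pre-update count is exactly 250 or 500.
 */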
612 
613 /*
614  *	bufspace_daemon_wakeup:
615  *
616  *	Wakeup the daemons responsible for freeing clean bufs.
617  */
618 static void
619 bufspace_daemon_wakeup(struct bufdomain *bd)
620 {
621 
622 	/*
623 	 * avoid the lock if the daemon is running.
624 	 */
625 	if (atomic_fetchadd_int(&bd->bd_running, 1) == 0) {
626 		BD_RUN_LOCK(bd);
627 		atomic_store_int(&bd->bd_running, 1);
628 		wakeup(&bd->bd_running);
629 		BD_RUN_UNLOCK(bd);
630 	}
631 }
632 
633 /*
634  *	bufspace_adjust:
635  *
636  *	Adjust the reported bufspace for a KVA managed buffer, possibly
637  * 	waking any waiters.
638  */
639 static void
640 bufspace_adjust(struct buf *bp, int bufsize)
641 {
642 	struct bufdomain *bd;
643 	long space;
644 	int diff;
645 
646 	KASSERT((bp->b_flags & B_MALLOC) == 0,
647 	    ("bufspace_adjust: malloc buf %p", bp));
648 	bd = bufdomain(bp);
649 	diff = bufsize - bp->b_bufsize;
650 	if (diff < 0) {
651 		atomic_subtract_long(&bd->bd_bufspace, -diff);
652 	} else if (diff > 0) {
653 		space = atomic_fetchadd_long(&bd->bd_bufspace, diff);
654 		/* Wake up the daemon on the transition. */
655 		if (space < bd->bd_bufspacethresh &&
656 		    space + diff >= bd->bd_bufspacethresh)
657 			bufspace_daemon_wakeup(bd);
658 	}
659 	bp->b_bufsize = bufsize;
660 }
661 
662 /*
663  *	bufspace_reserve:
664  *
665  *	Reserve bufspace before calling allocbuf().  Metadata has a
666  *	different space limit than data.
667  */
668 static int
669 bufspace_reserve(struct bufdomain *bd, int size, bool metadata)
670 {
671 	long limit, new;
672 	long space;
673 
674 	if (metadata)
675 		limit = bd->bd_maxbufspace;
676 	else
677 		limit = bd->bd_hibufspace;
678 	space = atomic_fetchadd_long(&bd->bd_bufspace, size);
679 	new = space + size;
680 	if (new > limit) {
681 		atomic_subtract_long(&bd->bd_bufspace, size);
682 		return (ENOSPC);
683 	}
684 
685 	/* Wake up the daemon on the transition. */
686 	if (space < bd->bd_bufspacethresh && new >= bd->bd_bufspacethresh)
687 		bufspace_daemon_wakeup(bd);
688 
689 	return (0);
690 }
691 
692 /*
693  *	bufspace_release:
694  *
695  *	Release reserved bufspace after bufspace_adjust() has consumed it.
696  */
697 static void
698 bufspace_release(struct bufdomain *bd, int size)
699 {
700 
701 	atomic_subtract_long(&bd->bd_bufspace, size);
702 }
703 
704 /*
705  *	bufspace_wait:
706  *
707  *	Wait for bufspace, acting as the buf daemon if a locked vnode is
708  *	supplied.  bd_wanted must be set prior to polling for space.  The
709  *	operation must be re-tried on return.
710  */
711 static void
712 bufspace_wait(struct bufdomain *bd, struct vnode *vp, int gbflags,
713     int slpflag, int slptimeo)
714 {
715 	struct thread *td;
716 	int error, fl, norunbuf;
717 
718 	if ((gbflags & GB_NOWAIT_BD) != 0)
719 		return;
720 
721 	td = curthread;
722 	BD_LOCK(bd);
723 	while (bd->bd_wanted) {
724 		if (vp != NULL && vp->v_type != VCHR &&
725 		    (td->td_pflags & TDP_BUFNEED) == 0) {
726 			BD_UNLOCK(bd);
727 			/*
728 			 * getblk() is called with the vnode locked, and
729 			 * a majority of the dirty buffers may well
730 			 * belong to that vnode.  Flushing those
731 			 * buffers here can make progress that the
732 			 * buf daemon cannot achieve, since it is
733 			 * unable to lock the vnode.
734 			 */
735 			norunbuf = ~(TDP_BUFNEED | TDP_NORUNNINGBUF) |
736 			    (td->td_pflags & TDP_NORUNNINGBUF);
737 
738 			/*
739 			 * Play bufdaemon.  The getnewbuf() function
740 			 * may be called while the thread owns the
741 			 * lock on another dirty buffer of the same
742 			 * vnode, which makes it impossible to use
743 			 * VOP_FSYNC() there, due to the buffer lock
744 			 * recursion.
745 			 */
746 			td->td_pflags |= TDP_BUFNEED | TDP_NORUNNINGBUF;
747 			fl = buf_flush(vp, bd, flushbufqtarget);
748 			td->td_pflags &= norunbuf;
749 			BD_LOCK(bd);
750 			if (fl != 0)
751 				continue;
752 			if (bd->bd_wanted == 0)
753 				break;
754 		}
755 		error = msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
756 		    (PRIBIO + 4) | slpflag, "newbuf", slptimeo);
757 		if (error != 0)
758 			break;
759 	}
760 	BD_UNLOCK(bd);
761 }
762 
763 static void
764 bufspace_daemon_shutdown(void *arg, int howto __unused)
765 {
766 	struct bufdomain *bd = arg;
767 	int error;
768 
769 	if (KERNEL_PANICKED())
770 		return;
771 
772 	BD_RUN_LOCK(bd);
773 	bd->bd_shutdown = true;
774 	wakeup(&bd->bd_running);
775 	error = msleep(&bd->bd_shutdown, BD_RUN_LOCKPTR(bd), 0,
776 	    "bufspace_shutdown", 60 * hz);
777 	BD_RUN_UNLOCK(bd);
778 	if (error != 0)
779 		printf("bufspacedaemon wait error: %d\n", error);
780 }
781 
782 /*
783  *	bufspace_daemon:
784  *
785  *	buffer space management daemon.  Tries to maintain some marginal
786  *	amount of free buffer space so that requesting processes neither
787  *	block nor work to reclaim buffers.
788  */
789 static void
790 bufspace_daemon(void *arg)
791 {
792 	struct bufdomain *bd = arg;
793 
794 	EVENTHANDLER_REGISTER(shutdown_pre_sync, bufspace_daemon_shutdown, bd,
795 	    SHUTDOWN_PRI_LAST + 100);
796 
797 	BD_RUN_LOCK(bd);
798 	while (!bd->bd_shutdown) {
799 		BD_RUN_UNLOCK(bd);
800 
801 		/*
802 		 * Free buffers from the clean queue until we meet our
803 		 * targets.
804 		 *
805 		 * Theory of operation:  The buffer cache is most efficient
806 		 * when some free buffer headers and space are always
807 		 * available to getnewbuf().  This daemon attempts to prevent
808 		 * the excessive blocking and synchronization associated
809 		 * with shortfall.  It goes through three phases according
810 		 * to demand:
811 		 *
812 		 * 1)	The daemon wakes up voluntarily once per second
813 		 *	during idle periods when the counters are below
814 		 *	the wakeup thresholds (bufspacethresh, lofreebuffers).
815 		 *
816 		 * 2)	The daemon wakes up as we cross the thresholds
817 		 *	ahead of any potential blocking.  This may bounce
818 		 *	slightly according to the rate of consumption and
819 		 *	release.
820 		 *
821 		 * 3)	The daemon and consumers are starved for working
822 		 *	clean buffers.  This is the 'bufspace' sleep below
823 		 *	which will inefficiently trade bufs with bqrelse
824 		 *	until we return to condition 2.
825 		 */
826 		while (bd->bd_bufspace > bd->bd_lobufspace ||
827 		    bd->bd_freebuffers < bd->bd_hifreebuffers) {
828 			if (buf_recycle(bd, false) != 0) {
829 				if (bd_flushall(bd))
830 					continue;
831 				/*
832 				 * Speed up dirty flushing if we've run out of
833 				 * clean buffers.  This is possible in particular
834 				 * because softdep may hold many bufs locked
835 				 * pending writes to other bufs which are
836 				 * marked for delayed write, exhausting
837 				 * clean space until they are written.
838 				 */
839 				bd_speedup();
840 				BD_LOCK(bd);
841 				if (bd->bd_wanted) {
842 					msleep(&bd->bd_wanted, BD_LOCKPTR(bd),
843 					    PRIBIO|PDROP, "bufspace", hz/10);
844 				} else
845 					BD_UNLOCK(bd);
846 			}
847 			maybe_yield();
848 		}
849 
850 		/*
851 		 * Re-check our limits and sleep.  bd_running must be
852 		 * cleared prior to checking the limits to avoid missed
853 		 * wakeups.  The waker will adjust one of bufspace or
854 		 * freebuffers prior to checking bd_running.
855 		 */
856 		BD_RUN_LOCK(bd);
857 		if (bd->bd_shutdown)
858 			break;
859 		atomic_store_int(&bd->bd_running, 0);
860 		if (bd->bd_bufspace < bd->bd_bufspacethresh &&
861 		    bd->bd_freebuffers > bd->bd_lofreebuffers) {
862 			msleep(&bd->bd_running, BD_RUN_LOCKPTR(bd),
863 			    PRIBIO, "-", hz);
864 		} else {
865 			/* Avoid spurious wakeups while running. */
866 			atomic_store_int(&bd->bd_running, 1);
867 		}
868 	}
869 	wakeup(&bd->bd_shutdown);
870 	BD_RUN_UNLOCK(bd);
871 	kthread_exit();
872 }
873 
874 /*
875  *	bufmallocadjust:
876  *
877  *	Adjust the reported bufspace for a malloc managed buffer, possibly
878  *	waking any waiters.
879  */
880 static void
881 bufmallocadjust(struct buf *bp, int bufsize)
882 {
883 	int diff;
884 
885 	KASSERT((bp->b_flags & B_MALLOC) != 0,
886 	    ("bufmallocadjust: non-malloc buf %p", bp));
887 	diff = bufsize - bp->b_bufsize;
888 	if (diff < 0)
889 		atomic_subtract_long(&bufmallocspace, -diff);
890 	else
891 		atomic_add_long(&bufmallocspace, diff);
892 	bp->b_bufsize = bufsize;
893 }
894 
895 /*
896  *	runningwakeup:
897  *
898  *	Wake up processes that are waiting on asynchronous writes to fall
899  *	below lorunningspace.
900  */
901 static void
902 runningwakeup(void)
903 {
904 
905 	mtx_lock(&rbreqlock);
906 	if (runningbufreq) {
907 		runningbufreq = 0;
908 		wakeup(&runningbufreq);
909 	}
910 	mtx_unlock(&rbreqlock);
911 }
912 
913 /*
914  *	runningbufwakeup:
915  *
916  *	Decrement the outstanding write count accordingly.
917  */
918 void
919 runningbufwakeup(struct buf *bp)
920 {
921 	long space, bspace;
922 
923 	bspace = bp->b_runningbufspace;
924 	if (bspace == 0)
925 		return;
926 	space = atomic_fetchadd_long(&runningbufspace, -bspace);
927 	KASSERT(space >= bspace, ("runningbufspace underflow %ld %ld",
928 	    space, bspace));
929 	bp->b_runningbufspace = 0;
930 	/*
931 	 * Only acquire the lock and wakeup on the transition from exceeding
932 	 * the threshold to falling below it.
933 	 */
934 	if (space < lorunningspace)
935 		return;
936 	if (space - bspace > lorunningspace)
937 		return;
938 	runningwakeup();
939 }
940 
941 /*
942  *	waitrunningbufspace()
943  *
944  *	runningbufspace is a measure of the amount of I/O currently
945  *	running.  This routine is used in async-write situations to
946  *	prevent creating huge backups of pending writes to a device.
947  *	Only asynchronous writes are governed by this function.
948  *
949  *	This does NOT turn an async write into a sync write.  It waits
950  *	for earlier writes to complete and generally returns before the
951  *	caller's write has reached the device.
952  */
953 void
954 waitrunningbufspace(void)
955 {
956 
957 	mtx_lock(&rbreqlock);
958 	while (runningbufspace > hirunningspace) {
959 		runningbufreq = 1;
960 		msleep(&runningbufreq, &rbreqlock, PVM, "wdrain", 0);
961 	}
962 	mtx_unlock(&rbreqlock);
963 }
964 
965 /*
966  *	vfs_buf_test_cache:
967  *
968  *	Called when a buffer is extended.  This function clears the B_CACHE
969  *	bit if the newly extended portion of the buffer does not contain
970  *	valid data.
971  */
972 static __inline void
973 vfs_buf_test_cache(struct buf *bp, vm_ooffset_t foff, vm_offset_t off,
974     vm_offset_t size, vm_page_t m)
975 {
976 
977 	/*
978 	 * This function and its results are protected by higher level
979 	 * synchronization requiring vnode and buf locks to page in and
980 	 * validate pages.
981 	 */
982 	if (bp->b_flags & B_CACHE) {
983 		int base = (foff + off) & PAGE_MASK;
984 		if (vm_page_is_valid(m, base, size) == 0)
985 			bp->b_flags &= ~B_CACHE;
986 	}
987 }
988 
989 /* Wake up the buffer daemon if necessary */
990 static void
991 bd_wakeup(void)
992 {
993 
994 	mtx_lock(&bdlock);
995 	if (bd_request == 0) {
996 		bd_request = 1;
997 		wakeup(&bd_request);
998 	}
999 	mtx_unlock(&bdlock);
1000 }
1001 
1002 /*
1003  * Adjust the maxbcachebuf tunable.
1004  */
1005 static void
1006 maxbcachebuf_adjust(void)
1007 {
1008 	int i;
1009 
1010 	/*
1011 	 * maxbcachebuf must be a power of 2 >= MAXBSIZE.
1012 	 */
1013 	i = 2;
1014 	while (i * 2 <= maxbcachebuf)
1015 		i *= 2;
1016 	maxbcachebuf = i;
1017 	if (maxbcachebuf < MAXBSIZE)
1018 		maxbcachebuf = MAXBSIZE;
1019 	if (maxbcachebuf > maxphys)
1020 		maxbcachebuf = maxphys;
1021 	if (bootverbose != 0 && maxbcachebuf != MAXBCACHEBUF)
1022 		printf("maxbcachebuf=%d\n", maxbcachebuf);
1023 }
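/*
 * Example of the adjustment above (the tunable value is hypothetical):
 * setting vfs.maxbcachebuf=100000 rounds down to the largest power of
 * two not exceeding it, 65536; a request below MAXBSIZE is raised to
 * MAXBSIZE, and anything beyond maxphys is clipped to maxphys.
 */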
1024 
1025 /*
1026  * bd_speedup - speedup the buffer cache flushing code
1027  */
1028 void
1029 bd_speedup(void)
1030 {
1031 	int needwake;
1032 
1033 	mtx_lock(&bdlock);
1034 	needwake = 0;
1035 	if (bd_speedupreq == 0 || bd_request == 0)
1036 		needwake = 1;
1037 	bd_speedupreq = 1;
1038 	bd_request = 1;
1039 	if (needwake)
1040 		wakeup(&bd_request);
1041 	mtx_unlock(&bdlock);
1042 }
1043 
1044 #ifdef __i386__
1045 #define	TRANSIENT_DENOM	5
1046 #else
1047 #define	TRANSIENT_DENOM 10
1048 #endif
1049 
1050 /*
1051  * Calculate buffer cache scaling values and reserve space for buffer
1052  * headers.  This is called during low-level kernel initialization and
1053  * may be called more than once.  We CANNOT write to the memory area
1054  * being reserved at this time.
1055  */
1056 caddr_t
1057 kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
1058 {
1059 	int tuned_nbuf;
1060 	long maxbuf, maxbuf_sz, buf_sz,	biotmap_sz;
1061 
1062 	/*
1063 	 * With KASAN or KMSAN enabled, the kernel map is shadowed.  Account for
1064 	 * this when sizing maps based on the amount of physical memory
1065 	 * available.
1066 	 */
1067 #if defined(KASAN)
1068 	physmem_est = (physmem_est * KASAN_SHADOW_SCALE) /
1069 	    (KASAN_SHADOW_SCALE + 1);
1070 #elif defined(KMSAN)
1071 	physmem_est /= 3;
1072 
1073 	/*
1074 	 * KMSAN cannot reliably determine whether buffer data is initialized
1075 	 * unless it is updated through a KVA mapping.
1076 	 */
1077 	unmapped_buf_allowed = 0;
1078 #endif
1079 
1080 	/*
1081 	 * physmem_est is in pages.  Convert it to kilobytes (assumes
1082 	 * PAGE_SIZE is >= 1K)
1083 	 */
1084 	physmem_est = physmem_est * (PAGE_SIZE / 1024);
1085 
1086 	maxbcachebuf_adjust();
1087 	/*
1088 	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
1089 	 * For the first 64MB of ram nominally allocate sufficient buffers to
1090 	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
1091 	 * buffers to cover 1/10 of our ram over 64MB.  When auto-sizing
1092 	 * the buffer cache we limit the eventual kva reservation to
1093 	 * maxbcache bytes.
1094 	 *
1095 	 * factor represents the 1/4 x ram conversion.
1096 	 */
1097 	if (nbuf == 0) {
1098 		int factor = 4 * BKVASIZE / 1024;
1099 
1100 		nbuf = 50;
1101 		if (physmem_est > 4096)
1102 			nbuf += min((physmem_est - 4096) / factor,
1103 			    65536 / factor);
1104 		if (physmem_est > 65536)
1105 			nbuf += min((physmem_est - 65536) * 2 / (factor * 5),
1106 			    32 * 1024 * 1024 / (factor * 5));
1107 
1108 		if (maxbcache && nbuf > maxbcache / BKVASIZE)
1109 			nbuf = maxbcache / BKVASIZE;
1110 		tuned_nbuf = 1;
1111 	} else
1112 		tuned_nbuf = 0;
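	/*
	 * Worked example of the auto-sizing above (all inputs assumed):
	 * with the usual BKVASIZE of 16384, factor is 64.  For a machine
	 * with 1 GB of usable memory (physmem_est == 1048576 KB), nbuf is
	 * 50 + 65536 / 64 + (1048576 - 65536) * 2 / (64 * 5)
	 * == 50 + 1024 + 6144 == 7218 buffers, or roughly 113 MB of
	 * buffer KVA at BKVASIZE each, matching the 1/4-then-1/10 rule.
	 */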
1113 
1114 	/* XXX Avoid unsigned long overflows later on with maxbufspace. */
1115 	maxbuf = (LONG_MAX / 3) / BKVASIZE;
1116 	if (nbuf > maxbuf) {
1117 		if (!tuned_nbuf)
1118 			printf("Warning: nbufs lowered from %d to %ld\n", nbuf,
1119 			    maxbuf);
1120 		nbuf = maxbuf;
1121 	}
1122 
1123 	/*
1124 	 * Ideal allocation size for the transient bio submap is 10%
1125 	 * of the maximal buffer map size.  This roughly corresponds
1126 	 * to the amount of buffer space mapped under a typical UFS load.
1127 	 *
1128 	 * Clip the buffer map to reserve space for the transient
1129 	 * BIOs, if its extent is bigger than 90% (80% on i386) of the
1130 	 * maximum buffer map extent on the platform.
1131 	 *
1132 	 * The fall-back to maxbuf when maxbcache is unset allows
1133 	 * the buffer KVA not to be trimmed on architectures with
1134 	 * ample KVA space.
1135 	 */
1136 	if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
1137 		maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
1138 		buf_sz = (long)nbuf * BKVASIZE;
1139 		if (buf_sz < maxbuf_sz / TRANSIENT_DENOM *
1140 		    (TRANSIENT_DENOM - 1)) {
1141 			/*
1142 			 * There is more KVA than memory.  Do not
1143 			 * adjust buffer map size, and assign the rest
1144 			 * of maxbuf to transient map.
1145 			 */
1146 			biotmap_sz = maxbuf_sz - buf_sz;
1147 		} else {
1148 			/*
1149 			 * Buffer map spans all KVA we could afford on
1150 			 * this platform.  Give 10% (20% on i386) of
1151 			 * the buffer map to the transient bio map.
1152 			 */
1153 			biotmap_sz = buf_sz / TRANSIENT_DENOM;
1154 			buf_sz -= biotmap_sz;
1155 		}
1156 		if (biotmap_sz / INT_MAX > maxphys)
1157 			bio_transient_maxcnt = INT_MAX;
1158 		else
1159 			bio_transient_maxcnt = biotmap_sz / maxphys;
1160 		/*
1161 		 * Artificially limit to 1024 simultaneous in-flight I/Os
1162 		 * using the transient mapping.
1163 		 */
1164 		if (bio_transient_maxcnt > 1024)
1165 			bio_transient_maxcnt = 1024;
1166 		if (tuned_nbuf)
1167 			nbuf = buf_sz / BKVASIZE;
1168 	}
1169 
1170 	if (nswbuf == 0) {
1171 		/*
1172 		 * Pager buffers are allocated for short periods, so scale the
1173 		 * number of reserved buffers based on the number of CPUs rather
1174 		 * than the amount of memory.
1175 		 */
1176 		nswbuf = min(nbuf / 4, 32 * mp_ncpus);
1177 		if (nswbuf < NSWBUF_MIN)
1178 			nswbuf = NSWBUF_MIN;
1179 	}
1180 
1181 	/*
1182 	 * Reserve space for the buffer cache buffers
1183 	 */
1184 	buf = (char *)v;
1185 	v = (caddr_t)buf + (sizeof(struct buf) + sizeof(vm_page_t) *
1186 	    atop(maxbcachebuf)) * nbuf;
1187 
1188 	return (v);
1189 }
1190 
1191 /*
1192  * Single global constant for BUF_WMESG, to avoid getting multiple
1193  * references.
1194  */
1195 static const char buf_wmesg[] = "bufwait";
1196 
1197 /* Initialize the buffer subsystem.  Called before use of any buffers. */
1198 void
1199 bufinit(void)
1200 {
1201 	struct buf *bp;
1202 	int i;
1203 
1204 	KASSERT(maxbcachebuf >= MAXBSIZE,
1205 	    ("maxbcachebuf (%d) must be >= MAXBSIZE (%d)\n", maxbcachebuf,
1206 	    MAXBSIZE));
1207 	bq_init(&bqempty, QUEUE_EMPTY, -1, "bufq empty lock");
1208 	mtx_init(&rbreqlock, "runningbufspace lock", NULL, MTX_DEF);
1209 	mtx_init(&bdlock, "buffer daemon lock", NULL, MTX_DEF);
1210 	mtx_init(&bdirtylock, "dirty buf lock", NULL, MTX_DEF);
1211 
1212 	unmapped_buf = (caddr_t)kva_alloc(maxphys);
1213 
1214 	/* finally, initialize each buffer header and stick on empty q */
1215 	for (i = 0; i < nbuf; i++) {
1216 		bp = nbufp(i);
1217 		bzero(bp, sizeof(*bp) + sizeof(vm_page_t) * atop(maxbcachebuf));
1218 		bp->b_flags = B_INVAL;
1219 		bp->b_rcred = NOCRED;
1220 		bp->b_wcred = NOCRED;
1221 		bp->b_qindex = QUEUE_NONE;
1222 		bp->b_domain = -1;
1223 		bp->b_subqueue = mp_maxid + 1;
1224 		bp->b_xflags = 0;
1225 		bp->b_data = bp->b_kvabase = unmapped_buf;
1226 		LIST_INIT(&bp->b_dep);
1227 		BUF_LOCKINIT(bp, buf_wmesg);
1228 		bq_insert(&bqempty, bp, false);
1229 	}
1230 
1231 	/*
1232 	 * maxbufspace is the absolute maximum amount of buffer space we are
1233 	 * allowed to reserve in KVM and in real terms.  The absolute maximum
1234 	 * is nominally used by metadata.  hibufspace is the nominal maximum
1235 	 * used by most other requests.  The differential is required to
1236 	 * ensure that metadata deadlocks don't occur.
1237 	 *
1238 	 * maxbufspace is based on BKVASIZE.  Allocating buffers larger than
1239 	 * this may result in KVM fragmentation which is not handled optimally
1240 	 * by the system. XXX This is less true with vmem.  We could use
1241 	 * PAGE_SIZE.
1242 	 */
1243 	maxbufspace = (long)nbuf * BKVASIZE;
1244 	hibufspace = lmax(3 * maxbufspace / 4, maxbufspace - maxbcachebuf * 10);
1245 	lobufspace = (hibufspace / 20) * 19; /* 95% */
1246 	bufspacethresh = lobufspace + (hibufspace - lobufspace) / 2;
1247 
1248 	/*
1249 	 * Note: The 16 MiB upper limit for hirunningspace was chosen
1250 	 * arbitrarily and may need further tuning. It corresponds to
1251 	 * 128 outstanding write IO requests (if IO size is 128 KiB),
1252 	 * which fits with many RAID controllers' tagged queuing limits.
1253 	 * The lower 1 MiB limit is the historical upper limit for
1254 	 * hirunningspace.
1255 	 */
1256 	hirunningspace = lmax(lmin(roundup(hibufspace / 64, maxbcachebuf),
1257 	    16 * 1024 * 1024), 1024 * 1024);
1258 	lorunningspace = roundup((hirunningspace * 2) / 3, maxbcachebuf);
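	/*
	 * Example with assumed inputs: if hibufspace worked out to 100 MB
	 * and maxbcachebuf is 64 KB, hibufspace / 64 is 1600 KB, already a
	 * multiple of 64 KB and within the 1 MiB and 16 MiB clamps, so
	 * hirunningspace == 1600 KB and lorunningspace ==
	 * roundup(2/3 of that, 64 KB) == 1088 KB.
	 */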
1259 
1260 	/*
1261 	 * Limit the amount of malloc memory since it is wired permanently into
1262 	 * the kernel space.  Even though this is accounted for in the buffer
1263 	 * allocation, we don't want the malloced region to grow uncontrolled.
1264 	 * The malloc scheme improves memory utilization significantly for
1265 	 * average (small) directories.
1266 	 */
1267 	maxbufmallocspace = hibufspace / 20;
1268 
1269 	/*
1270 	 * Reduce the chance of a deadlock occurring by limiting the number
1271 	 * of delayed-write dirty buffers we allow to stack up.
1272 	 */
1273 	hidirtybuffers = nbuf / 4 + 20;
1274 	dirtybufthresh = hidirtybuffers * 9 / 10;
1275 	/*
1276 	 * To support extreme low-memory systems, make sure hidirtybuffers
1277 	 * cannot eat up all available buffer space.  This occurs when our
1278 	 * minimum cannot be met.  We try to size hidirtybuffers to 3/4 our
1279 	 * buffer space assuming BKVASIZE'd buffers.
1280 	 */
1281 	while ((long)hidirtybuffers * BKVASIZE > 3 * hibufspace / 4) {
1282 		hidirtybuffers >>= 1;
1283 	}
1284 	lodirtybuffers = hidirtybuffers / 2;
1285 
1286 	/*
1287 	 * lofreebuffers should be sufficient to avoid stalling waiting on
1288 	 * buf headers under heavy utilization.  The bufs in per-cpu caches
1289 	 * are counted as free but will be unavailable to threads executing
1290 	 * on other cpus.
1291 	 *
1292 	 * hifreebuffers is the free target for the bufspace daemon.  This
1293 	 * should be set appropriately to limit work per-iteration.
1294 	 */
1295 	lofreebuffers = MIN((nbuf / 25) + (20 * mp_ncpus), 128 * mp_ncpus);
1296 	hifreebuffers = (3 * lofreebuffers) / 2;
1297 	numfreebuffers = nbuf;
1298 
1299 	/* Setup the kva and free list allocators. */
1300 	vmem_set_reclaim(buffer_arena, bufkva_reclaim);
1301 	buf_zone = uma_zcache_create("buf free cache",
1302 	    sizeof(struct buf) + sizeof(vm_page_t) * atop(maxbcachebuf),
1303 	    NULL, NULL, NULL, NULL, buf_import, buf_release, NULL, 0);
1304 
1305 	/*
1306 	 * Size the clean queue according to the amount of buffer space.
1307  * One queue per 256 MB up to the max.  More queues give better
1308  * concurrency but a less accurate LRU.
1309 	 */
1310 	buf_domains = MIN(howmany(maxbufspace, 256*1024*1024), BUF_DOMAINS);
1311 	for (i = 0 ; i < buf_domains; i++) {
1312 		struct bufdomain *bd;
1313 
1314 		bd = &bdomain[i];
1315 		bd_init(bd);
1316 		bd->bd_freebuffers = nbuf / buf_domains;
1317 		bd->bd_hifreebuffers = hifreebuffers / buf_domains;
1318 		bd->bd_lofreebuffers = lofreebuffers / buf_domains;
1319 		bd->bd_bufspace = 0;
1320 		bd->bd_maxbufspace = maxbufspace / buf_domains;
1321 		bd->bd_hibufspace = hibufspace / buf_domains;
1322 		bd->bd_lobufspace = lobufspace / buf_domains;
1323 		bd->bd_bufspacethresh = bufspacethresh / buf_domains;
1324 		bd->bd_numdirtybuffers = 0;
1325 		bd->bd_hidirtybuffers = hidirtybuffers / buf_domains;
1326 		bd->bd_lodirtybuffers = lodirtybuffers / buf_domains;
1327 		bd->bd_dirtybufthresh = dirtybufthresh / buf_domains;
1328 		/* Don't allow more than 2% of bufs in the per-cpu caches. */
1329 		bd->bd_lim = nbuf / buf_domains / 50 / mp_ncpus;
1330 	}
1331 	getnewbufcalls = counter_u64_alloc(M_WAITOK);
1332 	getnewbufrestarts = counter_u64_alloc(M_WAITOK);
1333 	mappingrestarts = counter_u64_alloc(M_WAITOK);
1334 	numbufallocfails = counter_u64_alloc(M_WAITOK);
1335 	notbufdflushes = counter_u64_alloc(M_WAITOK);
1336 	buffreekvacnt = counter_u64_alloc(M_WAITOK);
1337 	bufdefragcnt = counter_u64_alloc(M_WAITOK);
1338 	bufkvaspace = counter_u64_alloc(M_WAITOK);
1339 }
1340 
1341 #ifdef INVARIANTS
1342 static inline void
1343 vfs_buf_check_mapped(struct buf *bp)
1344 {
1345 
1346 	KASSERT(bp->b_kvabase != unmapped_buf,
1347 	    ("mapped buf: b_kvabase was not updated %p", bp));
1348 	KASSERT(bp->b_data != unmapped_buf,
1349 	    ("mapped buf: b_data was not updated %p", bp));
1350 	KASSERT(bp->b_data < unmapped_buf || bp->b_data >= unmapped_buf +
1351 	    maxphys, ("b_data + b_offset unmapped %p", bp));
1352 }
1353 
1354 static inline void
1355 vfs_buf_check_unmapped(struct buf *bp)
1356 {
1357 
1358 	KASSERT(bp->b_data == unmapped_buf,
1359 	    ("unmapped buf: corrupted b_data %p", bp));
1360 }
1361 
1362 #define	BUF_CHECK_MAPPED(bp) vfs_buf_check_mapped(bp)
1363 #define	BUF_CHECK_UNMAPPED(bp) vfs_buf_check_unmapped(bp)
1364 #else
1365 #define	BUF_CHECK_MAPPED(bp) do {} while (0)
1366 #define	BUF_CHECK_UNMAPPED(bp) do {} while (0)
1367 #endif
1368 
1369 static int
1370 isbufbusy(struct buf *bp)
1371 {
1372 	if (((bp->b_flags & B_INVAL) == 0 && BUF_ISLOCKED(bp)) ||
1373 	    ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI))
1374 		return (1);
1375 	return (0);
1376 }
1377 
1378 /*
1379  * Shutdown the system cleanly to prepare for reboot, halt, or power off.
1380  */
1381 void
1382 bufshutdown(int show_busybufs)
1383 {
1384 	static int first_buf_printf = 1;
1385 	struct buf *bp;
1386 	int i, iter, nbusy, pbusy;
1387 #ifndef PREEMPTION
1388 	int subiter;
1389 #endif
1390 
1391 	/*
1392 	 * Sync filesystems for shutdown
1393 	 */
1394 	wdog_kern_pat(WD_LASTVAL);
1395 	kern_sync(curthread);
1396 
1397 	/*
1398 	 * With soft updates, some buffers that are
1399 	 * written will be remarked as dirty until other
1400 	 * buffers are written.
1401 	 */
1402 	for (iter = pbusy = 0; iter < 20; iter++) {
1403 		nbusy = 0;
1404 		for (i = nbuf - 1; i >= 0; i--) {
1405 			bp = nbufp(i);
1406 			if (isbufbusy(bp))
1407 				nbusy++;
1408 		}
1409 		if (nbusy == 0) {
1410 			if (first_buf_printf)
1411 				printf("All buffers synced.");
1412 			break;
1413 		}
1414 		if (first_buf_printf) {
1415 			printf("Syncing disks, buffers remaining... ");
1416 			first_buf_printf = 0;
1417 		}
1418 		printf("%d ", nbusy);
1419 		if (nbusy < pbusy)
1420 			iter = 0;
1421 		pbusy = nbusy;
1422 
1423 		wdog_kern_pat(WD_LASTVAL);
1424 		kern_sync(curthread);
1425 
1426 #ifdef PREEMPTION
1427 		/*
1428 		 * Spin for a while to allow interrupt threads to run.
1429 		 */
1430 		DELAY(50000 * iter);
1431 #else
1432 		/*
1433 		 * Context switch several times to allow interrupt
1434 		 * threads to run.
1435 		 */
1436 		for (subiter = 0; subiter < 50 * iter; subiter++) {
1437 			sched_relinquish(curthread);
1438 			DELAY(1000);
1439 		}
1440 #endif
1441 	}
1442 	printf("\n");
1443 	/*
1444 	 * Count only busy local buffers to prevent forcing
1445 	 * a fsck if we're just a client of a wedged NFS server
1446 	 * an fsck if we're just a client of a wedged NFS server.
1447 	nbusy = 0;
1448 	for (i = nbuf - 1; i >= 0; i--) {
1449 		bp = nbufp(i);
1450 		if (isbufbusy(bp)) {
1451 #if 0
1452 /* XXX: This is bogus.  We should probably have a BO_REMOTE flag instead */
1453 			if (bp->b_dev == NULL) {
1454 				TAILQ_REMOVE(&mountlist,
1455 				    bp->b_vp->v_mount, mnt_list);
1456 				continue;
1457 			}
1458 #endif
1459 			nbusy++;
1460 			if (show_busybufs > 0) {
1461 				printf(
1462 	    "%d: buf:%p, vnode:%p, flags:%0x, blkno:%jd, lblkno:%jd, buflock:",
1463 				    nbusy, bp, bp->b_vp, bp->b_flags,
1464 				    (intmax_t)bp->b_blkno,
1465 				    (intmax_t)bp->b_lblkno);
1466 				BUF_LOCKPRINTINFO(bp);
1467 				if (show_busybufs > 1)
1468 					vn_printf(bp->b_vp,
1469 					    "vnode content: ");
1470 			}
1471 		}
1472 	}
1473 	if (nbusy) {
1474 		/*
1475 		 * Failed to sync all blocks. Indicate this and don't
1476 		 * unmount filesystems (thus forcing an fsck on reboot).
1477 		 */
1478 		BOOTTRACE("shutdown failed to sync buffers");
1479 		printf("Giving up on %d buffers\n", nbusy);
1480 		DELAY(5000000);	/* 5 seconds */
1481 		swapoff_all();
1482 	} else {
1483 		BOOTTRACE("shutdown sync complete");
1484 		if (!first_buf_printf)
1485 			printf("Final sync complete\n");
1486 
1487 		/*
1488 		 * Unmount filesystems and perform swapoff, to quiesce
1489 		 * the system as much as possible.  In particular, no
1490 		 * I/O should be initiated from top levels since it
1491 		 * might be abruptly terminated by reset, or otherwise
1492 		 * erroneously handled because other parts of the
1493 		 * system are disabled.
1494 		 *
1495 		 * Swapoff before unmount, because file-backed swap is
1496 		 * non-operational after unmount of the underlying
1497 		 * filesystem.
1498 		 */
1499 		if (!KERNEL_PANICKED()) {
1500 			swapoff_all();
1501 			vfs_unmountall();
1502 		}
1503 		BOOTTRACE("shutdown unmounted all filesystems");
1504 	}
1505 	DELAY(100000);		/* wait for console output to finish */
1506 }
1507 
1508 static void
1509 bpmap_qenter(struct buf *bp)
1510 {
1511 
1512 	BUF_CHECK_MAPPED(bp);
1513 
1514 	/*
1515 	 * bp->b_data is relative to bp->b_offset, but
1516 	 * bp->b_offset may be offset into the first page.
1517 	 */
1518 	bp->b_data = (caddr_t)trunc_page((vm_offset_t)bp->b_data);
1519 	pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1520 	bp->b_data = (caddr_t)((vm_offset_t)bp->b_data |
1521 	    (vm_offset_t)(bp->b_offset & PAGE_MASK));
1522 }
1523 
1524 static inline struct bufdomain *
1525 bufdomain(struct buf *bp)
1526 {
1527 
1528 	return (&bdomain[bp->b_domain]);
1529 }
1530 
1531 static struct bufqueue *
1532 bufqueue(struct buf *bp)
1533 {
1534 
1535 	switch (bp->b_qindex) {
1536 	case QUEUE_NONE:
1537 		/* FALLTHROUGH */
1538 	case QUEUE_SENTINEL:
1539 		return (NULL);
1540 	case QUEUE_EMPTY:
1541 		return (&bqempty);
1542 	case QUEUE_DIRTY:
1543 		return (&bufdomain(bp)->bd_dirtyq);
1544 	case QUEUE_CLEAN:
1545 		return (&bufdomain(bp)->bd_subq[bp->b_subqueue]);
1546 	default:
1547 		break;
1548 	}
1549 	panic("bufqueue(%p): Unhandled type %d\n", bp, bp->b_qindex);
1550 }
1551 
1552 /*
1553  * Return the locked bufqueue that bp is a member of.
1554  */
1555 static struct bufqueue *
1556 bufqueue_acquire(struct buf *bp)
1557 {
1558 	struct bufqueue *bq, *nbq;
1559 
1560 	/*
1561 	 * bp can be pushed from a per-cpu queue to the
1562 	 * cleanq while we're waiting on the lock.  Retry
1563 	 * if the queues don't match.
1564 	 */
1565 	bq = bufqueue(bp);
1566 	BQ_LOCK(bq);
1567 	for (;;) {
1568 		nbq = bufqueue(bp);
1569 		if (bq == nbq)
1570 			break;
1571 		BQ_UNLOCK(bq);
1572 		BQ_LOCK(nbq);
1573 		bq = nbq;
1574 	}
1575 	return (bq);
1576 }
1577 
1578 /*
1579  *	binsfree:
1580  *
1581  *	Insert the buffer into the appropriate free list.  Requires a
1582  *	locked buffer on entry; the buffer is unlocked before return.
1583  */
1584 static void
1585 binsfree(struct buf *bp, int qindex)
1586 {
1587 	struct bufdomain *bd;
1588 	struct bufqueue *bq;
1589 
1590 	KASSERT(qindex == QUEUE_CLEAN || qindex == QUEUE_DIRTY,
1591 	    ("binsfree: Invalid qindex %d", qindex));
1592 	BUF_ASSERT_XLOCKED(bp);
1593 
1594 	/*
1595 	 * Handle delayed bremfree() processing.
1596 	 */
1597 	if (bp->b_flags & B_REMFREE) {
1598 		if (bp->b_qindex == qindex) {
1599 			bp->b_flags |= B_REUSE;
1600 			bp->b_flags &= ~B_REMFREE;
1601 			BUF_UNLOCK(bp);
1602 			return;
1603 		}
1604 		bq = bufqueue_acquire(bp);
1605 		bq_remove(bq, bp);
1606 		BQ_UNLOCK(bq);
1607 	}
1608 	bd = bufdomain(bp);
1609 	if (qindex == QUEUE_CLEAN) {
1610 		if (bd->bd_lim != 0)
1611 			bq = &bd->bd_subq[PCPU_GET(cpuid)];
1612 		else
1613 			bq = bd->bd_cleanq;
1614 	} else
1615 		bq = &bd->bd_dirtyq;
1616 	bq_insert(bq, bp, true);
1617 }
1618 
1619 /*
1620  * buf_free:
1621  *
1622  *	Free a buffer to the buf zone once it no longer has valid contents.
1623  */
1624 static void
1625 buf_free(struct buf *bp)
1626 {
1627 
1628 	if (bp->b_flags & B_REMFREE)
1629 		bremfreef(bp);
1630 	if (bp->b_vflags & BV_BKGRDINPROG)
1631 		panic("losing buffer 1");
1632 	if (bp->b_rcred != NOCRED) {
1633 		crfree(bp->b_rcred);
1634 		bp->b_rcred = NOCRED;
1635 	}
1636 	if (bp->b_wcred != NOCRED) {
1637 		crfree(bp->b_wcred);
1638 		bp->b_wcred = NOCRED;
1639 	}
1640 	if (!LIST_EMPTY(&bp->b_dep))
1641 		buf_deallocate(bp);
1642 	bufkva_free(bp);
1643 	atomic_add_int(&bufdomain(bp)->bd_freebuffers, 1);
1644 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1645 	BUF_UNLOCK(bp);
1646 	uma_zfree(buf_zone, bp);
1647 }
1648 
1649 /*
1650  * buf_import:
1651  *
1652  *	Import bufs into the uma cache from the buf list.  The system still
1653  *	expects a static array of bufs and much of the synchronization
1654  *	around bufs assumes type stable storage.  As a result, UMA is used
1655  *	only as a per-cpu cache of bufs still maintained on a global list.
1656  */
1657 static int
1658 buf_import(void *arg, void **store, int cnt, int domain, int flags)
1659 {
1660 	struct buf *bp;
1661 	int i;
1662 
1663 	BQ_LOCK(&bqempty);
1664 	for (i = 0; i < cnt; i++) {
1665 		bp = TAILQ_FIRST(&bqempty.bq_queue);
1666 		if (bp == NULL)
1667 			break;
1668 		bq_remove(&bqempty, bp);
1669 		store[i] = bp;
1670 	}
1671 	BQ_UNLOCK(&bqempty);
1672 
1673 	return (i);
1674 }
1675 
1676 /*
1677  * buf_release:
1678  *
1679  *	Release bufs from the uma cache back to the buffer queues.
1680  */
1681 static void
1682 buf_release(void *arg, void **store, int cnt)
1683 {
1684 	struct bufqueue *bq;
1685 	struct buf *bp;
1686 	int i;
1687 
1688 	bq = &bqempty;
1689 	BQ_LOCK(bq);
1690 	for (i = 0; i < cnt; i++) {
1691 		bp = store[i];
1692 		/* Inline bq_insert() to batch locking. */
1693 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1694 		bp->b_flags &= ~(B_AGE | B_REUSE);
1695 		bq->bq_len++;
1696 		bp->b_qindex = bq->bq_index;
1697 	}
1698 	BQ_UNLOCK(bq);
1699 }
1700 
1701 /*
1702  * buf_alloc:
1703  *
1704  *	Allocate an empty buffer header.
1705  */
1706 static struct buf *
1707 buf_alloc(struct bufdomain *bd)
1708 {
1709 	struct buf *bp;
1710 	int freebufs, error;
1711 
1712 	/*
1713 	 * We can only run out of bufs in the buf zone if the average buf
1714 	 * is less than BKVASIZE.  In this case the actual wait/block will
1715 	 * come from buf_recycle() failing to flush one of these small bufs.
1716 	 */
1717 	bp = NULL;
1718 	freebufs = atomic_fetchadd_int(&bd->bd_freebuffers, -1);
1719 	if (freebufs > 0)
1720 		bp = uma_zalloc(buf_zone, M_NOWAIT);
1721 	if (bp == NULL) {
1722 		atomic_add_int(&bd->bd_freebuffers, 1);
1723 		bufspace_daemon_wakeup(bd);
1724 		counter_u64_add(numbufallocfails, 1);
1725 		return (NULL);
1726 	}
1727 	/*
1728 	 * Wake-up the bufspace daemon on transition below threshold.
1729 	 */
1730 	if (freebufs == bd->bd_lofreebuffers)
1731 		bufspace_daemon_wakeup(bd);
1732 
1733 	error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
1734 	KASSERT(error == 0, ("%s: BUF_LOCK on free buf %p: %d.", __func__, bp,
1735 	    error));
1736 	(void)error;
1737 
1738 	KASSERT(bp->b_vp == NULL,
1739 	    ("bp: %p still has vnode %p.", bp, bp->b_vp));
1740 	KASSERT((bp->b_flags & (B_DELWRI | B_NOREUSE)) == 0,
1741 	    ("invalid buffer %p flags %#x", bp, bp->b_flags));
1742 	KASSERT((bp->b_xflags & (BX_VNCLEAN|BX_VNDIRTY)) == 0,
1743 	    ("bp: %p still on a buffer list. xflags %X", bp, bp->b_xflags));
1744 	KASSERT(bp->b_npages == 0,
1745 	    ("bp: %p still has %d vm pages\n", bp, bp->b_npages));
1746 	KASSERT(bp->b_kvasize == 0, ("bp: %p still has kva\n", bp));
1747 	KASSERT(bp->b_bufsize == 0, ("bp: %p still has bufspace\n", bp));
1748 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
1749 
1750 	bp->b_domain = BD_DOMAIN(bd);
1751 	bp->b_flags = 0;
1752 	bp->b_ioflags = 0;
1753 	bp->b_xflags = 0;
1754 	bp->b_vflags = 0;
1755 	bp->b_vp = NULL;
1756 	bp->b_blkno = bp->b_lblkno = 0;
1757 	bp->b_offset = NOOFFSET;
1758 	bp->b_iodone = 0;
1759 	bp->b_error = 0;
1760 	bp->b_resid = 0;
1761 	bp->b_bcount = 0;
1762 	bp->b_npages = 0;
1763 	bp->b_dirtyoff = bp->b_dirtyend = 0;
1764 	bp->b_bufobj = NULL;
1765 	bp->b_data = bp->b_kvabase = unmapped_buf;
1766 	bp->b_fsprivate1 = NULL;
1767 	bp->b_fsprivate2 = NULL;
1768 	bp->b_fsprivate3 = NULL;
1769 	LIST_INIT(&bp->b_dep);
1770 
1771 	return (bp);
1772 }
1773 
1774 /*
1775  *	buf_recycle:
1776  *
1777  *	Free a buffer from the given bufqueue.  kva controls whether the
1778  *	freed buf must own some kva resources.  This is used for
1779  *	defragmenting.
1780  */
1781 static int
1782 buf_recycle(struct bufdomain *bd, bool kva)
1783 {
1784 	struct bufqueue *bq;
1785 	struct buf *bp, *nbp;
1786 
1787 	if (kva)
1788 		counter_u64_add(bufdefragcnt, 1);
1789 	nbp = NULL;
1790 	bq = bd->bd_cleanq;
1791 	BQ_LOCK(bq);
1792 	KASSERT(BQ_LOCKPTR(bq) == BD_LOCKPTR(bd),
1793 	    ("buf_recycle: Locks don't match"));
1794 	nbp = TAILQ_FIRST(&bq->bq_queue);
1795 
1796 	/*
1797 	 * Run the scan, possibly freeing data and/or kva mappings on the
1798 	 * fly, depending on whether kva reclamation was requested.
1799 	 */
1800 	while ((bp = nbp) != NULL) {
1801 		/*
1802 		 * Calculate next bp (we can only use it if we do not
1803 		 * release the bqlock).
1804 		 */
1805 		nbp = TAILQ_NEXT(bp, b_freelist);
1806 
1807 		/*
1808 		 * If we are defragging then we need a buffer with
1809 		 * some kva to reclaim.
1810 		 */
1811 		if (kva && bp->b_kvasize == 0)
1812 			continue;
1813 
1814 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
1815 			continue;
1816 
1817 		/*
1818 		 * Implement a second chance algorithm for frequently
1819 		 * accessed buffers.
1820 		 */
1821 		if ((bp->b_flags & B_REUSE) != 0) {
1822 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1823 			TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
1824 			bp->b_flags &= ~B_REUSE;
1825 			BUF_UNLOCK(bp);
1826 			continue;
1827 		}
1828 
1829 		/*
1830 		 * Skip buffers with background writes in progress.
1831 		 */
1832 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
1833 			BUF_UNLOCK(bp);
1834 			continue;
1835 		}
1836 
1837 		KASSERT(bp->b_qindex == QUEUE_CLEAN,
1838 		    ("buf_recycle: inconsistent queue %d bp %p",
1839 		    bp->b_qindex, bp));
1840 		KASSERT(bp->b_domain == BD_DOMAIN(bd),
1841 		    ("getnewbuf: queue domain %d doesn't match request %d",
1842 		    bp->b_domain, (int)BD_DOMAIN(bd)));
1843 		/*
1844 		 * NOTE:  nbp is now entirely invalid.  We can only restart
1845 		 * the scan from this point on.
1846 		 */
1847 		bq_remove(bq, bp);
1848 		BQ_UNLOCK(bq);
1849 
1850 		/*
1851 		 * Requeue the background write buffer with error and
1852 		 * restart the scan.
1853 		 */
1854 		if ((bp->b_vflags & BV_BKGRDERR) != 0) {
1855 			bqrelse(bp);
1856 			BQ_LOCK(bq);
1857 			nbp = TAILQ_FIRST(&bq->bq_queue);
1858 			continue;
1859 		}
1860 		bp->b_flags |= B_INVAL;
1861 		brelse(bp);
1862 		return (0);
1863 	}
1864 	bd->bd_wanted = 1;
1865 	BQ_UNLOCK(bq);
1866 
1867 	return (ENOBUFS);
1868 }
1869 
1870 /*
1871  *	bremfree:
1872  *
1873  *	Mark the buffer for removal from the appropriate free list.
1874  *
1875  */
1876 void
1877 bremfree(struct buf *bp)
1878 {
1879 
1880 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1881 	KASSERT((bp->b_flags & B_REMFREE) == 0,
1882 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
1883 	KASSERT(bp->b_qindex != QUEUE_NONE,
1884 	    ("bremfree: buffer %p not on a queue.", bp));
1885 	BUF_ASSERT_XLOCKED(bp);
1886 
1887 	bp->b_flags |= B_REMFREE;
1888 }
1889 
1890 /*
1891  *	bremfreef:
1892  *
1893  *	Force an immediate removal from a free list.  Used only in nfs when
1894  *	it abuses the b_freelist pointer.
1895  */
1896 void
1897 bremfreef(struct buf *bp)
1898 {
1899 	struct bufqueue *bq;
1900 
1901 	bq = bufqueue_acquire(bp);
1902 	bq_remove(bq, bp);
1903 	BQ_UNLOCK(bq);
1904 }
1905 
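/*
 *	bq_init:
 *
 *	Initialize a buffer queue: set up its lock and empty tail queue and
 *	record its queue index and subqueue identifier.
 */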
1906 static void
1907 bq_init(struct bufqueue *bq, int qindex, int subqueue, const char *lockname)
1908 {
1909 
1910 	mtx_init(&bq->bq_lock, lockname, NULL, MTX_DEF);
1911 	TAILQ_INIT(&bq->bq_queue);
1912 	bq->bq_len = 0;
1913 	bq->bq_index = qindex;
1914 	bq->bq_subqueue = subqueue;
1915 }
1916 
1917 static void
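/*
 *	bd_init:
 *
 *	Initialize a buffer domain: allocate one clean subqueue per CPU plus
 *	the global clean queue, initialize the dirty queue, and set up the
 *	bufspace daemon run lock.
 */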
1918 bd_init(struct bufdomain *bd)
1919 {
1920 	int i;
1921 
1922 	/* Per-CPU clean buf queues, plus one global queue. */
1923 	bd->bd_subq = mallocarray(mp_maxid + 2, sizeof(struct bufqueue),
1924 	    M_BIOBUF, M_WAITOK | M_ZERO);
1925 	bd->bd_cleanq = &bd->bd_subq[mp_maxid + 1];
1926 	bq_init(bd->bd_cleanq, QUEUE_CLEAN, mp_maxid + 1, "bufq clean lock");
1927 	bq_init(&bd->bd_dirtyq, QUEUE_DIRTY, -1, "bufq dirty lock");
1928 	for (i = 0; i <= mp_maxid; i++)
1929 		bq_init(&bd->bd_subq[i], QUEUE_CLEAN, i,
1930 		    "bufq clean subqueue lock");
1931 	mtx_init(&bd->bd_run_lock, "bufspace daemon run lock", NULL, MTX_DEF);
1932 }
1933 
1934 /*
1935  *	bq_remove:
1936  *
1937  *	Removes a buffer from the free list, must be called with the
1938  *	correct qlock held.
1939  */
1940 static void
1941 bq_remove(struct bufqueue *bq, struct buf *bp)
1942 {
1943 
1944 	CTR3(KTR_BUF, "bq_remove(%p) vp %p flags %X",
1945 	    bp, bp->b_vp, bp->b_flags);
1946 	KASSERT(bp->b_qindex != QUEUE_NONE,
1947 	    ("bq_remove: buffer %p not on a queue.", bp));
1948 	KASSERT(bufqueue(bp) == bq,
1949 	    ("bq_remove: Remove buffer %p from wrong queue.", bp));
1950 
1951 	BQ_ASSERT_LOCKED(bq);
1952 	if (bp->b_qindex != QUEUE_EMPTY) {
1953 		BUF_ASSERT_XLOCKED(bp);
1954 	}
1955 	KASSERT(bq->bq_len >= 1,
1956 	    ("queue %d underflow", bp->b_qindex));
1957 	TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1958 	bq->bq_len--;
1959 	bp->b_qindex = QUEUE_NONE;
1960 	bp->b_flags &= ~(B_REMFREE | B_REUSE);
1961 }
1962 
1963 static void
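/*
 *	bd_flush:
 *
 *	Move any buffers on the given per-cpu clean subqueue to the domain's
 *	global clean queue and wake up threads waiting for buffers in this
 *	domain.
 */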
1964 bd_flush(struct bufdomain *bd, struct bufqueue *bq)
1965 {
1966 	struct buf *bp;
1967 
1968 	BQ_ASSERT_LOCKED(bq);
1969 	if (bq != bd->bd_cleanq) {
1970 		BD_LOCK(bd);
1971 		while ((bp = TAILQ_FIRST(&bq->bq_queue)) != NULL) {
1972 			TAILQ_REMOVE(&bq->bq_queue, bp, b_freelist);
1973 			TAILQ_INSERT_TAIL(&bd->bd_cleanq->bq_queue, bp,
1974 			    b_freelist);
1975 			bp->b_subqueue = bd->bd_cleanq->bq_subqueue;
1976 		}
1977 		bd->bd_cleanq->bq_len += bq->bq_len;
1978 		bq->bq_len = 0;
1979 	}
1980 	if (bd->bd_wanted) {
1981 		bd->bd_wanted = 0;
1982 		wakeup(&bd->bd_wanted);
1983 	}
1984 	if (bq != bd->bd_cleanq)
1985 		BD_UNLOCK(bd);
1986 }
1987 
1988 static int
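/*
 *	bd_flushall:
 *
 *	Flush every non-empty per-cpu clean subqueue to the domain's global
 *	clean queue.  Returns the number of subqueues flushed.
 */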
1989 bd_flushall(struct bufdomain *bd)
1990 {
1991 	struct bufqueue *bq;
1992 	int flushed;
1993 	int i;
1994 
1995 	if (bd->bd_lim == 0)
1996 		return (0);
1997 	flushed = 0;
1998 	for (i = 0; i <= mp_maxid; i++) {
1999 		bq = &bd->bd_subq[i];
2000 		if (bq->bq_len == 0)
2001 			continue;
2002 		BQ_LOCK(bq);
2003 		bd_flush(bd, bq);
2004 		BQ_UNLOCK(bq);
2005 		flushed++;
2006 	}
2007 
2008 	return (flushed);
2009 }
2010 
2011 static void
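/*
 *	bq_insert:
 *
 *	Insert a locked buffer on the given queue.  B_AGE buffers are placed
 *	at the head of the real queue, bypassing any per-cpu subqueue, so
 *	they are recycled first; all others go to the tail.  The buffer lock
 *	is dropped when 'unlock' is true.
 */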
2012 bq_insert(struct bufqueue *bq, struct buf *bp, bool unlock)
2013 {
2014 	struct bufdomain *bd;
2015 
2016 	if (bp->b_qindex != QUEUE_NONE)
2017 		panic("bq_insert: free buffer %p onto another queue?", bp);
2018 
2019 	bd = bufdomain(bp);
2020 	if (bp->b_flags & B_AGE) {
2021 		/* Place this buf directly on the real queue. */
2022 		if (bq->bq_index == QUEUE_CLEAN)
2023 			bq = bd->bd_cleanq;
2024 		BQ_LOCK(bq);
2025 		TAILQ_INSERT_HEAD(&bq->bq_queue, bp, b_freelist);
2026 	} else {
2027 		BQ_LOCK(bq);
2028 		TAILQ_INSERT_TAIL(&bq->bq_queue, bp, b_freelist);
2029 	}
2030 	bp->b_flags &= ~(B_AGE | B_REUSE);
2031 	bq->bq_len++;
2032 	bp->b_qindex = bq->bq_index;
2033 	bp->b_subqueue = bq->bq_subqueue;
2034 
2035 	/*
2036 	 * Unlock before we notify so that we don't wake up a waiter that
2037 	 * fails a trylock on the buf and sleeps again.
2038 	 */
2039 	if (unlock)
2040 		BUF_UNLOCK(bp);
2041 
2042 	if (bp->b_qindex == QUEUE_CLEAN) {
2043 		/*
2044 		 * Flush the per-cpu queue and notify any waiters.
2045 		 */
2046 		if (bd->bd_wanted || (bq != bd->bd_cleanq &&
2047 		    bq->bq_len >= bd->bd_lim))
2048 			bd_flush(bd, bq);
2049 	}
2050 	BQ_UNLOCK(bq);
2051 }
2052 
2053 /*
2054  *	bufkva_free:
2055  *
2056  *	Free the kva allocation for a buffer.
2057  *
2058  */
2059 static void
2060 bufkva_free(struct buf *bp)
2061 {
2062 
2063 #ifdef INVARIANTS
2064 	if (bp->b_kvasize == 0) {
2065 		KASSERT(bp->b_kvabase == unmapped_buf &&
2066 		    bp->b_data == unmapped_buf,
2067 		    ("Leaked KVA space on %p", bp));
2068 	} else if (buf_mapped(bp))
2069 		BUF_CHECK_MAPPED(bp);
2070 	else
2071 		BUF_CHECK_UNMAPPED(bp);
2072 #endif
2073 	if (bp->b_kvasize == 0)
2074 		return;
2075 
2076 	vmem_free(buffer_arena, (vm_offset_t)bp->b_kvabase, bp->b_kvasize);
2077 	counter_u64_add(bufkvaspace, -bp->b_kvasize);
2078 	counter_u64_add(buffreekvacnt, 1);
2079 	bp->b_data = bp->b_kvabase = unmapped_buf;
2080 	bp->b_kvasize = 0;
2081 }
2082 
2083 /*
2084  *	bufkva_alloc:
2085  *
2086  *	Allocate the buffer KVA and set b_kvasize and b_kvabase.
2087  */
2088 static int
2089 bufkva_alloc(struct buf *bp, int maxsize, int gbflags)
2090 {
2091 	vm_offset_t addr;
2092 	int error;
2093 
2094 	KASSERT((gbflags & GB_UNMAPPED) == 0 || (gbflags & GB_KVAALLOC) != 0,
2095 	    ("Invalid gbflags 0x%x in %s", gbflags, __func__));
2096 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
2097 	KASSERT(maxsize <= maxbcachebuf,
2098 	    ("bufkva_alloc kva too large %d %u", maxsize, maxbcachebuf));
2099 
2100 	bufkva_free(bp);
2101 
2102 	addr = 0;
2103 	error = vmem_alloc(buffer_arena, maxsize, M_BESTFIT | M_NOWAIT, &addr);
2104 	if (error != 0) {
2105 		/*
2106 		 * Buffer map is too fragmented.  Request the caller
2107 		 * to defragment the map.
2108 		 */
2109 		return (error);
2110 	}
2111 	bp->b_kvabase = (caddr_t)addr;
2112 	bp->b_kvasize = maxsize;
2113 	counter_u64_add(bufkvaspace, bp->b_kvasize);
2114 	if ((gbflags & GB_UNMAPPED) != 0) {
2115 		bp->b_data = unmapped_buf;
2116 		BUF_CHECK_UNMAPPED(bp);
2117 	} else {
2118 		bp->b_data = bp->b_kvabase;
2119 		BUF_CHECK_MAPPED(bp);
2120 	}
2121 	return (0);
2122 }
2123 
2124 /*
2125  *	bufkva_reclaim:
2126  *
2127  *	Reclaim buffer kva by freeing buffers holding kva.  This is a vmem
2128  *	reclamation callback, invoked when an allocation would otherwise fail.
2129  */
2130 static void
2131 bufkva_reclaim(vmem_t *vmem, int flags)
2132 {
2133 	bool done;
2134 	int q;
2135 	int i;
2136 
2137 	done = false;
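	/*
	 * Make a bounded number of passes over all buf domains, stopping
	 * as soon as any domain recycles a buffer that holds kva.
	 */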
2138 	for (i = 0; i < 5; i++) {
2139 		for (q = 0; q < buf_domains; q++)
2140 			if (buf_recycle(&bdomain[q], true) != 0)
2141 				done = true;
2142 		if (done)
2143 			break;
2144 	}
2145 	return;
2146 }
2147 
2148 /*
2149  * Attempt to initiate asynchronous I/O on read-ahead blocks.  We must
2150  * clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE is set,
2151  * the buffer is valid and we do not have to do anything.
2152  */
2153 static void
2154 breada(struct vnode * vp, daddr_t * rablkno, int * rabsize, int cnt,
2155     struct ucred * cred, int flags, void (*ckhashfunc)(struct buf *))
2156 {
2157 	struct buf *rabp;
2158 	struct thread *td;
2159 	int i;
2160 
2161 	td = curthread;
2162 
2163 	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
2164 		if (inmem(vp, *rablkno))
2165 			continue;
2166 		rabp = getblk(vp, *rablkno, *rabsize, 0, 0, 0);
2167 		if ((rabp->b_flags & B_CACHE) != 0) {
2168 			brelse(rabp);
2169 			continue;
2170 		}
2171 #ifdef RACCT
2172 		if (racct_enable) {
2173 			PROC_LOCK(curproc);
2174 			racct_add_buf(curproc, rabp, 0);
2175 			PROC_UNLOCK(curproc);
2176 		}
2177 #endif /* RACCT */
2178 		td->td_ru.ru_inblock++;
2179 		rabp->b_flags |= B_ASYNC;
2180 		rabp->b_flags &= ~B_INVAL;
2181 		if ((flags & GB_CKHASH) != 0) {
2182 			rabp->b_flags |= B_CKHASH;
2183 			rabp->b_ckhashcalc = ckhashfunc;
2184 		}
2185 		rabp->b_ioflags &= ~BIO_ERROR;
2186 		rabp->b_iocmd = BIO_READ;
2187 		if (rabp->b_rcred == NOCRED && cred != NOCRED)
2188 			rabp->b_rcred = crhold(cred);
2189 		vfs_busy_pages(rabp, 0);
2190 		BUF_KERNPROC(rabp);
2191 		rabp->b_iooffset = dbtob(rabp->b_blkno);
2192 		bstrategy(rabp);
2193 	}
2194 }
2195 
2196 /*
2197  * Entry point for bread() and breadn() via #defines in sys/buf.h.
2198  *
2199  * Get a buffer with the specified data.  Look in the cache first.  We
2200  * must clear BIO_ERROR and B_INVAL prior to initiating I/O.  If B_CACHE
2201  * is set, the buffer is valid and we do not have to do anything, see
2202  * getblk(). Also starts asynchronous I/O on read-ahead blocks.
2203  *
2204  * Always return a NULL buffer pointer (in bpp) when returning an error.
2205  *
2206  * The blkno parameter is the logical block being requested. Normally
2207  * the mapping of logical block number to disk block address is done
2208  * by calling VOP_BMAP(). However, if the mapping is already known, the
2209  * disk block address can be passed using the dblkno parameter. If the
2210  * disk block address is not known, then the same value should be passed
2211  * for blkno and dblkno.
2212  */
2213 int
2214 breadn_flags(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size,
2215     daddr_t *rablkno, int *rabsize, int cnt, struct ucred *cred, int flags,
2216     void (*ckhashfunc)(struct buf *), struct buf **bpp)
2217 {
2218 	struct buf *bp;
2219 	struct thread *td;
2220 	int error, readwait, rv;
2221 
2222 	CTR3(KTR_BUF, "breadn(%p, %jd, %d)", vp, blkno, size);
2223 	td = curthread;
2224 	/*
2225 	 * Can only return NULL if GB_LOCK_NOWAIT or GB_SPARSE flags
2226 	 * are specified.
2227 	 */
2228 	error = getblkx(vp, blkno, dblkno, size, 0, 0, flags, &bp);
2229 	if (error != 0) {
2230 		*bpp = NULL;
2231 		return (error);
2232 	}
2233 	KASSERT(blkno == bp->b_lblkno,
2234 	    ("getblkx returned buffer for blkno %jd instead of blkno %jd",
2235 	    (intmax_t)bp->b_lblkno, (intmax_t)blkno));
2236 	flags &= ~GB_NOSPARSE;
2237 	*bpp = bp;
2238 
2239 	/*
2240 	 * If not found in cache, do some I/O
2241 	 */
2242 	readwait = 0;
2243 	if ((bp->b_flags & B_CACHE) == 0) {
2244 #ifdef RACCT
2245 		if (racct_enable) {
2246 			PROC_LOCK(td->td_proc);
2247 			racct_add_buf(td->td_proc, bp, 0);
2248 			PROC_UNLOCK(td->td_proc);
2249 		}
2250 #endif /* RACCT */
2251 		td->td_ru.ru_inblock++;
2252 		bp->b_iocmd = BIO_READ;
2253 		bp->b_flags &= ~B_INVAL;
2254 		if ((flags & GB_CKHASH) != 0) {
2255 			bp->b_flags |= B_CKHASH;
2256 			bp->b_ckhashcalc = ckhashfunc;
2257 		}
2258 		if ((flags & GB_CVTENXIO) != 0)
2259 			bp->b_xflags |= BX_CVTENXIO;
2260 		bp->b_ioflags &= ~BIO_ERROR;
2261 		if (bp->b_rcred == NOCRED && cred != NOCRED)
2262 			bp->b_rcred = crhold(cred);
2263 		vfs_busy_pages(bp, 0);
2264 		bp->b_iooffset = dbtob(bp->b_blkno);
2265 		bstrategy(bp);
2266 		++readwait;
2267 	}
2268 
2269 	/*
2270 	 * Attempt to initiate asynchronous I/O on read-ahead blocks.
2271 	 */
2272 	breada(vp, rablkno, rabsize, cnt, cred, flags, ckhashfunc);
2273 
2274 	rv = 0;
2275 	if (readwait) {
2276 		rv = bufwait(bp);
2277 		if (rv != 0) {
2278 			brelse(bp);
2279 			*bpp = NULL;
2280 		}
2281 	}
2282 	return (rv);
2283 }
2284 
2285 /*
2286  * Write, release buffer on completion.  (Done by iodone
2287  * if async).  Do not bother writing anything if the buffer
2288  * is invalid.
2289  *
2290  * Note that we set B_CACHE here, indicating that buffer is
2291  * fully valid and thus cacheable.  This is true even of NFS
2292  * now so we set it generally.  This could be set either here
2293  * or in biodone() since the I/O is synchronous.  We put it
2294  * here.
2295  */
2296 int
2297 bufwrite(struct buf *bp)
2298 {
2299 	int oldflags;
2300 	struct vnode *vp;
2301 	long space;
2302 	int vp_md;
2303 
2304 	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2305 	if ((bp->b_bufobj->bo_flag & BO_DEAD) != 0) {
2306 		bp->b_flags |= B_INVAL | B_RELBUF;
2307 		bp->b_flags &= ~B_CACHE;
2308 		brelse(bp);
2309 		return (ENXIO);
2310 	}
2311 	if (bp->b_flags & B_INVAL) {
2312 		brelse(bp);
2313 		return (0);
2314 	}
2315 
2316 	if (bp->b_flags & B_BARRIER)
2317 		atomic_add_long(&barrierwrites, 1);
2318 
2319 	oldflags = bp->b_flags;
2320 
2321 	KASSERT(!(bp->b_vflags & BV_BKGRDINPROG),
2322 	    ("FFS background buffer should not get here %p", bp));
2323 
2324 	vp = bp->b_vp;
2325 	if (vp)
2326 		vp_md = vp->v_vflag & VV_MD;
2327 	else
2328 		vp_md = 0;
2329 
2330 	/*
2331 	 * Mark the buffer clean.  Increment the bufobj write count
2332 	 * before the bundirty() call, to prevent another thread from seeing
2333 	 * an empty dirty list and a zero counter for writes in progress,
2334 	 * falsely indicating that the bufobj is clean.
2335 	 */
2336 	bufobj_wref(bp->b_bufobj);
2337 	bundirty(bp);
2338 
2339 	bp->b_flags &= ~B_DONE;
2340 	bp->b_ioflags &= ~BIO_ERROR;
2341 	bp->b_flags |= B_CACHE;
2342 	bp->b_iocmd = BIO_WRITE;
2343 
2344 	vfs_busy_pages(bp, 1);
2345 
2346 	/*
2347 	 * Normal bwrites pipeline writes
2348 	 */
2349 	bp->b_runningbufspace = bp->b_bufsize;
2350 	space = atomic_fetchadd_long(&runningbufspace, bp->b_runningbufspace);
2351 
2352 #ifdef RACCT
2353 	if (racct_enable) {
2354 		PROC_LOCK(curproc);
2355 		racct_add_buf(curproc, bp, 1);
2356 		PROC_UNLOCK(curproc);
2357 	}
2358 #endif /* RACCT */
2359 	curthread->td_ru.ru_oublock++;
2360 	if (oldflags & B_ASYNC)
2361 		BUF_KERNPROC(bp);
2362 	bp->b_iooffset = dbtob(bp->b_blkno);
2363 	buf_track(bp, __func__);
2364 	bstrategy(bp);
2365 
2366 	if ((oldflags & B_ASYNC) == 0) {
2367 		int rtval = bufwait(bp);
2368 		brelse(bp);
2369 		return (rtval);
2370 	} else if (space > hirunningspace) {
2371 		/*
2372 		 * don't allow the async write to saturate the I/O
2373 		 * system.  We will not deadlock here because
2374 		 * we are blocking waiting for I/O that is already in-progress
2375 		 * to complete. We do not block here if it is the update
2376 		 * or syncer daemon trying to clean up as that can lead
2377 		 * to deadlock.
2378 		 */
2379 		if ((curthread->td_pflags & TDP_NORUNNINGBUF) == 0 && !vp_md)
2380 			waitrunningbufspace();
2381 	}
2382 
2383 	return (0);
2384 }
2385 
2386 void
2387 bufbdflush(struct bufobj *bo, struct buf *bp)
2388 {
2389 	struct buf *nbp;
2390 	struct bufdomain *bd;
2391 
2392 	bd = &bdomain[bo->bo_domain];
2393 	if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh + 10) {
2394 		(void) VOP_FSYNC(bp->b_vp, MNT_NOWAIT, curthread);
2395 		altbufferflushes++;
2396 	} else if (bo->bo_dirty.bv_cnt > bd->bd_dirtybufthresh) {
2397 		BO_LOCK(bo);
2398 		/*
2399 		 * Try to find a buffer to flush.
2400 		 */
2401 		TAILQ_FOREACH(nbp, &bo->bo_dirty.bv_hd, b_bobufs) {
2402 			if ((nbp->b_vflags & BV_BKGRDINPROG) ||
2403 			    BUF_LOCK(nbp,
2404 				     LK_EXCLUSIVE | LK_NOWAIT, NULL))
2405 				continue;
2406 			if (bp == nbp)
2407 				panic("bdwrite: found ourselves");
2408 			BO_UNLOCK(bo);
2409 			/* Don't call buf_countdeps() with the bo lock held. */
2410 			if (buf_countdeps(nbp, 0)) {
2411 				BO_LOCK(bo);
2412 				BUF_UNLOCK(nbp);
2413 				continue;
2414 			}
2415 			if (nbp->b_flags & B_CLUSTEROK) {
2416 				vfs_bio_awrite(nbp);
2417 			} else {
2418 				bremfree(nbp);
2419 				bawrite(nbp);
2420 			}
2421 			dirtybufferflushes++;
2422 			break;
2423 		}
2424 		if (nbp == NULL)
2425 			BO_UNLOCK(bo);
2426 	}
2427 }
2428 
2429 /*
2430  * Delayed write. (Buffer is marked dirty).  Do not bother writing
2431  * anything if the buffer is marked invalid.
2432  *
2433  * Note that since the buffer must be completely valid, we can safely
2434  * set B_CACHE.  In fact, we have to set B_CACHE here rather than in
2435  * biodone() in order to prevent getblk from writing the buffer
2436  * out synchronously.
2437  */
2438 void
2439 bdwrite(struct buf *bp)
2440 {
2441 	struct thread *td = curthread;
2442 	struct vnode *vp;
2443 	struct bufobj *bo;
2444 
2445 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2446 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2447 	KASSERT((bp->b_flags & B_BARRIER) == 0,
2448 	    ("Barrier request in delayed write %p", bp));
2449 
2450 	if (bp->b_flags & B_INVAL) {
2451 		brelse(bp);
2452 		return;
2453 	}
2454 
2455 	/*
2456 	 * If we have too many dirty buffers, don't create any more.
2457 	 * If we are wildly over our limit, then force a complete
2458 	 * cleanup. Otherwise, just keep the situation from getting
2459 	 * out of control. Note that we have to avoid a recursive
2460 	 * disaster and not try to clean up after our own cleanup!
2461 	 */
2462 	vp = bp->b_vp;
2463 	bo = bp->b_bufobj;
2464 	if ((td->td_pflags & (TDP_COWINPROGRESS|TDP_INBDFLUSH)) == 0) {
2465 		td->td_pflags |= TDP_INBDFLUSH;
2466 		BO_BDFLUSH(bo, bp);
2467 		td->td_pflags &= ~TDP_INBDFLUSH;
2468 	} else
2469 		recursiveflushes++;
2470 
2471 	bdirty(bp);
2472 	/*
2473 	 * Set B_CACHE, indicating that the buffer is fully valid.  This is
2474 	 * true even of NFS now.
2475 	 */
2476 	bp->b_flags |= B_CACHE;
2477 
2478 	/*
2479 	 * This bmap keeps the system from needing to do the bmap later,
2480 	 * perhaps when the system is attempting to do a sync.  Since it
2481 	 * is likely that the indirect block -- or whatever other data structure
2482 	 * the filesystem needs -- is still in memory now, it is a good
2483 	 * thing to do this.  Note also that if the pageout daemon is
2484 	 * requesting a sync -- there might not be enough memory to do
2485 	 * the bmap then...  So, this is important to do.
2486 	 */
2487 	if (vp->v_type != VCHR && bp->b_lblkno == bp->b_blkno) {
2488 		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
2489 	}
2490 
2491 	buf_track(bp, __func__);
2492 
2493 	/*
2494 	 * Set the *dirty* buffer range based upon the VM system dirty
2495 	 * pages.
2496 	 *
2497 	 * Mark the buffer pages as clean.  We need to do this here to
2498 	 * satisfy the vnode_pager and the pageout daemon, so that it
2499 	 * thinks that the pages have been "cleaned".  Note that since
2500 	 * the pages are in a delayed write buffer -- the VFS layer
2501 	 * "will" see that the pages get written out on the next sync,
2502 	 * or perhaps the cluster will be completed.
2503 	 */
2504 	vfs_clean_pages_dirty_buf(bp);
2505 	bqrelse(bp);
2506 
2507 	/*
2508 	 * note: we cannot initiate I/O from a bdwrite even if we wanted to,
2509 	 * due to the softdep code.
2510 	 */
2511 }
2512 
2513 /*
2514  *	bdirty:
2515  *
2516  *	Turn buffer into delayed write request.  We must clear BIO_READ and
2517  *	B_RELBUF, and we must set B_DELWRI.  We reassign the buffer to
2518  *	itself to properly update it in the dirty/clean lists.  We mark it
2519  *	B_DONE to ensure that any asynchronization of the buffer properly
2520  *	clears B_DONE ( else a panic will occur later ).
2521  *
2522  *	bdirty() is kinda like bdwrite() - we have to clear B_INVAL which
2523  *	might have been set pre-getblk().  Unlike bwrite/bdwrite, bdirty()
2524  *	should only be called if the buffer is known-good.
2525  *
2526  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2527  *	count.
2528  *
2529  *	The buffer must be on QUEUE_NONE.
2530  */
2531 void
2532 bdirty(struct buf *bp)
2533 {
2534 
2535 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
2536 	    bp, bp->b_vp, bp->b_flags);
2537 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2538 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2539 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
2540 	bp->b_flags &= ~(B_RELBUF);
2541 	bp->b_iocmd = BIO_WRITE;
2542 
2543 	if ((bp->b_flags & B_DELWRI) == 0) {
2544 		bp->b_flags |= /* XXX B_DONE | */ B_DELWRI;
2545 		reassignbuf(bp);
2546 		bdirtyadd(bp);
2547 	}
2548 }
2549 
2550 /*
2551  *	bundirty:
2552  *
2553  *	Clear B_DELWRI for buffer.
2554  *
2555  *	Since the buffer is not on a queue, we do not update the numfreebuffers
2556  *	count.
2557  *
2558  *	The buffer must be on QUEUE_NONE.
2559  */
2560 
2561 void
2562 bundirty(struct buf *bp)
2563 {
2564 
2565 	CTR3(KTR_BUF, "bundirty(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2566 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2567 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
2568 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
2569 
2570 	if (bp->b_flags & B_DELWRI) {
2571 		bp->b_flags &= ~B_DELWRI;
2572 		reassignbuf(bp);
2573 		bdirtysub(bp);
2574 	}
2575 	/*
2576 	 * Since it is now being written, we can clear its deferred write flag.
2577 	 */
2578 	bp->b_flags &= ~B_DEFERRED;
2579 }
2580 
2581 /*
2582  *	bawrite:
2583  *
2584  *	Asynchronous write.  Start output on a buffer, but do not wait for
2585  *	it to complete.  The buffer is released when the output completes.
2586  *
2587  *	bwrite() ( or the VOP routine anyway ) is responsible for handling
2588  *	B_INVAL buffers.  Not us.
2589  */
2590 void
2591 bawrite(struct buf *bp)
2592 {
2593 
2594 	bp->b_flags |= B_ASYNC;
2595 	(void) bwrite(bp);
2596 }
2597 
2598 /*
2599  *	babarrierwrite:
2600  *
2601  *	Asynchronous barrier write.  Start output on a buffer, but do not
2602  *	wait for it to complete.  Place a write barrier after this write so
2603  *	that this buffer and all buffers written before it are committed to
2604  *	the disk before any buffers written after this write are committed
2605  *	to the disk.  The buffer is released when the output completes.
2606  */
2607 void
2608 babarrierwrite(struct buf *bp)
2609 {
2610 
2611 	bp->b_flags |= B_ASYNC | B_BARRIER;
2612 	(void) bwrite(bp);
2613 }
2614 
2615 /*
2616  *	bbarrierwrite:
2617  *
2618  *	Synchronous barrier write.  Start output on a buffer and wait for
2619  *	it to complete.  Place a write barrier after this write so that
2620  *	this buffer and all buffers written before it are committed to
2621  *	the disk before any buffers written after this write are committed
2622  *	to the disk.  The buffer is released when the output completes.
2623  */
2624 int
2625 bbarrierwrite(struct buf *bp)
2626 {
2627 
2628 	bp->b_flags |= B_BARRIER;
2629 	return (bwrite(bp));
2630 }
2631 
2632 /*
2633  *	bwillwrite:
2634  *
2635  *	Called prior to the locking of any vnodes when we are expecting to
2636  *	write.  We do not want to starve the buffer cache with too many
2637  *	dirty buffers so we block here.  By blocking prior to the locking
2638  *	of any vnodes we attempt to avoid the situation where a locked vnode
2639  *	prevents the various system daemons from flushing related buffers.
2640  */
2641 void
2642 bwillwrite(void)
2643 {
2644 
2645 	if (buf_dirty_count_severe()) {
2646 		mtx_lock(&bdirtylock);
2647 		while (buf_dirty_count_severe()) {
2648 			bdirtywait = 1;
2649 			msleep(&bdirtywait, &bdirtylock, (PRIBIO + 4),
2650 			    "flswai", 0);
2651 		}
2652 		mtx_unlock(&bdirtylock);
2653 	}
2654 }
2655 
2656 /*
2657  * Return true if we have too many dirty buffers.
2658  */
2659 int
2660 buf_dirty_count_severe(void)
2661 {
2662 
2663 	return (!BIT_EMPTY(BUF_DOMAINS, &bdhidirty));
2664 }
2665 
2666 /*
2667  *	brelse:
2668  *
2669  *	Release a busy buffer and, if requested, free its resources.  The
2670  *	buffer will be stashed in the appropriate bufqueue[] allowing it
2671  *	to be accessed later as a cache entity or reused for other purposes.
2672  */
2673 void
2674 brelse(struct buf *bp)
2675 {
2676 	struct mount *v_mnt;
2677 	int qindex;
2678 
2679 	/*
2680 	 * Many functions erroneously call brelse with a NULL bp under rare
2681 	 * error conditions. Simply return when called with a NULL bp.
2682 	 */
2683 	if (bp == NULL)
2684 		return;
2685 	CTR3(KTR_BUF, "brelse(%p) vp %p flags %X",
2686 	    bp, bp->b_vp, bp->b_flags);
2687 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2688 	    ("brelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2689 	KASSERT((bp->b_flags & B_VMIO) != 0 || (bp->b_flags & B_NOREUSE) == 0,
2690 	    ("brelse: non-VMIO buffer marked NOREUSE"));
2691 
2692 	if (BUF_LOCKRECURSED(bp)) {
2693 		/*
2694 		 * Do not process, in particular, do not handle the
2695 		 * B_INVAL/B_RELBUF and do not release to free list.
2696 		 */
2697 		BUF_UNLOCK(bp);
2698 		return;
2699 	}
2700 
2701 	if (bp->b_flags & B_MANAGED) {
2702 		bqrelse(bp);
2703 		return;
2704 	}
2705 
2706 	if (LIST_EMPTY(&bp->b_dep)) {
2707 		bp->b_flags &= ~B_IOSTARTED;
2708 	} else {
2709 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2710 		    ("brelse: SU io not finished bp %p", bp));
2711 	}
2712 
2713 	if ((bp->b_vflags & (BV_BKGRDINPROG | BV_BKGRDERR)) == BV_BKGRDERR) {
2714 		BO_LOCK(bp->b_bufobj);
2715 		bp->b_vflags &= ~BV_BKGRDERR;
2716 		BO_UNLOCK(bp->b_bufobj);
2717 		bdirty(bp);
2718 	}
2719 
2720 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2721 	    (bp->b_flags & B_INVALONERR)) {
2722 		/*
2723 		 * Forced invalidation of dirty buffer contents, to be used
2724 		 * after a failed write in the rare case that the loss of the
2725 		 * contents is acceptable.  The buffer is invalidated and
2726 		 * freed.
2727 		 */
2728 		bp->b_flags |= B_INVAL | B_RELBUF | B_NOCACHE;
2729 		bp->b_flags &= ~(B_ASYNC | B_CACHE);
2730 	}
2731 
2732 	if (bp->b_iocmd == BIO_WRITE && (bp->b_ioflags & BIO_ERROR) &&
2733 	    (bp->b_error != ENXIO || !LIST_EMPTY(&bp->b_dep)) &&
2734 	    !(bp->b_flags & B_INVAL)) {
2735 		/*
2736 		 * Failed write, redirty.  All errors except ENXIO (which
2737 		 * means the device is gone) are treated as being
2738 		 * transient.
2739 		 *
2740 		 * XXX Treating EIO as transient is not correct; the
2741 		 * contract with the local storage device drivers is that
2742 		 * they will only return EIO once the I/O is no longer
2743 		 * retriable.  Network I/O also respects this through the
2744 		 * guarantees of TCP and/or the internal retries of NFS.
2745 		 * ENOMEM might be transient, but we also have no way of
2746 		 * knowing when it's ok to retry/reschedule.  In general,
2747 		 * this entire case should be made obsolete through better
2748 		 * error handling/recovery and resource scheduling.
2749 		 *
2750 		 * Do this also for buffers that failed with ENXIO, but have
2751 		 * non-empty dependencies - the soft updates code might need
2752 		 * to access the buffer to untangle them.
2753 		 *
2754 		 * Must clear BIO_ERROR to prevent pages from being scrapped.
2755 		 */
2756 		bp->b_ioflags &= ~BIO_ERROR;
2757 		bdirty(bp);
2758 	} else if ((bp->b_flags & (B_NOCACHE | B_INVAL)) ||
2759 	    (bp->b_ioflags & BIO_ERROR) || (bp->b_bufsize <= 0)) {
2760 		/*
2761 		 * Either a failed read I/O, or we were asked to free or not
2762 		 * cache the buffer, or we failed to write to a device that's
2763 		 * no longer present.
2764 		 */
2765 		bp->b_flags |= B_INVAL;
2766 		if (!LIST_EMPTY(&bp->b_dep))
2767 			buf_deallocate(bp);
2768 		if (bp->b_flags & B_DELWRI)
2769 			bdirtysub(bp);
2770 		bp->b_flags &= ~(B_DELWRI | B_CACHE);
2771 		if ((bp->b_flags & B_VMIO) == 0) {
2772 			allocbuf(bp, 0);
2773 			if (bp->b_vp)
2774 				brelvp(bp);
2775 		}
2776 	}
2777 
2778 	/*
2779 	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_truncate()
2780 	 * is called with B_DELWRI set, the underlying pages may wind up
2781 	 * getting freed causing a previous write (bdwrite()) to get 'lost'
2782 	 * because pages associated with a B_DELWRI bp are marked clean.
2783 	 *
2784 	 * We still allow the B_INVAL case to call vfs_vmio_truncate(), even
2785 	 * if B_DELWRI is set.
2786 	 */
2787 	if (bp->b_flags & B_DELWRI)
2788 		bp->b_flags &= ~B_RELBUF;
2789 
2790 	/*
2791 	 * VMIO buffer rundown.  It is generally unnecessary to keep a VMIO buffer
2792 	 * constituted, not even NFS buffers now.  Two flags affect this.  If
2793 	 * B_INVAL, the struct buf is invalidated but the VM object is kept
2794 	 * around ( i.e. so it is trivial to reconstitute the buffer later ).
2795 	 *
2796 	 * If BIO_ERROR or B_NOCACHE is set, pages in the VM object will be
2797 	 * invalidated.  BIO_ERROR cannot be set for a failed write unless the
2798 	 * buffer is also B_INVAL because it hits the re-dirtying code above.
2799 	 *
2800 	 * Normally we can do this whether a buffer is B_DELWRI or not.  If
2801 	 * the buffer is an NFS buffer, it is tracking piecemeal writes or
2802 	 * the commit state and we cannot afford to lose the buffer. If the
2803 	 * buffer has a background write in progress, we need to keep it
2804 	 * around to prevent it from being reconstituted and starting a second
2805 	 * background write.
2806 	 */
2807 
2808 	v_mnt = bp->b_vp != NULL ? bp->b_vp->v_mount : NULL;
2809 
2810 	if ((bp->b_flags & B_VMIO) && (bp->b_flags & B_NOCACHE ||
2811 	    (bp->b_ioflags & BIO_ERROR && bp->b_iocmd == BIO_READ)) &&
2812 	    (v_mnt == NULL || (v_mnt->mnt_vfc->vfc_flags & VFCF_NETWORK) == 0 ||
2813 	    vn_isdisk(bp->b_vp) || (bp->b_flags & B_DELWRI) == 0)) {
2814 		vfs_vmio_invalidate(bp);
2815 		allocbuf(bp, 0);
2816 	}
2817 
2818 	if ((bp->b_flags & (B_INVAL | B_RELBUF)) != 0 ||
2819 	    (bp->b_flags & (B_DELWRI | B_NOREUSE)) == B_NOREUSE) {
2820 		allocbuf(bp, 0);
2821 		bp->b_flags &= ~B_NOREUSE;
2822 		if (bp->b_vp != NULL)
2823 			brelvp(bp);
2824 	}
2825 
2826 	/*
2827 	 * If the buffer has junk contents signal it and eventually
2828 	 * clean up B_DELWRI and disassociate the vnode so that gbincore()
2829 	 * doesn't find it.
2830 	 */
2831 	if (bp->b_bufsize == 0 || (bp->b_ioflags & BIO_ERROR) != 0 ||
2832 	    (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF)) != 0)
2833 		bp->b_flags |= B_INVAL;
2834 	if (bp->b_flags & B_INVAL) {
2835 		if (bp->b_flags & B_DELWRI)
2836 			bundirty(bp);
2837 		if (bp->b_vp)
2838 			brelvp(bp);
2839 	}
2840 
2841 	buf_track(bp, __func__);
2842 
2843 	/* buffers with no memory */
2844 	if (bp->b_bufsize == 0) {
2845 		buf_free(bp);
2846 		return;
2847 	}
2848 	/* buffers with junk contents */
2849 	if (bp->b_flags & (B_INVAL | B_NOCACHE | B_RELBUF) ||
2850 	    (bp->b_ioflags & BIO_ERROR)) {
2851 		bp->b_xflags &= ~(BX_BKGRDWRITE | BX_ALTDATA);
2852 		if (bp->b_vflags & BV_BKGRDINPROG)
2853 			panic("losing buffer 2");
2854 		qindex = QUEUE_CLEAN;
2855 		bp->b_flags |= B_AGE;
2856 	/* remaining buffers */
2857 	} else if (bp->b_flags & B_DELWRI)
2858 		qindex = QUEUE_DIRTY;
2859 	else
2860 		qindex = QUEUE_CLEAN;
2861 
2862 	if ((bp->b_flags & B_DELWRI) == 0 && (bp->b_xflags & BX_VNDIRTY))
2863 		panic("brelse: not dirty");
2864 
2865 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_RELBUF | B_DIRECT);
2866 	bp->b_xflags &= ~(BX_CVTENXIO);
2867 	/* binsfree unlocks bp. */
2868 	binsfree(bp, qindex);
2869 }
2870 
2871 /*
2872  * Release a buffer back to the appropriate queue but do not try to free
2873  * it.  The buffer is expected to be used again soon.
2874  *
2875  * bqrelse() is used by bdwrite() to requeue a delayed write, and used by
2876  * biodone() to requeue an async I/O on completion.  It is also used when
2877  * known good buffers need to be requeued but we think we may need the data
2878  * again soon.
2879  *
2880  * XXX we should be able to leave the B_RELBUF hint set on completion.
2881  */
2882 void
2883 bqrelse(struct buf *bp)
2884 {
2885 	int qindex;
2886 
2887 	CTR3(KTR_BUF, "bqrelse(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2888 	KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
2889 	    ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
2890 
2891 	qindex = QUEUE_NONE;
2892 	if (BUF_LOCKRECURSED(bp)) {
2893 		/* do not release to free list */
2894 		BUF_UNLOCK(bp);
2895 		return;
2896 	}
2897 	bp->b_flags &= ~(B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
2898 	bp->b_xflags &= ~(BX_CVTENXIO);
2899 
2900 	if (LIST_EMPTY(&bp->b_dep)) {
2901 		bp->b_flags &= ~B_IOSTARTED;
2902 	} else {
2903 		KASSERT((bp->b_flags & B_IOSTARTED) == 0,
2904 		    ("bqrelse: SU io not finished bp %p", bp));
2905 	}
2906 
2907 	if (bp->b_flags & B_MANAGED) {
2908 		if (bp->b_flags & B_REMFREE)
2909 			bremfreef(bp);
2910 		goto out;
2911 	}
2912 
2913 	/* buffers with stale but valid contents */
2914 	if ((bp->b_flags & B_DELWRI) != 0 || (bp->b_vflags & (BV_BKGRDINPROG |
2915 	    BV_BKGRDERR)) == BV_BKGRDERR) {
2916 		BO_LOCK(bp->b_bufobj);
2917 		bp->b_vflags &= ~BV_BKGRDERR;
2918 		BO_UNLOCK(bp->b_bufobj);
2919 		qindex = QUEUE_DIRTY;
2920 	} else {
2921 		if ((bp->b_flags & B_DELWRI) == 0 &&
2922 		    (bp->b_xflags & BX_VNDIRTY))
2923 			panic("bqrelse: not dirty");
2924 		if ((bp->b_flags & B_NOREUSE) != 0) {
2925 			brelse(bp);
2926 			return;
2927 		}
2928 		qindex = QUEUE_CLEAN;
2929 	}
2930 	buf_track(bp, __func__);
2931 	/* binsfree unlocks bp. */
2932 	binsfree(bp, qindex);
2933 	return;
2934 
2935 out:
2936 	buf_track(bp, __func__);
2937 	/* unlock */
2938 	BUF_UNLOCK(bp);
2939 }
2940 
2941 /*
2942  * Complete I/O to a VMIO backed page.  Validate the pages as appropriate,
2943  * restore bogus pages.
2944  */
2945 static void
2946 vfs_vmio_iodone(struct buf *bp)
2947 {
2948 	vm_ooffset_t foff;
2949 	vm_page_t m;
2950 	vm_object_t obj;
2951 	struct vnode *vp __unused;
2952 	int i, iosize, resid;
2953 	bool bogus;
2954 
2955 	obj = bp->b_bufobj->bo_object;
2956 	KASSERT(blockcount_read(&obj->paging_in_progress) >= bp->b_npages,
2957 	    ("vfs_vmio_iodone: paging in progress(%d) < b_npages(%d)",
2958 	    blockcount_read(&obj->paging_in_progress), bp->b_npages));
2959 
2960 	vp = bp->b_vp;
2961 	VNPASS(vp->v_holdcnt > 0, vp);
2962 	VNPASS(vp->v_object != NULL, vp);
2963 
2964 	foff = bp->b_offset;
2965 	KASSERT(bp->b_offset != NOOFFSET,
2966 	    ("vfs_vmio_iodone: bp %p has no buffer offset", bp));
2967 
2968 	bogus = false;
2969 	iosize = bp->b_bcount - bp->b_resid;
2970 	for (i = 0; i < bp->b_npages; i++) {
2971 		resid = ((foff + PAGE_SIZE) & ~(off_t)PAGE_MASK) - foff;
2972 		if (resid > iosize)
2973 			resid = iosize;
2974 
2975 		/*
2976 		 * cleanup bogus pages, restoring the originals
2977 		 */
2978 		m = bp->b_pages[i];
2979 		if (m == bogus_page) {
2980 			bogus = true;
2981 			m = vm_page_relookup(obj, OFF_TO_IDX(foff));
2982 			if (m == NULL)
2983 				panic("biodone: page disappeared!");
2984 			bp->b_pages[i] = m;
2985 		} else if ((bp->b_iocmd == BIO_READ) && resid > 0) {
2986 			/*
2987 			 * In the write case, the valid and clean bits are
2988 			 * already changed correctly ( see bdwrite() ), so we
2989 			 * only need to do this here in the read case.
2990 			 */
2991 			KASSERT((m->dirty & vm_page_bits(foff & PAGE_MASK,
2992 			    resid)) == 0, ("vfs_vmio_iodone: page %p "
2993 			    "has unexpected dirty bits", m));
2994 			vfs_page_set_valid(bp, foff, m);
2995 		}
2996 		KASSERT(OFF_TO_IDX(foff) == m->pindex,
2997 		    ("vfs_vmio_iodone: foff(%jd)/pindex(%ju) mismatch",
2998 		    (intmax_t)foff, (uintmax_t)m->pindex));
2999 
3000 		vm_page_sunbusy(m);
3001 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3002 		iosize -= resid;
3003 	}
3004 	vm_object_pip_wakeupn(obj, bp->b_npages);
3005 	if (bogus && buf_mapped(bp)) {
3006 		BUF_CHECK_MAPPED(bp);
3007 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
3008 		    bp->b_pages, bp->b_npages);
3009 	}
3010 }
3011 
3012 /*
3013  * Perform page invalidation when a buffer is released.  The fully invalid
3014  * pages will be reclaimed later in vfs_vmio_truncate().
3015  */
3016 static void
3017 vfs_vmio_invalidate(struct buf *bp)
3018 {
3019 	vm_object_t obj;
3020 	vm_page_t m;
3021 	int flags, i, resid, poffset, presid;
3022 
3023 	if (buf_mapped(bp)) {
3024 		BUF_CHECK_MAPPED(bp);
3025 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), bp->b_npages);
3026 	} else
3027 		BUF_CHECK_UNMAPPED(bp);
3028 	/*
3029 	 * Get the base offset and length of the buffer.  Note that
3030 	 * in the VMIO case if the buffer block size is not
3031 	 * page-aligned then b_data pointer may not be page-aligned.
3032 	 * But our b_pages[] array *IS* page aligned.
3033 	 *
3034 	 * block sizes less than DEV_BSIZE (usually 512) are not
3035 	 * supported due to the page granularity bits (m->valid,
3036 	 * m->dirty, etc...).
3037 	 *
3038 	 * See man buf(9) for more information
3039 	 */
3040 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3041 	obj = bp->b_bufobj->bo_object;
3042 	resid = bp->b_bufsize;
3043 	poffset = bp->b_offset & PAGE_MASK;
3044 	VM_OBJECT_WLOCK(obj);
3045 	for (i = 0; i < bp->b_npages; i++) {
3046 		m = bp->b_pages[i];
3047 		if (m == bogus_page)
3048 			panic("vfs_vmio_invalidate: Unexpected bogus page.");
3049 		bp->b_pages[i] = NULL;
3050 
3051 		presid = resid > (PAGE_SIZE - poffset) ?
3052 		    (PAGE_SIZE - poffset) : resid;
3053 		KASSERT(presid >= 0, ("brelse: extra page"));
3054 		vm_page_busy_acquire(m, VM_ALLOC_SBUSY);
3055 		if (pmap_page_wired_mappings(m) == 0)
3056 			vm_page_set_invalid(m, poffset, presid);
3057 		vm_page_sunbusy(m);
3058 		vm_page_release_locked(m, flags);
3059 		resid -= presid;
3060 		poffset = 0;
3061 	}
3062 	VM_OBJECT_WUNLOCK(obj);
3063 	bp->b_npages = 0;
3064 }
3065 
3066 /*
3067  * Page-granular truncation of an existing VMIO buffer.
3068  */
3069 static void
3070 vfs_vmio_truncate(struct buf *bp, int desiredpages)
3071 {
3072 	vm_object_t obj;
3073 	vm_page_t m;
3074 	int flags, i;
3075 
3076 	if (bp->b_npages == desiredpages)
3077 		return;
3078 
3079 	if (buf_mapped(bp)) {
3080 		BUF_CHECK_MAPPED(bp);
3081 		pmap_qremove((vm_offset_t)trunc_page((vm_offset_t)bp->b_data) +
3082 		    (desiredpages << PAGE_SHIFT), bp->b_npages - desiredpages);
3083 	} else
3084 		BUF_CHECK_UNMAPPED(bp);
3085 
3086 	/*
3087 	 * The object lock is needed only if we will attempt to free pages.
3088 	 */
3089 	flags = (bp->b_flags & B_NOREUSE) != 0 ? VPR_NOREUSE : 0;
3090 	if ((bp->b_flags & B_DIRECT) != 0) {
3091 		flags |= VPR_TRYFREE;
3092 		obj = bp->b_bufobj->bo_object;
3093 		VM_OBJECT_WLOCK(obj);
3094 	} else {
3095 		obj = NULL;
3096 	}
3097 	for (i = desiredpages; i < bp->b_npages; i++) {
3098 		m = bp->b_pages[i];
3099 		KASSERT(m != bogus_page, ("allocbuf: bogus page found"));
3100 		bp->b_pages[i] = NULL;
3101 		if (obj != NULL)
3102 			vm_page_release_locked(m, flags);
3103 		else
3104 			vm_page_release(m, flags);
3105 	}
3106 	if (obj != NULL)
3107 		VM_OBJECT_WUNLOCK(obj);
3108 	bp->b_npages = desiredpages;
3109 }
3110 
3111 /*
3112  * Byte granular extension of VMIO buffers.
3113  */
3114 static void
3115 vfs_vmio_extend(struct buf *bp, int desiredpages, int size)
3116 {
3117 	/*
3118 	 * We are growing the buffer, possibly in a
3119 	 * byte-granular fashion.
3120 	 */
3121 	vm_object_t obj;
3122 	vm_offset_t toff;
3123 	vm_offset_t tinc;
3124 	vm_page_t m;
3125 
3126 	/*
3127 	 * Step 1, bring in the VM pages from the object, allocating
3128 	 * them if necessary.  We must clear B_CACHE if these pages
3129 	 * are not valid for the range covered by the buffer.
3130 	 */
3131 	obj = bp->b_bufobj->bo_object;
3132 	if (bp->b_npages < desiredpages) {
3133 		KASSERT(desiredpages <= atop(maxbcachebuf),
3134 		    ("vfs_vmio_extend past maxbcachebuf %p %d %u",
3135 		    bp, desiredpages, maxbcachebuf));
3136 
3137 		/*
3138 		 * We must allocate system pages since blocking
3139 		 * here could interfere with paging I/O, no
3140 		 * matter which process we are.
3141 		 *
3142 		 * Only exclusive busy can be tested here.
3143 		 * Blocking on shared busy might lead to
3144 		 * deadlocks once allocbuf() is called after
3145 		 * pages are vfs_busy_pages().
3146 		 */
3147 		(void)vm_page_grab_pages_unlocked(obj,
3148 		    OFF_TO_IDX(bp->b_offset) + bp->b_npages,
3149 		    VM_ALLOC_SYSTEM | VM_ALLOC_IGN_SBUSY |
3150 		    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED,
3151 		    &bp->b_pages[bp->b_npages], desiredpages - bp->b_npages);
3152 		bp->b_npages = desiredpages;
3153 	}
3154 
3155 	/*
3156 	 * Step 2.  We've loaded the pages into the buffer,
3157 	 * we have to figure out if we can still have B_CACHE
3158 	 * set.  Note that B_CACHE is set according to the
3159 	 * byte-granular range ( bcount and size ), not the
3160 	 * aligned range ( newbsize ).
3161 	 *
3162 	 * The VM test is against m->valid, which is DEV_BSIZE
3163 	 * aligned.  Needless to say, the validity of the data
3164 	 * needs to also be DEV_BSIZE aligned.  Note that this
3165 	 * fails with NFS if the server or some other client
3166 	 * extends the file's EOF.  If our buffer is resized,
3167 	 * B_CACHE may remain set! XXX
3168 	 */
3169 	toff = bp->b_bcount;
3170 	tinc = PAGE_SIZE - ((bp->b_offset + toff) & PAGE_MASK);
3171 	while ((bp->b_flags & B_CACHE) && toff < size) {
3172 		vm_pindex_t pi;
3173 
3174 		if (tinc > (size - toff))
3175 			tinc = size - toff;
3176 		pi = ((bp->b_offset & PAGE_MASK) + toff) >> PAGE_SHIFT;
3177 		m = bp->b_pages[pi];
3178 		vfs_buf_test_cache(bp, bp->b_offset, toff, tinc, m);
3179 		toff += tinc;
3180 		tinc = PAGE_SIZE;
3181 	}
3182 
3183 	/*
3184 	 * Step 3, fixup the KVA pmap.
3185 	 */
3186 	if (buf_mapped(bp))
3187 		bpmap_qenter(bp);
3188 	else
3189 		BUF_CHECK_UNMAPPED(bp);
3190 }
3191 
3192 /*
3193  * Check to see if a block at a particular lbn is available for a clustered
3194  * write.
3195  */
3196 static int
3197 vfs_bio_clcheck(struct vnode *vp, int size, daddr_t lblkno, daddr_t blkno)
3198 {
3199 	struct buf *bpa;
3200 	int match;
3201 
3202 	match = 0;
3203 
3204 	/* If the buf isn't in core skip it */
3205 	if ((bpa = gbincore(&vp->v_bufobj, lblkno)) == NULL)
3206 		return (0);
3207 
3208 	/* If the buf is busy we don't want to wait for it */
3209 	if (BUF_LOCK(bpa, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
3210 		return (0);
3211 
3212 	/* Only cluster with valid clusterable delayed write buffers */
3213 	if ((bpa->b_flags & (B_DELWRI | B_CLUSTEROK | B_INVAL)) !=
3214 	    (B_DELWRI | B_CLUSTEROK))
3215 		goto done;
3216 
3217 	if (bpa->b_bufsize != size)
3218 		goto done;
3219 
3220 	/*
3221 	 * Check to see if it is in the expected place on disk and that the
3222 	 * block has been mapped.
3223 	 */
3224 	if ((bpa->b_blkno != bpa->b_lblkno) && (bpa->b_blkno == blkno))
3225 		match = 1;
3226 done:
3227 	BUF_UNLOCK(bpa);
3228 	return (match);
3229 }
3230 
3231 /*
3232  *	vfs_bio_awrite:
3233  *
3234  *	Implement clustered async writes for clearing out B_DELWRI buffers.
3235  *	This is much better than the old way of writing only one buffer at
3236  *	a time.  Note that we may not be presented with the buffers in the
3237  *	correct order, so we search for the cluster in both directions.
3238  */
3239 int
3240 vfs_bio_awrite(struct buf *bp)
3241 {
3242 	struct bufobj *bo;
3243 	int i;
3244 	int j;
3245 	daddr_t lblkno = bp->b_lblkno;
3246 	struct vnode *vp = bp->b_vp;
3247 	int ncl;
3248 	int nwritten;
3249 	int size;
3250 	int maxcl;
3251 	int gbflags;
3252 
3253 	bo = &vp->v_bufobj;
3254 	gbflags = (bp->b_data == unmapped_buf) ? GB_UNMAPPED : 0;
3255 	/*
3256 	 * right now we support clustered writing only to regular files.  If
3257 	 * we find a clusterable block we could be in the middle of a cluster
3258 	 * rather than at the beginning.
3259 	 */
3260 	if ((vp->v_type == VREG) &&
3261 	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
3262 	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
3263 		size = vp->v_mount->mnt_stat.f_iosize;
3264 		maxcl = maxphys / size;
3265 
3266 		BO_RLOCK(bo);
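		/*
		 * Scan forward, then backward, from this block counting
		 * contiguous clusterable delayed-write buffers that map to
		 * consecutive disk blocks.
		 */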
3267 		for (i = 1; i < maxcl; i++)
3268 			if (vfs_bio_clcheck(vp, size, lblkno + i,
3269 			    bp->b_blkno + ((i * size) >> DEV_BSHIFT)) == 0)
3270 				break;
3271 
3272 		for (j = 1; i + j <= maxcl && j <= lblkno; j++)
3273 			if (vfs_bio_clcheck(vp, size, lblkno - j,
3274 			    bp->b_blkno - ((j * size) >> DEV_BSHIFT)) == 0)
3275 				break;
3276 		BO_RUNLOCK(bo);
3277 		--j;
3278 		ncl = i + j;
3279 		/*
3280 		 * this is a possible cluster write
3281 		 */
3282 		if (ncl != 1) {
3283 			BUF_UNLOCK(bp);
3284 			nwritten = cluster_wbuild(vp, size, lblkno - j, ncl,
3285 			    gbflags);
3286 			return (nwritten);
3287 		}
3288 	}
3289 	bremfree(bp);
3290 	bp->b_flags |= B_ASYNC;
3291 	/*
3292 	 * default (old) behavior, writing out only one block
3293 	 *
3294 	 * XXX returns b_bufsize instead of b_bcount for nwritten?
3295 	 */
3296 	nwritten = bp->b_bufsize;
3297 	(void) bwrite(bp);
3298 
3299 	return (nwritten);
3300 }
3301 
3302 /*
3303  *	getnewbuf_kva:
3304  *
3305  *	Allocate KVA for an empty buf header according to gbflags.
3306  */
3307 static int
3308 getnewbuf_kva(struct buf *bp, int gbflags, int maxsize)
3309 {
3310 
3311 	if ((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_UNMAPPED) {
3312 		/*
3313 		 * In order to keep fragmentation sane we only allocate kva
3314 		 * in BKVASIZE chunks.  XXX with vmem we can do page size.
3315 		 */
3316 		maxsize = (maxsize + BKVAMASK) & ~BKVAMASK;
3317 
3318 		if (maxsize != bp->b_kvasize &&
3319 		    bufkva_alloc(bp, maxsize, gbflags))
3320 			return (ENOSPC);
3321 	}
3322 	return (0);
3323 }
3324 
3325 /*
3326  *	getnewbuf:
3327  *
3328  *	Find and initialize a new buffer header, freeing up existing buffers
3329  *	in the bufqueues as necessary.  The new buffer is returned locked.
3330  *
3331  *	We block if:
3332  *		We have insufficient buffer headers
3333  *		We have insufficient buffer space
3334  *		buffer_arena is too fragmented ( space reservation fails )
3335  *		If we have to flush dirty buffers ( but we try to avoid this )
3336  *
3337  *	The caller is responsible for releasing the reserved bufspace after
3338  *	allocbuf() is called.
3339  */
3340 static struct buf *
3341 getnewbuf(struct vnode *vp, int slpflag, int slptimeo, int maxsize, int gbflags)
3342 {
3343 	struct bufdomain *bd;
3344 	struct buf *bp;
3345 	bool metadata, reserved;
3346 
3347 	bp = NULL;
3348 	KASSERT((gbflags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3349 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3350 	if (!unmapped_buf_allowed)
3351 		gbflags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3352 
3353 	if (vp == NULL || (vp->v_vflag & (VV_MD | VV_SYSTEM)) != 0 ||
3354 	    vp->v_type == VCHR)
3355 		metadata = true;
3356 	else
3357 		metadata = false;
3358 	if (vp == NULL)
3359 		bd = &bdomain[0];
3360 	else
3361 		bd = &bdomain[vp->v_bufobj.bo_domain];
3362 
3363 	counter_u64_add(getnewbufcalls, 1);
3364 	reserved = false;
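	/*
	 * Reserve bufspace for the request, allocate a buf header and then
	 * its kva.  If any step fails, try to recycle a clean buffer from
	 * this domain and retry.
	 */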
3365 	do {
3366 		if (reserved == false &&
3367 		    bufspace_reserve(bd, maxsize, metadata) != 0) {
3368 			counter_u64_add(getnewbufrestarts, 1);
3369 			continue;
3370 		}
3371 		reserved = true;
3372 		if ((bp = buf_alloc(bd)) == NULL) {
3373 			counter_u64_add(getnewbufrestarts, 1);
3374 			continue;
3375 		}
3376 		if (getnewbuf_kva(bp, gbflags, maxsize) == 0)
3377 			return (bp);
3378 		break;
3379 	} while (buf_recycle(bd, false) == 0);
3380 
3381 	if (reserved)
3382 		bufspace_release(bd, maxsize);
3383 	if (bp != NULL) {
3384 		bp->b_flags |= B_INVAL;
3385 		brelse(bp);
3386 	}
3387 	bufspace_wait(bd, vp, gbflags, slpflag, slptimeo);
3388 
3389 	return (NULL);
3390 }
3391 
3392 /*
3393  *	buf_daemon:
3394  *
3395  *	buffer flushing daemon.  Buffers are normally flushed by the
3396  *	update daemon but if it cannot keep up this process starts to
3397  *	take the load in an attempt to prevent getnewbuf() from blocking.
3398  */
3399 static struct kproc_desc buf_kp = {
3400 	"bufdaemon",
3401 	buf_daemon,
3402 	&bufdaemonproc
3403 };
3404 SYSINIT(bufdaemon, SI_SUB_KTHREAD_BUF, SI_ORDER_FIRST, kproc_start, &buf_kp);
3405 
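/*
 *	buf_flush:
 *
 *	Flush up to 'target' dirty buffers from the given domain.  If none
 *	can be flushed without rollback dependencies, fall back to writing
 *	buffers that require them in the hope of making progress.
 */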
3406 static int
3407 buf_flush(struct vnode *vp, struct bufdomain *bd, int target)
3408 {
3409 	int flushed;
3410 
3411 	flushed = flushbufqueues(vp, bd, target, 0);
3412 	if (flushed == 0) {
3413 		/*
3414 		 * Could not find any buffers without rollback
3415 		 * dependencies, so just write the first one
3416 		 * in the hopes of eventually making progress.
3417 		 */
3418 		if (vp != NULL && target > 2)
3419 			target /= 2;
3420 		flushbufqueues(vp, bd, target, 1);
3421 	}
3422 	return (flushed);
3423 }
3424 
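/*
 *	buf_daemon_shutdown:
 *
 *	Shutdown hook: ask the buf daemon to exit and wait a bounded time
 *	for it to acknowledge before the pre-sync shutdown continues.
 */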
3425 static void
3426 buf_daemon_shutdown(void *arg __unused, int howto __unused)
3427 {
3428 	int error;
3429 
3430 	if (KERNEL_PANICKED())
3431 		return;
3432 
3433 	mtx_lock(&bdlock);
3434 	bd_shutdown = true;
3435 	wakeup(&bd_request);
3436 	error = msleep(&bd_shutdown, &bdlock, 0, "buf_daemon_shutdown",
3437 	    60 * hz);
3438 	mtx_unlock(&bdlock);
3439 	if (error != 0)
3440 		printf("bufdaemon wait error: %d\n", error);
3441 }
3442 
3443 static void
3444 buf_daemon(void)
3445 {
3446 	struct bufdomain *bd;
3447 	int speedupreq;
3448 	int lodirty;
3449 	int i;
3450 
3451 	/*
3452 	 * This process needs to be suspended prior to shutdown sync.
3453 	 */
3454 	EVENTHANDLER_REGISTER(shutdown_pre_sync, buf_daemon_shutdown, NULL,
3455 	    SHUTDOWN_PRI_LAST + 100);
3456 
3457 	/*
3458 	 * Start the buf clean daemons as children threads.
3459 	 */
3460 	for (i = 0 ; i < buf_domains; i++) {
3461 		int error;
3462 
3463 		error = kthread_add((void (*)(void *))bufspace_daemon,
3464 		    &bdomain[i], curproc, NULL, 0, 0, "bufspacedaemon-%d", i);
3465 		if (error)
3466 			panic("error %d spawning bufspace daemon", error);
3467 	}
3468 
3469 	/*
3470 	 * This process is allowed to take the buffer cache to the limit
3471 	 */
3472 	curthread->td_pflags |= TDP_NORUNNINGBUF | TDP_BUFNEED;
3473 	mtx_lock(&bdlock);
3474 	while (!bd_shutdown) {
3475 		bd_request = 0;
3476 		mtx_unlock(&bdlock);
3477 
3478 		/*
3479 		 * Save speedupreq for this pass and reset to capture new
3480 		 * requests.
3481 		 */
3482 		speedupreq = bd_speedupreq;
3483 		bd_speedupreq = 0;
3484 
3485 		/*
3486 		 * Flush each domain sequentially according to its level and
3487 		 * the speedup request.
3488 		 */
3489 		for (i = 0; i < buf_domains; i++) {
3490 			bd = &bdomain[i];
3491 			if (speedupreq)
3492 				lodirty = bd->bd_numdirtybuffers / 2;
3493 			else
3494 				lodirty = bd->bd_lodirtybuffers;
3495 			while (bd->bd_numdirtybuffers > lodirty) {
3496 				if (buf_flush(NULL, bd,
3497 				    bd->bd_numdirtybuffers - lodirty) == 0)
3498 					break;
3499 				kern_yield(PRI_USER);
3500 			}
3501 		}
3502 
3503 		/*
3504 		 * Only clear bd_request if we have reached our low water
3505 		 * mark.  The buf_daemon normally waits 1 second and
3506 		 * then incrementally flushes any dirty buffers that have
3507 		 * built up, within reason.
3508 		 *
3509 		 * If we were unable to hit our low water mark and couldn't
3510 		 * find any flushable buffers, we sleep for a short period
3511 		 * to avoid endless loops on unlockable buffers.
3512 		 */
3513 		mtx_lock(&bdlock);
3514 		if (bd_shutdown)
3515 			break;
3516 		if (BIT_EMPTY(BUF_DOMAINS, &bdlodirty)) {
3517 			/*
3518 			 * We reached our low water mark, reset the
3519 			 * request and sleep until we are needed again.
3520 			 * The sleep is just so the suspend code works.
3521 			 */
3522 			bd_request = 0;
3523 			/*
3524 			 * Do an extra wakeup in case dirty threshold
3525 			 * changed via sysctl and the explicit transition
3526 			 * out of shortfall was missed.
3527 			 */
3528 			bdirtywakeup();
3529 			if (runningbufspace <= lorunningspace)
3530 				runningwakeup();
3531 			msleep(&bd_request, &bdlock, PVM, "psleep", hz);
3532 		} else {
3533 			/*
3534 			 * We couldn't find any flushable dirty buffers but
3535 			 * still have too many dirty buffers, so we
3536 			 * have to sleep and try again.  (rare)
3537 			 */
3538 			msleep(&bd_request, &bdlock, PVM, "qsleep", hz / 10);
3539 		}
3540 	}
3541 	wakeup(&bd_shutdown);
3542 	mtx_unlock(&bdlock);
3543 	kthread_exit();
3544 }
3545 
3546 /*
3547  *	flushbufqueues:
3548  *
3549  *	Try to flush a buffer in the dirty queue.  We must be careful to
3550  *	free up B_INVAL buffers instead of write them, which NFS is
3551  *	free up B_INVAL buffers instead of writing them, which NFS is
3552  */
3553 static int flushwithdeps = 0;
3554 SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW | CTLFLAG_STATS,
3555     &flushwithdeps, 0,
3556     "Number of buffers flushed with dependencies that require rollbacks");
3557 
3558 static int
3559 flushbufqueues(struct vnode *lvp, struct bufdomain *bd, int target,
3560     int flushdeps)
3561 {
3562 	struct bufqueue *bq;
3563 	struct buf *sentinel;
3564 	struct vnode *vp;
3565 	struct mount *mp;
3566 	struct buf *bp;
3567 	int hasdeps;
3568 	int flushed;
3569 	int error;
3570 	bool unlock;
3571 
3572 	flushed = 0;
3573 	bq = &bd->bd_dirtyq;
3574 	bp = NULL;
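	/*
	 * A sentinel buf marks our position in the dirty queue so that the
	 * queue lock can be dropped while each candidate buffer is
	 * examined and possibly written.
	 */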
3575 	sentinel = malloc(sizeof(struct buf), M_TEMP, M_WAITOK | M_ZERO);
3576 	sentinel->b_qindex = QUEUE_SENTINEL;
3577 	BQ_LOCK(bq);
3578 	TAILQ_INSERT_HEAD(&bq->bq_queue, sentinel, b_freelist);
3579 	BQ_UNLOCK(bq);
3580 	while (flushed != target) {
3581 		maybe_yield();
3582 		BQ_LOCK(bq);
3583 		bp = TAILQ_NEXT(sentinel, b_freelist);
3584 		if (bp != NULL) {
3585 			TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3586 			TAILQ_INSERT_AFTER(&bq->bq_queue, bp, sentinel,
3587 			    b_freelist);
3588 		} else {
3589 			BQ_UNLOCK(bq);
3590 			break;
3591 		}
3592 		/*
3593 		 * Skip sentinels inserted by other invocations of
3594 		 * flushbufqueues(), taking care not to reorder them.
3595 		 *
3596 		 * Only flush the buffers that belong to the
3597 		 * vnode locked by the curthread.
3598 		 */
3599 		if (bp->b_qindex == QUEUE_SENTINEL || (lvp != NULL &&
3600 		    bp->b_vp != lvp)) {
3601 			BQ_UNLOCK(bq);
3602 			continue;
3603 		}
3604 		error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
3605 		BQ_UNLOCK(bq);
3606 		if (error != 0)
3607 			continue;
3608 
3609 		/*
3610 		 * BKGRDINPROG can only be set with the buf and bufobj
3611 		 * locks both held.  We tolerate a race to clear it here.
3612 		 */
3613 		if ((bp->b_vflags & BV_BKGRDINPROG) != 0 ||
3614 		    (bp->b_flags & B_DELWRI) == 0) {
3615 			BUF_UNLOCK(bp);
3616 			continue;
3617 		}
3618 		if (bp->b_flags & B_INVAL) {
3619 			bremfreef(bp);
3620 			brelse(bp);
3621 			flushed++;
3622 			continue;
3623 		}
3624 
3625 		if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0)) {
3626 			if (flushdeps == 0) {
3627 				BUF_UNLOCK(bp);
3628 				continue;
3629 			}
3630 			hasdeps = 1;
3631 		} else
3632 			hasdeps = 0;
3633 		/*
3634 		 * We must hold the lock on a vnode before writing
3635 		 * one of its buffers. Otherwise we may confuse, or
3636 		 * in the case of a snapshot vnode, deadlock the
3637 		 * system.
3638 		 *
3639 		 * The lock order here is the reverse of the normal order
3640 		 * of vnode lock followed by buf lock.  This is ok because
3641 		 * the NOWAIT prevents deadlock.
3642 		 */
3643 		vp = bp->b_vp;
3644 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
3645 			BUF_UNLOCK(bp);
3646 			continue;
3647 		}
3648 		if (lvp == NULL) {
3649 			unlock = true;
3650 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
3651 		} else {
3652 			ASSERT_VOP_LOCKED(vp, "getbuf");
3653 			unlock = false;
3654 			error = VOP_ISLOCKED(vp) == LK_EXCLUSIVE ? 0 :
3655 			    vn_lock(vp, LK_TRYUPGRADE);
3656 		}
3657 		if (error == 0) {
3658 			CTR3(KTR_BUF, "flushbufqueue(%p) vp %p flags %X",
3659 			    bp, bp->b_vp, bp->b_flags);
3660 			if (curproc == bufdaemonproc) {
3661 				vfs_bio_awrite(bp);
3662 			} else {
3663 				bremfree(bp);
3664 				bwrite(bp);
3665 				counter_u64_add(notbufdflushes, 1);
3666 			}
3667 			vn_finished_write(mp);
3668 			if (unlock)
3669 				VOP_UNLOCK(vp);
3670 			flushwithdeps += hasdeps;
3671 			flushed++;
3672 
3673 			/*
3674 			 * Sleeping on runningbufspace while holding
3675 			 * vnode lock leads to deadlock.
3676 			 */
3677 			if (curproc == bufdaemonproc &&
3678 			    runningbufspace > hirunningspace)
3679 				waitrunningbufspace();
3680 			continue;
3681 		}
3682 		vn_finished_write(mp);
3683 		BUF_UNLOCK(bp);
3684 	}
3685 	BQ_LOCK(bq);
3686 	TAILQ_REMOVE(&bq->bq_queue, sentinel, b_freelist);
3687 	BQ_UNLOCK(bq);
3688 	free(sentinel, M_TEMP);
3689 	return (flushed);
3690 }
3691 
3692 /*
3693  * Check to see if a block is currently memory resident.
3694  */
3695 struct buf *
3696 incore(struct bufobj *bo, daddr_t blkno)
3697 {
3698 	return (gbincore_unlocked(bo, blkno));
3699 }
3700 
3701 /*
3702  * Returns true if no I/O is needed to access the
3703  * associated VM object.  This is like incore except
3704  * it also hunts around in the VM system for the data.
3705  */
3706 bool
3707 inmem(struct vnode * vp, daddr_t blkno)
3708 {
3709 	vm_object_t obj;
3710 	vm_offset_t toff, tinc, size;
3711 	vm_page_t m, n;
3712 	vm_ooffset_t off;
3713 	int valid;
3714 
3715 	ASSERT_VOP_LOCKED(vp, "inmem");
3716 
3717 	if (incore(&vp->v_bufobj, blkno))
3718 		return (true);
3719 	if (vp->v_mount == NULL)
3720 		return (false);
3721 	obj = vp->v_object;
3722 	if (obj == NULL)
3723 		return (false);
3724 
3725 	size = PAGE_SIZE;
3726 	if (size > vp->v_mount->mnt_stat.f_iosize)
3727 		size = vp->v_mount->mnt_stat.f_iosize;
3728 	off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
3729 
3730 	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
3731 		m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3732 recheck:
3733 		if (m == NULL)
3734 			return (false);
3735 
3736 		tinc = size;
3737 		if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
3738 			tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
3739 		/*
3740 		 * Consider page validity only if page mapping didn't change
3741 		 * during the check.
3742 		 */
3743 		valid = vm_page_is_valid(m,
3744 		    (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
3745 		n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
3746 		if (m != n) {
3747 			m = n;
3748 			goto recheck;
3749 		}
3750 		if (!valid)
3751 			return (false);
3752 	}
3753 	return (true);
3754 }
3755 
3756 /*
3757  * Set the dirty range for a buffer based on the status of the dirty
3758  * bits in the pages comprising the buffer.  The range is limited
3759  * to the size of the buffer.
3760  *
3761  * Tell the VM system that the pages associated with this buffer
3762  * are clean.  This is used for delayed writes where the data is
3763  * going to go to disk eventually without additional VM intervention.
3764  *
3765  * Note that while we only really need to clean through to b_bcount, we
3766  * just go ahead and clean through to b_bufsize.
3767  */
3768 static void
3769 vfs_clean_pages_dirty_buf(struct buf *bp)
3770 {
3771 	vm_ooffset_t foff, noff, eoff;
3772 	vm_page_t m;
3773 	int i;
3774 
3775 	if ((bp->b_flags & B_VMIO) == 0 || bp->b_bufsize == 0)
3776 		return;
3777 
3778 	foff = bp->b_offset;
3779 	KASSERT(bp->b_offset != NOOFFSET,
3780 	    ("vfs_clean_pages_dirty_buf: no buffer offset"));
3781 
3782 	vfs_busy_pages_acquire(bp);
3783 	vfs_setdirty_range(bp);
3784 	for (i = 0; i < bp->b_npages; i++) {
3785 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
3786 		eoff = noff;
3787 		if (eoff > bp->b_offset + bp->b_bufsize)
3788 			eoff = bp->b_offset + bp->b_bufsize;
3789 		m = bp->b_pages[i];
3790 		vfs_page_set_validclean(bp, foff, m);
3791 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
3792 		foff = noff;
3793 	}
3794 	vfs_busy_pages_release(bp);
3795 }
3796 
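/*
 * Compute the dirty range of the buffer from the dirty bits of its pages,
 * clip it to b_bcount, and merge it into b_dirtyoff/b_dirtyend.
 */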
3797 static void
3798 vfs_setdirty_range(struct buf *bp)
3799 {
3800 	vm_offset_t boffset;
3801 	vm_offset_t eoffset;
3802 	int i;
3803 
3804 	/*
3805 	 * test the pages to see if they have been modified directly
3806 	 * by users through the VM system.
3807 	 */
3808 	for (i = 0; i < bp->b_npages; i++)
3809 		vm_page_test_dirty(bp->b_pages[i]);
3810 
3811 	/*
3812 	 * Calculate the encompassing dirty range, boffset and eoffset,
3813 	 * (eoffset - boffset) bytes.
3814 	 */
3815 
3816 	for (i = 0; i < bp->b_npages; i++) {
3817 		if (bp->b_pages[i]->dirty)
3818 			break;
3819 	}
3820 	boffset = (i << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3821 
3822 	for (i = bp->b_npages - 1; i >= 0; --i) {
3823 		if (bp->b_pages[i]->dirty) {
3824 			break;
3825 		}
3826 	}
3827 	eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK);
3828 
3829 	/*
3830 	 * Fit it to the buffer.
3831 	 */
3832 
3833 	if (eoffset > bp->b_bcount)
3834 		eoffset = bp->b_bcount;
3835 
3836 	/*
3837 	 * If we have a good dirty range, merge with the existing
3838 	 * dirty range.
3839 	 */
3840 
3841 	if (boffset < eoffset) {
3842 		if (bp->b_dirtyoff > boffset)
3843 			bp->b_dirtyoff = boffset;
3844 		if (bp->b_dirtyend < eoffset)
3845 			bp->b_dirtyend = eoffset;
3846 	}
3847 }
3848 
3849 /*
3850  * Allocate the KVA mapping for an existing buffer.
3851  * If an unmapped buffer is provided but a mapped buffer is requested, also
3852  * take care to properly set up the mappings between pages and KVA.
3853  */
3854 static void
3855 bp_unmapped_get_kva(struct buf *bp, daddr_t blkno, int size, int gbflags)
3856 {
3857 	int bsize, maxsize, need_mapping, need_kva;
3858 	off_t offset;
3859 
3860 	need_mapping = bp->b_data == unmapped_buf &&
3861 	    (gbflags & GB_UNMAPPED) == 0;
3862 	need_kva = bp->b_kvabase == unmapped_buf &&
3863 	    bp->b_data == unmapped_buf &&
3864 	    (gbflags & GB_KVAALLOC) != 0;
3865 	if (!need_mapping && !need_kva)
3866 		return;
3867 
3868 	BUF_CHECK_UNMAPPED(bp);
3869 
3870 	if (need_mapping && bp->b_kvabase != unmapped_buf) {
3871 		/*
3872 		 * Buffer is not mapped, but the KVA was already
3873 		 * reserved at the time of the instantiation.  Use the
3874 		 * allocated space.
3875 		 */
3876 		goto has_addr;
3877 	}
3878 
3879 	/*
3880 	 * Calculate the amount of the address space we would reserve
3881 	 * if the buffer was mapped.
3882 	 */
3883 	bsize = vn_isdisk(bp->b_vp) ? DEV_BSIZE : bp->b_bufobj->bo_bsize;
3884 	KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
3885 	offset = blkno * bsize;
3886 	maxsize = size + (offset & PAGE_MASK);
3887 	maxsize = imax(maxsize, bsize);
3888 
3889 	while (bufkva_alloc(bp, maxsize, gbflags) != 0) {
3890 		if ((gbflags & GB_NOWAIT_BD) != 0) {
3891 			/*
3892 			 * XXXKIB: defragmentation cannot
3893 			 * succeed, not sure what else to do.
3894 			 */
3895 			panic("GB_NOWAIT_BD and GB_UNMAPPED %p", bp);
3896 		}
3897 		counter_u64_add(mappingrestarts, 1);
3898 		bufspace_wait(bufdomain(bp), bp->b_vp, gbflags, 0, 0);
3899 	}
3900 has_addr:
3901 	if (need_mapping) {
3902 		/* b_offset is handled by bpmap_qenter. */
3903 		bp->b_data = bp->b_kvabase;
3904 		BUF_CHECK_MAPPED(bp);
3905 		bpmap_qenter(bp);
3906 	}
3907 }
3908 
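/*
 * Convenience wrapper around getblkx() for callers that do not know the
 * disk block address; the logical block number is passed for both blkno
 * and dblkno.
 */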
3909 struct buf *
3910 getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo,
3911     int flags)
3912 {
3913 	struct buf *bp;
3914 	int error;
3915 
3916 	error = getblkx(vp, blkno, blkno, size, slpflag, slptimeo, flags, &bp);
3917 	if (error != 0)
3918 		return (NULL);
3919 	return (bp);
3920 }
3921 
3922 /*
3923  *	getblkx:
3924  *
3925  *	Get a block given a specified block and offset into a file/device.
3926  *	The buffer's B_DONE bit will be cleared on return, making it almost
3927  * 	ready for an I/O initiation.  B_INVAL may or may not be set on
3928  *	return.  The caller should clear B_INVAL prior to initiating a
3929  *	READ.
3930  *
3931  *	For a non-VMIO buffer, B_CACHE is set to the opposite of B_INVAL for
3932  *	an existing buffer.
3933  *
3934  *	For a VMIO buffer, B_CACHE is modified according to the backing VM.
3935  *	If getblk()ing a previously 0-sized invalid buffer, B_CACHE is set
3936  *	and then cleared based on the backing VM.  If the previous buffer is
3937  *	non-0-sized but invalid, B_CACHE will be cleared.
3938  *
3939  *	If getblk() must create a new buffer, the new buffer is returned with
3940  *	both B_INVAL and B_CACHE clear unless it is a VMIO buffer, in which
3941  *	case it is returned with B_INVAL clear and B_CACHE set based on the
3942  *	backing VM.
3943  *
3944  *	getblk() also forces a bwrite() for any B_DELWRI buffer whose
3945  *	B_CACHE bit is clear.
3946  *
3947  *	What this means, basically, is that the caller should use B_CACHE to
3948  *	determine whether the buffer is fully valid or not and should clear
3949  *	B_INVAL prior to issuing a read.  If the caller intends to validate
3950  *	the buffer by loading its data area with something, the caller needs
3951  *	to clear B_INVAL.  If the caller does this without issuing an I/O,
3952  *	the caller should set B_CACHE ( as an optimization ), else the caller
3953  *	should issue the I/O and biodone() will set B_CACHE if the I/O was
3954  *	a write attempt or if it was a successful read.  If the caller
3955  *	intends to issue a READ, the caller must clear B_INVAL and BIO_ERROR
3956  *	prior to issuing the READ.  biodone() will *not* clear B_INVAL.
3957  *
3958  *	The blkno parameter is the logical block being requested. Normally
3959  *	the mapping of logical block number to disk block address is done
3960  *	by calling VOP_BMAP(). However, if the mapping is already known, the
3961  *	disk block address can be passed using the dblkno parameter. If the
3962  *	disk block address is not known, then the same value should be passed
3963  *	for blkno and dblkno.
3964  */
3965 int
3966 getblkx(struct vnode *vp, daddr_t blkno, daddr_t dblkno, int size, int slpflag,
3967     int slptimeo, int flags, struct buf **bpp)
3968 {
3969 	struct buf *bp;
3970 	struct bufobj *bo;
3971 	daddr_t d_blkno;
3972 	int bsize, error, maxsize, vmio;
3973 	off_t offset;
3974 
3975 	CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
3976 	KASSERT((flags & (GB_UNMAPPED | GB_KVAALLOC)) != GB_KVAALLOC,
3977 	    ("GB_KVAALLOC only makes sense with GB_UNMAPPED"));
3978 	if (vp->v_type != VCHR)
3979 		ASSERT_VOP_LOCKED(vp, "getblk");
3980 	if (size > maxbcachebuf)
3981 		panic("getblk: size(%d) > maxbcachebuf(%d)\n", size,
3982 		    maxbcachebuf);
3983 	if (!unmapped_buf_allowed)
3984 		flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
3985 
3986 	bo = &vp->v_bufobj;
3987 	d_blkno = dblkno;
3988 
3989 	/* Attempt lockless lookup first. */
3990 	bp = gbincore_unlocked(bo, blkno);
3991 	if (bp == NULL) {
3992 		/*
3993 		 * With GB_NOCREAT we must be sure about not finding the buffer
3994 		 * as it may have been reassigned during unlocked lookup.
3995 		 */
3996 		if ((flags & GB_NOCREAT) != 0)
3997 			goto loop;
3998 		goto newbuf_unlocked;
3999 	}
4000 
4001 	error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL, "getblku", 0,
4002 	    0);
4003 	if (error != 0)
4004 		goto loop;
4005 
4006 	/* Verify the buf's identity has not changed since lookup. */
4007 	if (bp->b_bufobj == bo && bp->b_lblkno == blkno)
4008 		goto foundbuf_fastpath;
4009 
4010 	/* It changed, fallback to locked lookup. */
4011 	BUF_UNLOCK_RAW(bp);
4012 
4013 loop:
4014 	BO_RLOCK(bo);
4015 	bp = gbincore(bo, blkno);
4016 	if (bp != NULL) {
4017 		int lockflags;
4018 
4019 		/*
4020 		 * Buffer is in-core.  If the buffer is neither busy nor managed,
4021 		 * it must be on a queue.
4022 		 */
4023 		lockflags = LK_EXCLUSIVE | LK_INTERLOCK |
4024 		    ((flags & GB_LOCK_NOWAIT) != 0 ? LK_NOWAIT : LK_SLEEPFAIL);
4025 #ifdef WITNESS
4026 		lockflags |= (flags & GB_NOWITNESS) != 0 ? LK_NOWITNESS : 0;
4027 #endif
4028 
4029 		error = BUF_TIMELOCK(bp, lockflags,
4030 		    BO_LOCKPTR(bo), "getblk", slpflag, slptimeo);
4031 
4032 		/*
4033 		 * If we slept and got the lock we have to restart in case
4034 		 * the buffer changed identities.
4035 		 */
4036 		if (error == ENOLCK)
4037 			goto loop;
4038 		/* We timed out or were interrupted. */
4039 		else if (error != 0)
4040 			return (error);
4041 
4042 foundbuf_fastpath:
4043 		/* If recursed, assume caller knows the rules. */
4044 		if (BUF_LOCKRECURSED(bp))
4045 			goto end;
4046 
4047 		/*
4048 		 * The buffer is locked.  B_CACHE is cleared if the buffer is
4049 		 * invalid.  Otherwise, for a non-VMIO buffer, B_CACHE is set
4050 		 * and for a VMIO buffer B_CACHE is adjusted according to the
4051 		 * backing VM cache.
4052 		 */
4053 		if (bp->b_flags & B_INVAL)
4054 			bp->b_flags &= ~B_CACHE;
4055 		else if ((bp->b_flags & (B_VMIO | B_INVAL)) == 0)
4056 			bp->b_flags |= B_CACHE;
4057 		if (bp->b_flags & B_MANAGED)
4058 			MPASS(bp->b_qindex == QUEUE_NONE);
4059 		else
4060 			bremfree(bp);
4061 
4062 		/*
4063 		 * Check for size inconsistencies for the non-VMIO case.
4064 		 */
4065 		if (bp->b_bcount != size) {
4066 			if ((bp->b_flags & B_VMIO) == 0 ||
4067 			    (size > bp->b_kvasize)) {
4068 				if (bp->b_flags & B_DELWRI) {
4069 					bp->b_flags |= B_NOCACHE;
4070 					bwrite(bp);
4071 				} else {
4072 					if (LIST_EMPTY(&bp->b_dep)) {
4073 						bp->b_flags |= B_RELBUF;
4074 						brelse(bp);
4075 					} else {
4076 						bp->b_flags |= B_NOCACHE;
4077 						bwrite(bp);
4078 					}
4079 				}
4080 				goto loop;
4081 			}
4082 		}
4083 
4084 		/*
4085 		 * Handle the case of an unmapped buffer which should
4086 		 * become mapped, or a buffer for which a KVA
4087 		 * reservation is requested.
4088 		 */
4089 		bp_unmapped_get_kva(bp, blkno, size, flags);
4090 
4091 		/*
4092 		 * If the size is inconsistent in the VMIO case, we can resize
4093 		 * the buffer.  This might lead to B_CACHE getting set or
4094 		 * cleared.  If the size has not changed, B_CACHE remains
4095 		 * unchanged from its previous state.
4096 		 */
4097 		allocbuf(bp, size);
4098 
4099 		KASSERT(bp->b_offset != NOOFFSET,
4100 		    ("getblk: no buffer offset"));
4101 
4102 		/*
4103 		 * A buffer with B_DELWRI set and B_CACHE clear must
4104 		 * be committed before we can return the buffer in
4105 		 * order to prevent the caller from issuing a read
4106 		 * ( due to B_CACHE not being set ) and overwriting
4107 		 * it.
4108 		 *
4109 		 * Most callers, including NFS and FFS, need this to
4110 		 * operate properly either because they assume they
4111 		 * can issue a read if B_CACHE is not set, or because
4112 		 * ( for example ) an uncached B_DELWRI might loop due
4113 		 * to softupdates re-dirtying the buffer.  In the latter
4114 		 * case, B_CACHE is set after the first write completes,
4115 		 * preventing further loops.
4116 		 * NOTE!  b*write() sets B_CACHE.  If we cleared B_CACHE
4117 		 * above while extending the buffer, we cannot allow the
4118 		 * buffer to remain with B_CACHE set after the write
4119 		 * completes or it will represent a corrupt state.  To
4120 		 * deal with this we set B_NOCACHE to scrap the buffer
4121 		 * after the write.
4122 		 *
4123 		 * We might be able to do something fancy, like setting
4124 		 * B_CACHE in bwrite() except if B_DELWRI is already set,
4125 		 * so the below call doesn't set B_CACHE, but that gets real
4126 		 * confusing.  This is much easier.
4127 		 */
4128 
4129 		if ((bp->b_flags & (B_CACHE|B_DELWRI)) == B_DELWRI) {
4130 			bp->b_flags |= B_NOCACHE;
4131 			bwrite(bp);
4132 			goto loop;
4133 		}
4134 		bp->b_flags &= ~B_DONE;
4135 	} else {
4136 		/*
4137 		 * Buffer is not in-core, create new buffer.  The buffer
4138 		 * returned by getnewbuf() is locked.  Note that the returned
4139 		 * buffer is also considered valid (not marked B_INVAL).
4140 		 */
4141 		BO_RUNLOCK(bo);
4142 newbuf_unlocked:
4143 		/*
4144 		 * If the user does not want us to create the buffer, bail out
4145 		 * here.
4146 		 */
4147 		if (flags & GB_NOCREAT)
4148 			return (EEXIST);
4149 
4150 		bsize = vn_isdisk(vp) ? DEV_BSIZE : bo->bo_bsize;
4151 		KASSERT(bsize != 0, ("bsize == 0, check bo->bo_bsize"));
4152 		offset = blkno * bsize;
4153 		vmio = vp->v_object != NULL;
4154 		if (vmio) {
4155 			maxsize = size + (offset & PAGE_MASK);
4156 		} else {
4157 			maxsize = size;
4158 			/* Do not allow non-VMIO unmapped buffers. */
4159 			flags &= ~(GB_UNMAPPED | GB_KVAALLOC);
4160 		}
4161 		maxsize = imax(maxsize, bsize);
4162 		if ((flags & GB_NOSPARSE) != 0 && vmio &&
4163 		    !vn_isdisk(vp)) {
4164 			error = VOP_BMAP(vp, blkno, NULL, &d_blkno, 0, 0);
4165 			KASSERT(error != EOPNOTSUPP,
4166 			    ("GB_NOSPARSE from fs not supporting bmap, vp %p",
4167 			    vp));
4168 			if (error != 0)
4169 				return (error);
4170 			if (d_blkno == -1)
4171 				return (EJUSTRETURN);
4172 		}
4173 
4174 		bp = getnewbuf(vp, slpflag, slptimeo, maxsize, flags);
4175 		if (bp == NULL) {
4176 			if (slpflag || slptimeo)
4177 				return (ETIMEDOUT);
4178 			/*
4179 			 * XXX This is here until the sleep path is diagnosed
4180 			 * enough to work under very low memory conditions.
4181 			 *
4182 			 * There's an issue on low memory, 4BSD+non-preempt
4183 			 * systems (eg MIPS routers with 32MB RAM) where buffer
4184 			 * exhaustion occurs without sleeping for buffer
4185 			 * reclamation.  This just sticks in a loop and
4186 			 * constantly attempts to allocate a buffer, which
4187 			 * hits exhaustion and tries to wakeup bufdaemon.
4188 			 * This never happens because we never yield.
4189 			 *
4190 			 * The real solution is to identify and fix these cases
4191 			 * so we aren't effectively busy-waiting in a loop
4192 			 * until the reclamation path has cycles to run.
4193 			 */
4194 			kern_yield(PRI_USER);
4195 			goto loop;
4196 		}
4197 
4198 		/*
4199 		 * This code is used to make sure that a buffer is not
4200 		 * created while the getnewbuf routine is blocked.
4201 		 * This can be a problem whether the vnode is locked or not.
4202 		 * If the buffer is created out from under us, we have to
4203 		 * throw away the one we just created.
4204 		 *
4205 		 * Note: this must occur before we associate the buffer
4206 		 * with the vp especially considering limitations in
4207 		 * the splay tree implementation when dealing with duplicate
4208 		 * lblkno's.
4209 		 */
4210 		BO_LOCK(bo);
4211 		if (gbincore(bo, blkno)) {
4212 			BO_UNLOCK(bo);
4213 			bp->b_flags |= B_INVAL;
4214 			bufspace_release(bufdomain(bp), maxsize);
4215 			brelse(bp);
4216 			goto loop;
4217 		}
4218 
4219 		/*
4220 		 * Insert the buffer into the hash, so that it can
4221 		 * be found by incore.
4222 		 */
4223 		bp->b_lblkno = blkno;
4224 		bp->b_blkno = d_blkno;
4225 		bp->b_offset = offset;
4226 		bgetvp(vp, bp);
4227 		BO_UNLOCK(bo);
4228 
4229 		/*
4230 		 * set B_VMIO bit.  allocbuf() the buffer bigger.  Since the
4231 		 * buffer size starts out as 0, B_CACHE will be set by
4232 		 * allocbuf() for the VMIO case prior to it testing the
4233 		 * backing store for validity.
4234 		 */
4235 
4236 		if (vmio) {
4237 			bp->b_flags |= B_VMIO;
4238 			KASSERT(vp->v_object == bp->b_bufobj->bo_object,
4239 			    ("ARGH! different b_bufobj->bo_object %p %p %p\n",
4240 			    bp, vp->v_object, bp->b_bufobj->bo_object));
4241 		} else {
4242 			bp->b_flags &= ~B_VMIO;
4243 			KASSERT(bp->b_bufobj->bo_object == NULL,
4244 			    ("ARGH! has b_bufobj->bo_object %p %p\n",
4245 			    bp, bp->b_bufobj->bo_object));
4246 			BUF_CHECK_MAPPED(bp);
4247 		}
4248 
4249 		allocbuf(bp, size);
4250 		bufspace_release(bufdomain(bp), maxsize);
4251 		bp->b_flags &= ~B_DONE;
4252 	}
4253 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
4254 end:
4255 	buf_track(bp, __func__);
4256 	KASSERT(bp->b_bufobj == bo,
4257 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
4258 	*bpp = bp;
4259 	return (0);
4260 }
4261 
4262 /*
4263  * Get an empty, disassociated buffer of given size.  The buffer is initially
4264  * set to B_INVAL.
4265  */
4266 struct buf *
4267 geteblk(int size, int flags)
4268 {
4269 	struct buf *bp;
4270 	int maxsize;
4271 
4272 	maxsize = (size + BKVAMASK) & ~BKVAMASK;
4273 	while ((bp = getnewbuf(NULL, 0, 0, maxsize, flags)) == NULL) {
4274 		if ((flags & GB_NOWAIT_BD) &&
4275 		    (curthread->td_pflags & TDP_BUFNEED) != 0)
4276 			return (NULL);
4277 	}
4278 	allocbuf(bp, size);
4279 	bufspace_release(bufdomain(bp), maxsize);
4280 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
4281 	return (bp);
4282 }
4283 
4284 /*
4285  * Truncate the backing store for a non-vmio buffer.
4286  */
4287 static void
4288 vfs_nonvmio_truncate(struct buf *bp, int newbsize)
4289 {
4290 
4291 	if (bp->b_flags & B_MALLOC) {
4292 		/*
4293 		 * malloced buffers are not shrunk
4294 		 */
4295 		if (newbsize == 0) {
4296 			bufmallocadjust(bp, 0);
4297 			free(bp->b_data, M_BIOBUF);
4298 			bp->b_data = bp->b_kvabase;
4299 			bp->b_flags &= ~B_MALLOC;
4300 		}
4301 		return;
4302 	}
4303 	vm_hold_free_pages(bp, newbsize);
4304 	bufspace_adjust(bp, newbsize);
4305 }
4306 
4307 /*
4308  * Extend the backing for a non-VMIO buffer.
4309  */
4310 static void
4311 vfs_nonvmio_extend(struct buf *bp, int newbsize)
4312 {
4313 	caddr_t origbuf;
4314 	int origbufsize;
4315 
4316 	/*
4317 	 * We only use malloced memory on the first allocation
4318 	 * and revert to page-allocated memory when the buffer
4319 	 * grows.
4320 	 *
4321 	 * There is a potential SMP race here that could lead
4322 	 * to bufmallocspace slightly passing the max.  It
4323 	 * is probably extremely rare and not worth worrying
4324 	 * over.
4325 	 */
4326 	if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 &&
4327 	    bufmallocspace < maxbufmallocspace) {
4328 		bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK);
4329 		bp->b_flags |= B_MALLOC;
4330 		bufmallocadjust(bp, newbsize);
4331 		return;
4332 	}
4333 
4334 	/*
4335 	 * If the buffer is growing on its other-than-first
4336 	 * allocation then we revert to the page-allocation
4337 	 * scheme.
4338 	 */
4339 	origbuf = NULL;
4340 	origbufsize = 0;
4341 	if (bp->b_flags & B_MALLOC) {
4342 		origbuf = bp->b_data;
4343 		origbufsize = bp->b_bufsize;
4344 		bp->b_data = bp->b_kvabase;
4345 		bufmallocadjust(bp, 0);
4346 		bp->b_flags &= ~B_MALLOC;
4347 		newbsize = round_page(newbsize);
4348 	}
4349 	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize,
4350 	    (vm_offset_t) bp->b_data + newbsize);
4351 	if (origbuf != NULL) {
4352 		bcopy(origbuf, bp->b_data, origbufsize);
4353 		free(origbuf, M_BIOBUF);
4354 	}
4355 	bufspace_adjust(bp, newbsize);
4356 }
4357 
4358 /*
4359  * This code constitutes the buffer memory from either anonymous system
4360  * memory (in the case of non-VMIO operations) or from an associated
4361  * VM object (in the case of VMIO operations).  This code is able to
4362  * resize a buffer up or down.
4363  *
4364  * Note that this code is tricky, and has many complications to resolve
4365  * deadlock or inconsistent data situations.  Tread lightly!!!
4366  * There are B_CACHE and B_DELWRI interactions that must be dealt with by
4367  * the caller.  Calling this code willy nilly can result in the loss of data.
4368  *
4369  * allocbuf() only adjusts B_CACHE for VMIO buffers.  getblk() deals with
4370  * B_CACHE for the non-VMIO case.
4371  */
4372 int
4373 allocbuf(struct buf *bp, int size)
4374 {
4375 	int newbsize;
4376 
4377 	if (bp->b_bcount == size)
4378 		return (1);
4379 
4380 	KASSERT(bp->b_kvasize == 0 || bp->b_kvasize >= size,
4381 	    ("allocbuf: buffer too small %p %#x %#x",
4382 	    bp, bp->b_kvasize, size));
4383 
4384 	newbsize = roundup2(size, DEV_BSIZE);
4385 	if ((bp->b_flags & B_VMIO) == 0) {
4386 		if ((bp->b_flags & B_MALLOC) == 0)
4387 			newbsize = round_page(newbsize);
4388 		/*
4389 		 * Just get anonymous memory from the kernel.  Don't
4390 		 * mess with B_CACHE.
4391 		 */
4392 		if (newbsize < bp->b_bufsize)
4393 			vfs_nonvmio_truncate(bp, newbsize);
4394 		else if (newbsize > bp->b_bufsize)
4395 			vfs_nonvmio_extend(bp, newbsize);
4396 	} else {
4397 		int desiredpages;
4398 
4399 		desiredpages = size == 0 ? 0 :
4400 		    num_pages((bp->b_offset & PAGE_MASK) + newbsize);
4401 
4402 		KASSERT((bp->b_flags & B_MALLOC) == 0,
4403 		    ("allocbuf: VMIO buffer can't be malloced %p", bp));
4404 
4405 		/*
4406 		 * Set B_CACHE initially if buffer is 0 length or will become
4407 		 * 0-length.
4408 		 */
4409 		if (size == 0 || bp->b_bufsize == 0)
4410 			bp->b_flags |= B_CACHE;
4411 
4412 		if (newbsize < bp->b_bufsize)
4413 			vfs_vmio_truncate(bp, desiredpages);
4414 		/* XXX This looks as if it should be newbsize > b_bufsize */
4415 		else if (size > bp->b_bcount)
4416 			vfs_vmio_extend(bp, desiredpages, size);
4417 		bufspace_adjust(bp, newbsize);
4418 	}
4419 	bp->b_bcount = size;		/* requested buffer size. */
4420 	return (1);
4421 }
4422 
4423 extern int inflight_transient_maps;
4424 
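/* Bios whose completion is deferred while dumping after a panic. */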
4425 static struct bio_queue nondump_bios;
4426 
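/*
 * Complete a bio.  Tear down any transient KVA mapping, then either invoke
 * the bio_done callback or, when none is set (or it is biodone() itself),
 * mark the bio done and wake up any thread sleeping in biowait().
 */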
4427 void
4428 biodone(struct bio *bp)
4429 {
4430 	struct mtx *mtxp;
4431 	void (*done)(struct bio *);
4432 	vm_offset_t start, end;
4433 
4434 	biotrack(bp, __func__);
4435 
4436 	/*
4437 	 * Avoid completing I/O when dumping after a panic since that may
4438 	 * result in a deadlock in the filesystem or pager code.  Note that
4439 	 * this doesn't affect dumps that were started manually since we aim
4440 	 * to keep the system usable after it has been resumed.
4441 	 */
4442 	if (__predict_false(dumping && SCHEDULER_STOPPED())) {
4443 		TAILQ_INSERT_HEAD(&nondump_bios, bp, bio_queue);
4444 		return;
4445 	}
4446 	if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
4447 		bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
4448 		bp->bio_flags |= BIO_UNMAPPED;
4449 		start = trunc_page((vm_offset_t)bp->bio_data);
4450 		end = round_page((vm_offset_t)bp->bio_data + bp->bio_length);
4451 		bp->bio_data = unmapped_buf;
4452 		pmap_qremove(start, atop(end - start));
4453 		vmem_free(transient_arena, start, end - start);
4454 		atomic_add_int(&inflight_transient_maps, -1);
4455 	}
4456 	done = bp->bio_done;
4457 	/*
4458 	 * The check for done == biodone is to allow biodone to be
4459 	 * used as a bio_done routine.
4460 	 */
4461 	if (done == NULL || done == biodone) {
4462 		mtxp = mtx_pool_find(mtxpool_sleep, bp);
4463 		mtx_lock(mtxp);
4464 		bp->bio_flags |= BIO_DONE;
4465 		wakeup(bp);
4466 		mtx_unlock(mtxp);
4467 	} else
4468 		done(bp);
4469 }
4470 
4471 /*
4472  * Wait for a BIO to finish.
4473  */
4474 int
4475 biowait(struct bio *bp, const char *wmesg)
4476 {
4477 	struct mtx *mtxp;
4478 
4479 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
4480 	mtx_lock(mtxp);
4481 	while ((bp->bio_flags & BIO_DONE) == 0)
4482 		msleep(bp, mtxp, PRIBIO, wmesg, 0);
4483 	mtx_unlock(mtxp);
4484 	if (bp->bio_error != 0)
4485 		return (bp->bio_error);
4486 	if (!(bp->bio_flags & BIO_ERROR))
4487 		return (0);
4488 	return (EIO);
4489 }
4490 
4491 void
4492 biofinish(struct bio *bp, struct devstat *stat, int error)
4493 {
4494 
4495 	if (error) {
4496 		bp->bio_error = error;
4497 		bp->bio_flags |= BIO_ERROR;
4498 	}
4499 	if (stat != NULL)
4500 		devstat_end_transaction_bio(stat, bp);
4501 	biodone(bp);
4502 }
4503 
4504 #if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
4505 void
4506 biotrack_buf(struct bio *bp, const char *location)
4507 {
4508 
4509 	buf_track(bp->bio_track_bp, location);
4510 }
4511 #endif
4512 
4513 /*
4514  *	bufwait:
4515  *
4516  *	Wait for buffer I/O completion, returning error status.  The buffer
4517  *	is left locked and B_DONE on return.  B_EINTR is converted into an EINTR
4518  *	error and cleared.
4519  */
4520 int
4521 bufwait(struct buf *bp)
4522 {
4523 	if (bp->b_iocmd == BIO_READ)
4524 		bwait(bp, PRIBIO, "biord");
4525 	else
4526 		bwait(bp, PRIBIO, "biowr");
4527 	if (bp->b_flags & B_EINTR) {
4528 		bp->b_flags &= ~B_EINTR;
4529 		return (EINTR);
4530 	}
4531 	if (bp->b_ioflags & BIO_ERROR) {
4532 		return (bp->b_error ? bp->b_error : EIO);
4533 	} else {
4534 		return (0);
4535 	}
4536 }
4537 
4538 /*
4539  *	bufdone:
4540  *
4541  *	Finish I/O on a buffer, optionally calling a completion function.
4542  *	This is usually called from an interrupt so process blocking is
4543  *	not allowed.
4544  *
4545  *	biodone is also responsible for setting B_CACHE in a B_VMIO bp.
4546  *	In a non-VMIO bp, B_CACHE will be set on the next getblk()
4547  *	assuming B_INVAL is clear.
4548  *
4549  *	For the VMIO case, we set B_CACHE if the op was a read and no
4550  *	read error occurred, or if the op was a write.  B_CACHE is never
4551  *	set if the buffer is invalid or otherwise uncacheable.
4552  *
4553  *	bufdone does not mess with B_INVAL, allowing the I/O routine or the
4554  *	initiator to leave B_INVAL set to brelse the buffer out of existence
4555  *	in the biodone routine.
4556  */
4557 void
4558 bufdone(struct buf *bp)
4559 {
4560 	struct bufobj *dropobj;
4561 	void    (*biodone)(struct buf *);
4562 
4563 	buf_track(bp, __func__);
4564 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
4565 	dropobj = NULL;
4566 
4567 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
4568 
4569 	runningbufwakeup(bp);
4570 	if (bp->b_iocmd == BIO_WRITE)
4571 		dropobj = bp->b_bufobj;
4572 	/* call optional completion function if requested */
4573 	if (bp->b_iodone != NULL) {
4574 		biodone = bp->b_iodone;
4575 		bp->b_iodone = NULL;
4576 		(*biodone) (bp);
4577 		if (dropobj)
4578 			bufobj_wdrop(dropobj);
4579 		return;
4580 	}
4581 	if (bp->b_flags & B_VMIO) {
4582 		/*
4583 		 * Set B_CACHE if the op was a normal read and no error
4584 		 * occurred.  B_CACHE is set for writes in the b*write()
4585 		 * routines.
4586 		 */
4587 		if (bp->b_iocmd == BIO_READ &&
4588 		    !(bp->b_flags & (B_INVAL|B_NOCACHE)) &&
4589 		    !(bp->b_ioflags & BIO_ERROR))
4590 			bp->b_flags |= B_CACHE;
4591 		vfs_vmio_iodone(bp);
4592 	}
4593 	if (!LIST_EMPTY(&bp->b_dep))
4594 		buf_complete(bp);
4595 	if ((bp->b_flags & B_CKHASH) != 0) {
4596 		KASSERT(bp->b_iocmd == BIO_READ,
4597 		    ("bufdone: b_iocmd %d not BIO_READ", bp->b_iocmd));
4598 		KASSERT(buf_mapped(bp), ("bufdone: bp %p not mapped", bp));
4599 		(*bp->b_ckhashcalc)(bp);
4600 	}
4601 	/*
4602 	 * For asynchronous completions, release the buffer now. The brelse
4603 	 * will do a wakeup there if necessary - so no need to do a wakeup
4604 	 * here in the async case. The sync case always needs to do a wakeup.
4605 	 */
4606 	if (bp->b_flags & B_ASYNC) {
4607 		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_RELBUF)) ||
4608 		    (bp->b_ioflags & BIO_ERROR))
4609 			brelse(bp);
4610 		else
4611 			bqrelse(bp);
4612 	} else
4613 		bdone(bp);
4614 	if (dropobj)
4615 		bufobj_wdrop(dropobj);
4616 }
4617 
4618 /*
4619  * This routine is called in lieu of iodone in the case of
4620  * incomplete I/O.  This keeps the busy status for pages
4621  * consistent.
4622  */
4623 void
4624 vfs_unbusy_pages(struct buf *bp)
4625 {
4626 	int i;
4627 	vm_object_t obj;
4628 	vm_page_t m;
4629 
4630 	runningbufwakeup(bp);
4631 	if (!(bp->b_flags & B_VMIO))
4632 		return;
4633 
4634 	obj = bp->b_bufobj->bo_object;
4635 	for (i = 0; i < bp->b_npages; i++) {
4636 		m = bp->b_pages[i];
4637 		if (m == bogus_page) {
4638 			m = vm_page_relookup(obj, OFF_TO_IDX(bp->b_offset) + i);
4639 			if (!m)
4640 				panic("vfs_unbusy_pages: page missing\n");
4641 			bp->b_pages[i] = m;
4642 			if (buf_mapped(bp)) {
4643 				BUF_CHECK_MAPPED(bp);
4644 				pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4645 				    bp->b_pages, bp->b_npages);
4646 			} else
4647 				BUF_CHECK_UNMAPPED(bp);
4648 		}
4649 		vm_page_sunbusy(m);
4650 	}
4651 	vm_object_pip_wakeupn(obj, bp->b_npages);
4652 }
4653 
4654 /*
4655  * vfs_page_set_valid:
4656  *
4657  *	Set the valid bits in a page based on the supplied offset.   The
4658  *	range is restricted to the buffer's size.
4659  *
4660  *	This routine is typically called after a read completes.
4661  */
4662 static void
4663 vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4664 {
4665 	vm_ooffset_t eoff;
4666 
4667 	/*
4668 	 * Compute the end offset, eoff, such that [off, eoff) does not span a
4669 	 * page boundary and eoff is not greater than the end of the buffer.
4670 	 * The end of the buffer, in this case, is our file EOF, not the
4671 	 * allocation size of the buffer.
4672 	 */
4673 	eoff = (off + PAGE_SIZE) & ~(vm_ooffset_t)PAGE_MASK;
4674 	if (eoff > bp->b_offset + bp->b_bcount)
4675 		eoff = bp->b_offset + bp->b_bcount;
4676 
4677 	/*
4678 	 * Set valid range.  This is typically the entire buffer and thus the
4679 	 * entire page.
4680 	 */
4681 	if (eoff > off)
4682 		vm_page_set_valid_range(m, off & PAGE_MASK, eoff - off);
4683 }
4684 
4685 /*
4686  * vfs_page_set_validclean:
4687  *
4688  *	Set the valid bits and clear the dirty bits in a page based on the
4689  *	supplied offset.   The range is restricted to the buffer's size.
4690  */
4691 static void
4692 vfs_page_set_validclean(struct buf *bp, vm_ooffset_t off, vm_page_t m)
4693 {
4694 	vm_ooffset_t soff, eoff;
4695 
4696 	/*
4697 	 * Start and end offsets in buffer.  eoff - soff may not cross a
4698 	 * page boundary or cross the end of the buffer.  The end of the
4699 	 * buffer, in this case, is our file EOF, not the allocation size
4700 	 * of the buffer.
4701 	 */
4702 	soff = off;
4703 	eoff = (off + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4704 	if (eoff > bp->b_offset + bp->b_bcount)
4705 		eoff = bp->b_offset + bp->b_bcount;
4706 
4707 	/*
4708 	 * Set valid range.  This is typically the entire buffer and thus the
4709 	 * entire page.
4710 	 */
4711 	if (eoff > soff) {
4712 		vm_page_set_validclean(
4713 		    m,
4714 		   (vm_offset_t) (soff & PAGE_MASK),
4715 		   (vm_offset_t) (eoff - soff)
4716 		);
4717 	}
4718 }
4719 
4720 /*
4721  * Acquire a shared busy on all pages in the buf.
4722  */
4723 void
4724 vfs_busy_pages_acquire(struct buf *bp)
4725 {
4726 	int i;
4727 
4728 	for (i = 0; i < bp->b_npages; i++)
4729 		vm_page_busy_acquire(bp->b_pages[i], VM_ALLOC_SBUSY);
4730 }
4731 
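/*
 * Release the shared busy state acquired by vfs_busy_pages_acquire().
 */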
4732 void
4733 vfs_busy_pages_release(struct buf *bp)
4734 {
4735 	int i;
4736 
4737 	for (i = 0; i < bp->b_npages; i++)
4738 		vm_page_sunbusy(bp->b_pages[i]);
4739 }
4740 
4741 /*
4742  * This routine is called before a device strategy routine.
4743  * It is used to tell the VM system that paging I/O is in
4744  * progress, and treat the pages associated with the buffer
4745  * almost as being exclusively busy.  Also the object paging_in_progress
4746  * flag is handled to make sure that the object doesn't become
4747  * inconsistent.
4748  *
4749  * Since I/O has not been initiated yet, certain buffer flags
4750  * such as BIO_ERROR or B_INVAL may be in an inconsistent state
4751  * and should be ignored.
4752  */
4753 void
4754 vfs_busy_pages(struct buf *bp, int clear_modify)
4755 {
4756 	vm_object_t obj;
4757 	vm_ooffset_t foff;
4758 	vm_page_t m;
4759 	int i;
4760 	bool bogus;
4761 
4762 	if (!(bp->b_flags & B_VMIO))
4763 		return;
4764 
4765 	obj = bp->b_bufobj->bo_object;
4766 	foff = bp->b_offset;
4767 	KASSERT(bp->b_offset != NOOFFSET,
4768 	    ("vfs_busy_pages: no buffer offset"));
4769 	if ((bp->b_flags & B_CLUSTER) == 0) {
4770 		vm_object_pip_add(obj, bp->b_npages);
4771 		vfs_busy_pages_acquire(bp);
4772 	}
4773 	if (bp->b_bufsize != 0)
4774 		vfs_setdirty_range(bp);
4775 	bogus = false;
4776 	for (i = 0; i < bp->b_npages; i++) {
4777 		m = bp->b_pages[i];
4778 		vm_page_assert_sbusied(m);
4779 
4780 		/*
4781 		 * When readying a buffer for a read ( i.e
4782 		 * clear_modify == 0 ), it is important to do
4783 		 * bogus_page replacement for valid pages in
4784 		 * partially instantiated buffers.  Partially
4785 		 * instantiated buffers can, in turn, occur when
4786 		 * reconstituting a buffer from its VM backing store
4787 		 * base.  We only have to do this if B_CACHE is
4788 		 * clear ( which causes the I/O to occur in the
4789 		 * first place ).  The replacement prevents the read
4790 		 * I/O from overwriting potentially dirty VM-backed
4791 		 * pages.  XXX bogus page replacement is, uh, bogus.
4792 		 * It may not work properly with small-block devices.
4793 		 * We need to find a better way.
4794 		 */
4795 		if (clear_modify) {
4796 			pmap_remove_write(m);
4797 			vfs_page_set_validclean(bp, foff, m);
4798 		} else if (vm_page_all_valid(m) &&
4799 		    (bp->b_flags & B_CACHE) == 0) {
4800 			bp->b_pages[i] = bogus_page;
4801 			bogus = true;
4802 		}
4803 		foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
4804 	}
4805 	if (bogus && buf_mapped(bp)) {
4806 		BUF_CHECK_MAPPED(bp);
4807 		pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
4808 		    bp->b_pages, bp->b_npages);
4809 	}
4810 }
4811 
4812 /*
4813  *	vfs_bio_set_valid:
4814  *
4815  *	Set the range within the buffer to valid.  The range is
4816  *	relative to the beginning of the buffer, b_offset.  Note that
4817  *	b_offset itself may be offset from the beginning of the first
4818  *	page.
4819  */
4820 void
4821 vfs_bio_set_valid(struct buf *bp, int base, int size)
4822 {
4823 	int i, n;
4824 	vm_page_t m;
4825 
4826 	if (!(bp->b_flags & B_VMIO))
4827 		return;
4828 
4829 	/*
4830 	 * Fixup base to be relative to beginning of first page.
4831 	 * Set initial n to be the maximum number of bytes in the
4832 	 * first page that can be validated.
4833 	 */
4834 	base += (bp->b_offset & PAGE_MASK);
4835 	n = PAGE_SIZE - (base & PAGE_MASK);
4836 
4837 	/*
4838 	 * Busy may not be strictly necessary here because the pages are
4839 	 * unlikely to be fully valid and the vnode lock will synchronize
4840 	 * their access via getpages.  It is grabbed for consistency with
4841 	 * other page validation.
4842 	 */
4843 	vfs_busy_pages_acquire(bp);
4844 	for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4845 		m = bp->b_pages[i];
4846 		if (n > size)
4847 			n = size;
4848 		vm_page_set_valid_range(m, base & PAGE_MASK, n);
4849 		base += n;
4850 		size -= n;
4851 		n = PAGE_SIZE;
4852 	}
4853 	vfs_busy_pages_release(bp);
4854 }
4855 
4856 /*
4857  *	vfs_bio_clrbuf:
4858  *
4859  *	If the specified buffer is a non-VMIO buffer, clear the entire
4860  *	buffer.  If the specified buffer is a VMIO buffer, clear and
4861  *	validate only the previously invalid portions of the buffer.
4862  *	This routine essentially fakes an I/O, so we need to clear
4863  *	BIO_ERROR and B_INVAL.
4864  *
4865  *	Note that while we only theoretically need to clear through b_bcount,
4866  *	we go ahead and clear through b_bufsize.
4867  */
4868 void
4869 vfs_bio_clrbuf(struct buf *bp)
4870 {
4871 	int i, j, sa, ea, slide, zbits;
4872 	vm_page_bits_t mask;
4873 
4874 	if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
4875 		clrbuf(bp);
4876 		return;
4877 	}
4878 	bp->b_flags &= ~B_INVAL;
4879 	bp->b_ioflags &= ~BIO_ERROR;
4880 	vfs_busy_pages_acquire(bp);
4881 	sa = bp->b_offset & PAGE_MASK;
4882 	slide = 0;
4883 	for (i = 0; i < bp->b_npages; i++, sa = 0) {
4884 		slide = imin(slide + PAGE_SIZE, bp->b_offset + bp->b_bufsize);
4885 		ea = slide & PAGE_MASK;
4886 		if (ea == 0)
4887 			ea = PAGE_SIZE;
4888 		if (bp->b_pages[i] == bogus_page)
4889 			continue;
4890 		j = sa / DEV_BSIZE;
4891 		zbits = (sizeof(vm_page_bits_t) * NBBY) -
4892 		    (ea - sa) / DEV_BSIZE;
4893 		mask = (VM_PAGE_BITS_ALL >> zbits) << j;
4894 		if ((bp->b_pages[i]->valid & mask) == mask)
4895 			continue;
4896 		if ((bp->b_pages[i]->valid & mask) == 0)
4897 			pmap_zero_page_area(bp->b_pages[i], sa, ea - sa);
4898 		else {
4899 			for (; sa < ea; sa += DEV_BSIZE, j++) {
4900 				if ((bp->b_pages[i]->valid & (1 << j)) == 0) {
4901 					pmap_zero_page_area(bp->b_pages[i],
4902 					    sa, DEV_BSIZE);
4903 				}
4904 			}
4905 		}
4906 		vm_page_set_valid_range(bp->b_pages[i], j * DEV_BSIZE,
4907 		    roundup2(ea - sa, DEV_BSIZE));
4908 	}
4909 	vfs_busy_pages_release(bp);
4910 	bp->b_resid = 0;
4911 }
4912 
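/*
 * Zero a range of the buffer's data area, handling both mapped and
 * unmapped buffers.
 */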
4913 void
4914 vfs_bio_bzero_buf(struct buf *bp, int base, int size)
4915 {
4916 	vm_page_t m;
4917 	int i, n;
4918 
4919 	if (buf_mapped(bp)) {
4920 		BUF_CHECK_MAPPED(bp);
4921 		bzero(bp->b_data + base, size);
4922 	} else {
4923 		BUF_CHECK_UNMAPPED(bp);
4924 		n = PAGE_SIZE - (base & PAGE_MASK);
4925 		for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
4926 			m = bp->b_pages[i];
4927 			if (n > size)
4928 				n = size;
4929 			pmap_zero_page_area(m, base & PAGE_MASK, n);
4930 			base += n;
4931 			size -= n;
4932 			n = PAGE_SIZE;
4933 		}
4934 	}
4935 }
4936 
4937 /*
4938  * Update buffer flags based on I/O request parameters, optionally releasing the
4939  * buffer.  If it's VMIO or direct I/O, the buffer pages are released to the VM,
4940  * where they may be placed on a page queue (VMIO) or freed immediately (direct
4941  * I/O).  Otherwise the buffer is released to the cache.
4942  */
4943 static void
4944 b_io_dismiss(struct buf *bp, int ioflag, bool release)
4945 {
4946 
4947 	KASSERT((ioflag & IO_NOREUSE) == 0 || (ioflag & IO_VMIO) != 0,
4948 	    ("buf %p non-VMIO noreuse", bp));
4949 
4950 	if ((ioflag & IO_DIRECT) != 0)
4951 		bp->b_flags |= B_DIRECT;
4952 	if ((ioflag & IO_EXT) != 0)
4953 		bp->b_xflags |= BX_ALTDATA;
4954 	if ((ioflag & (IO_VMIO | IO_DIRECT)) != 0 && LIST_EMPTY(&bp->b_dep)) {
4955 		bp->b_flags |= B_RELBUF;
4956 		if ((ioflag & IO_NOREUSE) != 0)
4957 			bp->b_flags |= B_NOREUSE;
4958 		if (release)
4959 			brelse(bp);
4960 	} else if (release)
4961 		bqrelse(bp);
4962 }
4963 
4964 void
4965 vfs_bio_brelse(struct buf *bp, int ioflag)
4966 {
4967 
4968 	b_io_dismiss(bp, ioflag, true);
4969 }
4970 
4971 void
4972 vfs_bio_set_flags(struct buf *bp, int ioflag)
4973 {
4974 
4975 	b_io_dismiss(bp, ioflag, false);
4976 }
4977 
4978 /*
4979  * vm_hold_load_pages and vm_hold_free_pages get pages into
4980  * a buffers address space.  The pages are anonymous and are
4981  * a buffer's address space.  The pages are anonymous and are
4982  */
4983 static void
4984 vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
4985 {
4986 	vm_offset_t pg;
4987 	vm_page_t p;
4988 	int index;
4989 
4990 	BUF_CHECK_MAPPED(bp);
4991 
4992 	to = round_page(to);
4993 	from = round_page(from);
4994 	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
4995 	MPASS((bp->b_flags & B_MAXPHYS) == 0);
4996 	KASSERT(to - from <= maxbcachebuf,
4997 	    ("vm_hold_load_pages too large %p %#jx %#jx %u",
4998 	    bp, (uintmax_t)from, (uintmax_t)to, maxbcachebuf));
4999 
5000 	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
5001 		/*
5002 		 * note: must allocate system pages since blocking here
5003 		 * could interfere with paging I/O, no matter which
5004 		 * process we are.
5005 		 */
5006 		p = vm_page_alloc_noobj(VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
5007 		    VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) | VM_ALLOC_WAITOK);
5008 		pmap_qenter(pg, &p, 1);
5009 		bp->b_pages[index] = p;
5010 	}
5011 	bp->b_npages = index;
5012 }
5013 
5014 /* Return pages associated with this buf to the vm system */
5015 static void
5016 vm_hold_free_pages(struct buf *bp, int newbsize)
5017 {
5018 	vm_offset_t from;
5019 	vm_page_t p;
5020 	int index, newnpages;
5021 
5022 	BUF_CHECK_MAPPED(bp);
5023 
5024 	from = round_page((vm_offset_t)bp->b_data + newbsize);
5025 	newnpages = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
5026 	if (bp->b_npages > newnpages)
5027 		pmap_qremove(from, bp->b_npages - newnpages);
5028 	for (index = newnpages; index < bp->b_npages; index++) {
5029 		p = bp->b_pages[index];
5030 		bp->b_pages[index] = NULL;
5031 		vm_page_unwire_noq(p);
5032 		vm_page_free(p);
5033 	}
5034 	bp->b_npages = newnpages;
5035 }
5036 
5037 /*
5038  * Map an IO request into kernel virtual address space.
5039  *
5040  * All requests are (re)mapped into kernel VA space.
5041  * Notice that we use b_bufsize for the size of the buffer
5042  * to be mapped.  b_bcount might be modified by the driver.
5043  *
5044  * Note that even if the caller determines that the address space should
5045  * be valid, a race or a smaller file mapped into a larger space may
5046  * actually cause vmapbuf() to fail, so all callers of vmapbuf() MUST
5047  * check the return value.
5048  *
5049  * This function only works with pager buffers.
5050  */
5051 int
5052 vmapbuf(struct buf *bp, void *uaddr, size_t len, int mapbuf)
5053 {
5054 	vm_prot_t prot;
5055 	int pidx;
5056 
5057 	MPASS((bp->b_flags & B_MAXPHYS) != 0);
5058 	prot = VM_PROT_READ;
5059 	if (bp->b_iocmd == BIO_READ)
5060 		prot |= VM_PROT_WRITE;	/* Less backwards than it looks */
5061 	pidx = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
5062 	    (vm_offset_t)uaddr, len, prot, bp->b_pages, PBUF_PAGES);
5063 	if (pidx < 0)
5064 		return (-1);
5065 	bp->b_bufsize = len;
5066 	bp->b_npages = pidx;
5067 	bp->b_offset = ((vm_offset_t)uaddr) & PAGE_MASK;
5068 	if (mapbuf || !unmapped_buf_allowed) {
5069 		pmap_qenter((vm_offset_t)bp->b_kvabase, bp->b_pages, pidx);
5070 		bp->b_data = bp->b_kvabase + bp->b_offset;
5071 	} else
5072 		bp->b_data = unmapped_buf;
5073 	return (0);
5074 }
5075 
5076 /*
5077  * Free the io map PTEs associated with this IO operation.
5078  * We also invalidate the TLB entries and restore the original b_addr.
5079  *
5080  * This function only works with pager buffers.
5081  */
5082 void
5083 vunmapbuf(struct buf *bp)
5084 {
5085 	int npages;
5086 
5087 	npages = bp->b_npages;
5088 	if (buf_mapped(bp))
5089 		pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
5090 	vm_page_unhold_pages(bp->b_pages, npages);
5091 
5092 	bp->b_data = unmapped_buf;
5093 }
5094 
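/*
 * Mark the buffer done and wake up any thread sleeping on it in bwait().
 */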
5095 void
5096 bdone(struct buf *bp)
5097 {
5098 	struct mtx *mtxp;
5099 
5100 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5101 	mtx_lock(mtxp);
5102 	bp->b_flags |= B_DONE;
5103 	wakeup(bp);
5104 	mtx_unlock(mtxp);
5105 }
5106 
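/*
 * Sleep at the given priority until the buffer's B_DONE flag is set.
 */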
5107 void
5108 bwait(struct buf *bp, u_char pri, const char *wchan)
5109 {
5110 	struct mtx *mtxp;
5111 
5112 	mtxp = mtx_pool_find(mtxpool_sleep, bp);
5113 	mtx_lock(mtxp);
5114 	while ((bp->b_flags & B_DONE) == 0)
5115 		msleep(bp, mtxp, pri, wchan, 0);
5116 	mtx_unlock(mtxp);
5117 }
5118 
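/*
 * Flush the bufobj's buffers by fsync'ing the associated vnode.
 */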
5119 int
5120 bufsync(struct bufobj *bo, int waitfor)
5121 {
5122 
5123 	return (VOP_FSYNC(bo2vnode(bo), waitfor, curthread));
5124 }
5125 
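/*
 * Hand the buffer to the vnode's VOP_STRATEGY() method, asserting that the
 * vnode is consistent with the bufobj and is not a device vnode.
 */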
5126 void
5127 bufstrategy(struct bufobj *bo, struct buf *bp)
5128 {
5129 	int i __unused;
5130 	struct vnode *vp;
5131 
5132 	vp = bp->b_vp;
5133 	KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
5134 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
5135 	    ("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
5136 	i = VOP_STRATEGY(vp, bp);
5137 	KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
5138 }
5139 
5140 /*
5141  * Initialize a struct bufobj before use.  Memory is assumed zero filled.
5142  */
5143 void
5144 bufobj_init(struct bufobj *bo, void *private)
5145 {
5146 	static volatile int bufobj_cleanq;
5147 
5148 	bo->bo_domain =
5149 	    atomic_fetchadd_int(&bufobj_cleanq, 1) % buf_domains;
5150 	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
5151 	bo->bo_private = private;
5152 	TAILQ_INIT(&bo->bo_clean.bv_hd);
5153 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
5154 }
5155 
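/*
 * Account for a write in progress on the bufobj.  bufobj_wrefl() requires
 * the caller to hold the bufobj lock; bufobj_wref() acquires it itself.
 */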
5156 void
5157 bufobj_wrefl(struct bufobj *bo)
5158 {
5159 
5160 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5161 	ASSERT_BO_WLOCKED(bo);
5162 	bo->bo_numoutput++;
5163 }
5164 
5165 void
5166 bufobj_wref(struct bufobj *bo)
5167 {
5168 
5169 	KASSERT(bo != NULL, ("NULL bo in bufobj_wref"));
5170 	BO_LOCK(bo);
5171 	bo->bo_numoutput++;
5172 	BO_UNLOCK(bo);
5173 }
5174 
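/*
 * Drop a write-in-progress reference and wake up any thread sleeping in
 * bufobj_wwait() once the last write completes.
 */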
5175 void
5176 bufobj_wdrop(struct bufobj *bo)
5177 {
5178 
5179 	KASSERT(bo != NULL, ("NULL bo in bufobj_wdrop"));
5180 	BO_LOCK(bo);
5181 	KASSERT(bo->bo_numoutput > 0, ("bufobj_wdrop non-positive count"));
5182 	if ((--bo->bo_numoutput == 0) && (bo->bo_flag & BO_WWAIT)) {
5183 		bo->bo_flag &= ~BO_WWAIT;
5184 		wakeup(&bo->bo_numoutput);
5185 	}
5186 	BO_UNLOCK(bo);
5187 }
5188 
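/*
 * Wait until all writes in progress on the bufobj have completed.
 */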
5189 int
5190 bufobj_wwait(struct bufobj *bo, int slpflag, int timeo)
5191 {
5192 	int error;
5193 
5194 	KASSERT(bo != NULL, ("NULL bo in bufobj_wwait"));
5195 	ASSERT_BO_WLOCKED(bo);
5196 	error = 0;
5197 	while (bo->bo_numoutput) {
5198 		bo->bo_flag |= BO_WWAIT;
5199 		error = msleep(&bo->bo_numoutput, BO_LOCKPTR(bo),
5200 		    slpflag | (PRIBIO + 1), "bo_wwait", timeo);
5201 		if (error)
5202 			break;
5203 	}
5204 	return (error);
5205 }
5206 
5207 /*
5208  * Set bio_data or bio_ma for struct bio from the struct buf.
5209  */
5210 void
5211 bdata2bio(struct buf *bp, struct bio *bip)
5212 {
5213 
5214 	if (!buf_mapped(bp)) {
5215 		KASSERT(unmapped_buf_allowed, ("unmapped"));
5216 		bip->bio_ma = bp->b_pages;
5217 		bip->bio_ma_n = bp->b_npages;
5218 		bip->bio_data = unmapped_buf;
5219 		bip->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
5220 		bip->bio_flags |= BIO_UNMAPPED;
5221 		KASSERT(round_page(bip->bio_ma_offset + bip->bio_length) /
5222 		    PAGE_SIZE == bp->b_npages,
5223 		    ("Buffer %p too short: %d %lld %d", bp, bip->bio_ma_offset,
5224 		    (long long)bip->bio_length, bip->bio_ma_n));
5225 	} else {
5226 		bip->bio_data = bp->b_data;
5227 		bip->bio_ma = NULL;
5228 	}
5229 }
5230 
5231 static int buf_pager_relbuf;
5232 SYSCTL_INT(_vfs, OID_AUTO, buf_pager_relbuf, CTLFLAG_RWTUN,
5233     &buf_pager_relbuf, 0,
5234     "Make buffer pager release buffers after reading");
5235 
5236 /*
5237  * The buffer pager.  It uses buffer reads to validate pages.
5238  *
5239  * In contrast to the generic local pager from vm/vnode_pager.c, this
5240  * pager correctly and easily handles volumes where the underlying
5241  * device block size is greater than the machine page size.  The
5242  * buffer cache transparently extends the requested page run to be
5243  * aligned at the block boundary, and does the necessary bogus page
5244  * replacements in the addends to avoid obliterating already valid
5245  * pages.
5246  *
5247  * The only non-trivial issue is that the exclusive busy state for
5248  * pages, which is assumed by the vm_pager_getpages() interface, is
5249  * incompatible with the VMIO buffer cache's desire to share-busy the
5250  * pages.  This function performs a trivial downgrade of the pages'
5251  * state before reading buffers, and a less trivial upgrade from the
5252  * shared-busy to excl-busy state after the read.
5253  */
5254 int
5255 vfs_bio_getpages(struct vnode *vp, vm_page_t *ma, int count,
5256     int *rbehind, int *rahead, vbg_get_lblkno_t get_lblkno,
5257     vbg_get_blksize_t get_blksize)
5258 {
5259 	vm_page_t m;
5260 	vm_object_t object;
5261 	struct buf *bp;
5262 	struct mount *mp;
5263 	daddr_t lbn, lbnp;
5264 	vm_ooffset_t la, lb, poff, poffe;
5265 	long bo_bs, bsize;
5266 	int br_flags, error, i, pgsin, pgsin_a, pgsin_b;
5267 	bool redo, lpart;
5268 
5269 	object = vp->v_object;
5270 	mp = vp->v_mount;
5271 	error = 0;
5272 	la = IDX_TO_OFF(ma[count - 1]->pindex);
5273 	if (la >= object->un_pager.vnp.vnp_size)
5274 		return (VM_PAGER_BAD);
5275 
5276 	/*
5277 	 * Change the meaning of la from where the last requested page starts
5278 	 * to where it ends, because that's the end of the requested region
5279 	 * and the start of the potential read-ahead region.
5280 	 */
5281 	la += PAGE_SIZE;
5282 	lpart = la > object->un_pager.vnp.vnp_size;
5283 	error = get_blksize(vp, get_lblkno(vp, IDX_TO_OFF(ma[0]->pindex)),
5284 	    &bo_bs);
5285 	if (error != 0)
5286 		return (VM_PAGER_ERROR);
5287 
5288 	/*
5289 	 * Calculate read-ahead, behind and total pages.
5290 	 */
5291 	pgsin = count;
5292 	lb = IDX_TO_OFF(ma[0]->pindex);
5293 	pgsin_b = OFF_TO_IDX(lb - rounddown2(lb, bo_bs));
5294 	pgsin += pgsin_b;
5295 	if (rbehind != NULL)
5296 		*rbehind = pgsin_b;
5297 	pgsin_a = OFF_TO_IDX(roundup2(la, bo_bs) - la);
5298 	if (la + IDX_TO_OFF(pgsin_a) >= object->un_pager.vnp.vnp_size)
5299 		pgsin_a = OFF_TO_IDX(roundup2(object->un_pager.vnp.vnp_size,
5300 		    PAGE_SIZE) - la);
5301 	pgsin += pgsin_a;
5302 	if (rahead != NULL)
5303 		*rahead = pgsin_a;
5304 	VM_CNT_INC(v_vnodein);
5305 	VM_CNT_ADD(v_vnodepgsin, pgsin);
5306 
5307 	br_flags = (mp != NULL && (mp->mnt_kern_flag & MNTK_UNMAPPED_BUFS)
5308 	    != 0) ? GB_UNMAPPED : 0;
5309 again:
5310 	for (i = 0; i < count; i++) {
5311 		if (ma[i] != bogus_page)
5312 			vm_page_busy_downgrade(ma[i]);
5313 	}
5314 
5315 	lbnp = -1;
5316 	for (i = 0; i < count; i++) {
5317 		m = ma[i];
5318 		if (m == bogus_page)
5319 			continue;
5320 
5321 		/*
5322 		 * Pages are shared busy and the object lock is not
5323 		 * owned, which together allow for the pages'
5324 		 * invalidation.  The racy test for validity avoids
5325 		 * useless creation of the buffer for the most typical
5326 		 * case when invalidation is not used in redo or for
5327 		 * parallel read.  The shared->excl upgrade loop at
5328 		 * the end of the function catches the race in a
5329 		 * reliable way (protected by the object lock).
5330 		 */
5331 		if (vm_page_all_valid(m))
5332 			continue;
5333 
5334 		poff = IDX_TO_OFF(m->pindex);
5335 		poffe = MIN(poff + PAGE_SIZE, object->un_pager.vnp.vnp_size);
5336 		for (; poff < poffe; poff += bsize) {
5337 			lbn = get_lblkno(vp, poff);
5338 			if (lbn == lbnp)
5339 				goto next_page;
5340 			lbnp = lbn;
5341 
5342 			error = get_blksize(vp, lbn, &bsize);
5343 			if (error == 0)
5344 				error = bread_gb(vp, lbn, bsize,
5345 				    curthread->td_ucred, br_flags, &bp);
5346 			if (error != 0)
5347 				goto end_pages;
5348 			if (bp->b_rcred == curthread->td_ucred) {
5349 				crfree(bp->b_rcred);
5350 				bp->b_rcred = NOCRED;
5351 			}
5352 			if (LIST_EMPTY(&bp->b_dep)) {
5353 				/*
5354 				 * Invalidation clears m->valid, but
5355 				 * may leave B_CACHE flag if the
5356 				 * buffer existed at the invalidation
5357 				 * time.  In this case, recycle the
5358 				 * buffer to do real read on next
5359 				 * bread() after redo.
5360 				 *
5361 				 * Otherwise B_RELBUF is not strictly
5362 				 * necessary, but enable it to reduce buf
5363 				 * cache pressure.
5364 				 */
5365 				if (buf_pager_relbuf ||
5366 				    !vm_page_all_valid(m))
5367 					bp->b_flags |= B_RELBUF;
5368 
5369 				bp->b_flags &= ~B_NOCACHE;
5370 				brelse(bp);
5371 			} else {
5372 				bqrelse(bp);
5373 			}
5374 		}
5375 		KASSERT(1 /* racy, enable for debugging */ ||
5376 		    vm_page_all_valid(m) || i == count - 1,
5377 		    ("buf %d %p invalid", i, m));
5378 		if (i == count - 1 && lpart) {
5379 			if (!vm_page_none_valid(m) &&
5380 			    !vm_page_all_valid(m))
5381 				vm_page_zero_invalid(m, TRUE);
5382 		}
5383 next_page:;
5384 	}
5385 end_pages:
5386 
5387 	redo = false;
5388 	for (i = 0; i < count; i++) {
5389 		if (ma[i] == bogus_page)
5390 			continue;
5391 		if (vm_page_busy_tryupgrade(ma[i]) == 0) {
5392 			vm_page_sunbusy(ma[i]);
5393 			ma[i] = vm_page_grab_unlocked(object, ma[i]->pindex,
5394 			    VM_ALLOC_NORMAL);
5395 		}
5396 
5397 		/*
5398 		 * Since the pages were only sbusy while neither the
5399 		 * buffer nor the object lock was held by us, or
5400 		 * reallocated while vm_page_grab() slept for busy
5401 		 * relinquish, they could have been invalidated.
5402 		 * Recheck the valid bits and re-read as needed.
5403 		 *
5404 		 * Note that the last page is made fully valid in the
5405 		 * read loop, and partial validity for the page at
5406 		 * index count - 1 could mean that the page was
5407 		 * invalidated or removed, so we must restart for
5408 		 * safety as well.
5409 		 */
5410 		if (!vm_page_all_valid(ma[i]))
5411 			redo = true;
5412 	}
5413 	if (redo && error == 0)
5414 		goto again;
5415 	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
5416 }
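
#if 0
/*
 * Illustrative sketch only (assumed names, not part of this file): a
 * filesystem's VOP_GETPAGES method typically forwards to
 * vfs_bio_getpages() with two callbacks that translate byte offsets
 * to logical block numbers and block sizes.  The "myfs" names and the
 * fixed 16K block size below are made up for the example.
 */
static daddr_t
myfs_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (off / 16384);
}

static int
myfs_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
{

	*sz = 16384;
	return (0);
}

static int
myfs_getpages(struct vop_getpages_args *ap)
{

	return (vfs_bio_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, myfs_gbp_getblkno,
	    myfs_gbp_getblksz));
}
#endif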
5417 
5418 #include "opt_ddb.h"
5419 #ifdef DDB
5420 #include <ddb/ddb.h>
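
/*
 * DDB commands defined below:
 *	show buffer <addr>	- dump one struct buf
 *	show bufqueues		- per-domain buffer queue and space statistics
 *	show lockedbufs		- dump every locked buffer
 *	show vnodebufs <addr>	- dump a vnode's clean and dirty buffers
 *	countfreebufs		- count QUEUE_EMPTY vs. in-use buffers
 */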
5421 
5422 /* DDB command to show buffer data */
5423 DB_SHOW_COMMAND(buffer, db_show_buffer)
5424 {
5425 	/* get args */
5426 	struct buf *bp = (struct buf *)addr;
5427 #ifdef FULL_BUF_TRACKING
5428 	uint32_t i, j;
5429 #endif
5430 
5431 	if (!have_addr) {
5432 		db_printf("usage: show buffer <addr>\n");
5433 		return;
5434 	}
5435 
5436 	db_printf("buf at %p\n", bp);
5437 	db_printf("b_flags = 0x%b, b_xflags=0x%b\n",
5438 	    (u_int)bp->b_flags, PRINT_BUF_FLAGS,
5439 	    (u_int)bp->b_xflags, PRINT_BUF_XFLAGS);
5440 	db_printf("b_vflags=0x%b, b_ioflags=0x%b\n",
5441 	    (u_int)bp->b_vflags, PRINT_BUF_VFLAGS,
5442 	    (u_int)bp->b_ioflags, PRINT_BIO_FLAGS);
5443 	db_printf(
5444 	    "b_error = %d, b_bufsize = %ld, b_bcount = %ld, b_resid = %ld\n"
5445 	    "b_bufobj = (%p), b_data = %p\nb_blkno = %jd, b_lblkno = %jd, "
5446 	    "b_vp = %p, b_dep = %p\n",
5447 	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
5448 	    bp->b_bufobj, bp->b_data, (intmax_t)bp->b_blkno,
5449 	    (intmax_t)bp->b_lblkno, bp->b_vp, bp->b_dep.lh_first);
5450 	db_printf("b_kvabase = %p, b_kvasize = %d\n",
5451 	    bp->b_kvabase, bp->b_kvasize);
5452 	if (bp->b_npages) {
5453 		int i;
5454 		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
5455 		for (i = 0; i < bp->b_npages; i++) {
5456 			vm_page_t m;
5457 			m = bp->b_pages[i];
5458 			if (m != NULL)
5459 				db_printf("(%p, 0x%lx, 0x%lx)", m->object,
5460 				    (u_long)m->pindex,
5461 				    (u_long)VM_PAGE_TO_PHYS(m));
5462 			else
5463 				db_printf("( ??? )");
5464 			if ((i + 1) < bp->b_npages)
5465 				db_printf(",");
5466 		}
5467 		db_printf("\n");
5468 	}
5469 	BUF_LOCKPRINTINFO(bp);
5470 #if defined(FULL_BUF_TRACKING)
5471 	db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
5472 
5473 	i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
5474 	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
5475 		if (bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)] == NULL)
5476 			continue;
5477 		db_printf(" %2u: %s\n", j,
5478 		    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
5479 	}
5480 #elif defined(BUF_TRACKING)
5481 	db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
5482 #endif
5483 	db_printf(" ");
5484 }
5485 
5486 DB_SHOW_COMMAND_FLAGS(bufqueues, bufqueues, DB_CMD_MEMSAFE)
5487 {
5488 	struct bufdomain *bd;
5489 	struct buf *bp;
5490 	long total;
5491 	int i, j, cnt;
5492 
5493 	db_printf("bqempty: %d\n", bqempty.bq_len);
5494 
5495 	for (i = 0; i < buf_domains; i++) {
5496 		bd = &bdomain[i];
5497 		db_printf("Buf domain %d\n", i);
5498 		db_printf("\tfreebufs\t%d\n", bd->bd_freebuffers);
5499 		db_printf("\tlofreebufs\t%d\n", bd->bd_lofreebuffers);
5500 		db_printf("\thifreebufs\t%d\n", bd->bd_hifreebuffers);
5501 		db_printf("\n");
5502 		db_printf("\tbufspace\t%ld\n", bd->bd_bufspace);
5503 		db_printf("\tmaxbufspace\t%ld\n", bd->bd_maxbufspace);
5504 		db_printf("\thibufspace\t%ld\n", bd->bd_hibufspace);
5505 		db_printf("\tlobufspace\t%ld\n", bd->bd_lobufspace);
5506 		db_printf("\tbufspacethresh\t%ld\n", bd->bd_bufspacethresh);
5507 		db_printf("\n");
5508 		db_printf("\tnumdirtybuffers\t%d\n", bd->bd_numdirtybuffers);
5509 		db_printf("\tlodirtybuffers\t%d\n", bd->bd_lodirtybuffers);
5510 		db_printf("\thidirtybuffers\t%d\n", bd->bd_hidirtybuffers);
5511 		db_printf("\tdirtybufthresh\t%d\n", bd->bd_dirtybufthresh);
5512 		db_printf("\n");
5513 		total = 0;
5514 		TAILQ_FOREACH(bp, &bd->bd_cleanq->bq_queue, b_freelist)
5515 			total += bp->b_bufsize;
5516 		db_printf("\tcleanq count\t%d (%ld)\n",
5517 		    bd->bd_cleanq->bq_len, total);
5518 		total = 0;
5519 		TAILQ_FOREACH(bp, &bd->bd_dirtyq.bq_queue, b_freelist)
5520 			total += bp->b_bufsize;
5521 		db_printf("\tdirtyq count\t%d (%ld)\n",
5522 		    bd->bd_dirtyq.bq_len, total);
5523 		db_printf("\twakeup\t\t%d\n", bd->bd_wanted);
5524 		db_printf("\tlim\t\t%d\n", bd->bd_lim);
5525 		db_printf("\tCPU ");
5526 		for (j = 0; j <= mp_maxid; j++)
5527 			db_printf("%d, ", bd->bd_subq[j].bq_len);
5528 		db_printf("\n");
5529 		cnt = 0;
5530 		total = 0;
5531 		for (j = 0; j < nbuf; j++) {
5532 			bp = nbufp(j);
5533 			if (bp->b_domain == i && BUF_ISLOCKED(bp)) {
5534 				cnt++;
5535 				total += bp->b_bufsize;
5536 			}
5537 		}
5538 		db_printf("\tLocked buffers: %d space %ld\n", cnt, total);
5539 		cnt = 0;
5540 		total = 0;
5541 		for (j = 0; j < nbuf; j++) {
5542 			bp = nbufp(j);
5543 			if (bp->b_domain == i) {
5544 				cnt++;
5545 				total += bp->b_bufsize;
5546 			}
5547 		}
5548 		db_printf("\tTotal buffers: %d space %ld\n", cnt, total);
5549 	}
5550 }
5551 
5552 DB_SHOW_COMMAND_FLAGS(lockedbufs, lockedbufs, DB_CMD_MEMSAFE)
5553 {
5554 	struct buf *bp;
5555 	int i;
5556 
5557 	for (i = 0; i < nbuf; i++) {
5558 		bp = nbufp(i);
5559 		if (BUF_ISLOCKED(bp)) {
5560 			db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5561 			db_printf("\n");
5562 			if (db_pager_quit)
5563 				break;
5564 		}
5565 	}
5566 }
5567 
5568 DB_SHOW_COMMAND(vnodebufs, db_show_vnodebufs)
5569 {
5570 	struct vnode *vp;
5571 	struct buf *bp;
5572 
5573 	if (!have_addr) {
5574 		db_printf("usage: show vnodebufs <addr>\n");
5575 		return;
5576 	}
5577 	vp = (struct vnode *)addr;
5578 	db_printf("Clean buffers:\n");
5579 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_clean.bv_hd, b_bobufs) {
5580 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5581 		db_printf("\n");
5582 	}
5583 	db_printf("Dirty buffers:\n");
5584 	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
5585 		db_show_buffer((uintptr_t)bp, 1, 0, NULL);
5586 		db_printf("\n");
5587 	}
5588 }
5589 
5590 DB_COMMAND_FLAGS(countfreebufs, db_countfreebufs, DB_CMD_MEMSAFE)
5591 {
5592 	struct buf *bp;
5593 	int i, used = 0, nfree = 0;
5594 
5595 	if (have_addr) {
5596 		db_printf("usage: countfreebufs\n");
5597 		return;
5598 	}
5599 
5600 	for (i = 0; i < nbuf; i++) {
5601 		bp = nbufp(i);
5602 		if (bp->b_qindex == QUEUE_EMPTY)
5603 			nfree++;
5604 		else
5605 			used++;
5606 	}
5607 
5608 	db_printf("Counted %d free, %d used (%d tot)\n", nfree, used,
5609 	    nfree + used);
5610 	db_printf("numfreebuffers is %d\n", numfreebuffers);
5611 }
5612 #endif /* DDB */
5613