xref: /dragonfly/sys/kern/vfs_mount.c (revision bd611623)
1 /*
2  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  */
70 
71 /*
72  * External virtual filesystem routines
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/malloc.h>
79 #include <sys/mount.h>
80 #include <sys/proc.h>
81 #include <sys/vnode.h>
82 #include <sys/buf.h>
83 #include <sys/eventhandler.h>
84 #include <sys/kthread.h>
85 #include <sys/sysctl.h>
86 
87 #include <machine/limits.h>
88 
89 #include <sys/buf2.h>
90 #include <sys/thread2.h>
91 #include <sys/sysref2.h>
92 
93 #include <vm/vm.h>
94 #include <vm/vm_object.h>
95 
96 struct mountscan_info {
97 	TAILQ_ENTRY(mountscan_info) msi_entry;
98 	int msi_how;
99 	struct mount *msi_node;
100 };
101 
102 struct vmntvnodescan_info {
103 	TAILQ_ENTRY(vmntvnodescan_info) entry;
104 	struct vnode *vp;
105 };
106 
107 struct vnlru_info {
108 	int	pass;
109 };
110 
111 static int vnlru_nowhere = 0;
112 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
113 	    &vnlru_nowhere, 0,
114 	    "Number of times the vnlru process ran without success");
115 
116 
117 static struct lwkt_token mntid_token;
118 static struct mount dummymount;
119 
120 /* note: mountlist exported to pstat */
121 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
122 static TAILQ_HEAD(,mountscan_info) mountscan_list;
123 static struct lwkt_token mountlist_token;
124 static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
125 struct lwkt_token mntvnode_token;
126 
127 static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
128 
129 /*
130  * Called from vfsinit()
131  */
132 void
133 vfs_mount_init(void)
134 {
135 	lwkt_token_init(&mountlist_token, "mntlist");
136 	lwkt_token_init(&mntvnode_token, "mntvnode");
137 	lwkt_token_init(&mntid_token, "mntid");
138 	TAILQ_INIT(&mountscan_list);
139 	TAILQ_INIT(&mntvnodescan_list);
140 	mount_init(&dummymount);
141 	dummymount.mnt_flag |= MNT_RDONLY;
142 	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
143 }
144 
145 /*
146  * Support function called with mntvnode_token held to remove a vnode
147  * from its mount's vnode list.  Any list scans in progress must be updated.
148  */
149 static void
150 vremovevnodemnt(struct vnode *vp)
151 {
152 	struct vmntvnodescan_info *info;
153 
154 	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
155 		if (info->vp == vp)
156 			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
157 	}
158 	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
159 }
160 
161 /*
162  * Allocate a new vnode and associate it with a tag, mount point, and
163  * operations vector.
164  *
165  * A VX locked and refd vnode is returned.  The caller should set up the
166  * remaining fields and vx_put() the vnode or, to leave a vref held,
167  * vx_unlock() it.
168  */
169 int
170 getnewvnode(enum vtagtype tag, struct mount *mp,
171 		struct vnode **vpp, int lktimeout, int lkflags)
172 {
173 	struct vnode *vp;
174 
175 	KKASSERT(mp != NULL);
176 
177 	vp = allocvnode(lktimeout, lkflags);
178 	vp->v_tag = tag;
179 	vp->v_data = NULL;
180 
181 	/*
182 	 * By default the vnode is assigned the mount point's normal
183 	 * operations vector.
184 	 */
185 	vp->v_ops = &mp->mnt_vn_use_ops;
186 
187 	/*
188 	 * Placing the vnode on the mount point's queue makes it visible.
189 	 * VNON prevents it from being messed with, however.
190 	 */
191 	insmntque(vp, mp);
192 
193 	/*
194 	 * A VX locked & refd vnode is returned.
195 	 */
196 	*vpp = vp;
197 	return (0);
198 }
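
/*
 * Illustrative sketch (not part of the source): how a filesystem's
 * inode-load path might consume getnewvnode().  The VT_UFS tag, the
 * "ip" inode pointer and the VREG type are hypothetical placeholders;
 * only the vx_unlock()-with-vref-retained convention follows the
 * comment above.
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;	(attach fs-private inode data)
 *	vp->v_type = VREG;	(set the real type, clearing VNON)
 *	vx_unlock(vp);		(keep the vref, drop the VX lock)
 *	*vpp = vp;
 */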
199 
200 /*
201  * This function creates vnodes with special operations vectors.  The
202  * mount point is optional.
203  *
204  * This routine is being phased out but is still used by vfs_conf to
205  * create vnodes for devices prior to the root mount (with mp == NULL).
206  */
207 int
208 getspecialvnode(enum vtagtype tag, struct mount *mp,
209 		struct vop_ops **ops,
210 		struct vnode **vpp, int lktimeout, int lkflags)
211 {
212 	struct vnode *vp;
213 
214 	vp = allocvnode(lktimeout, lkflags);
215 	vp->v_tag = tag;
216 	vp->v_data = NULL;
217 	vp->v_ops = ops;
218 
219 	if (mp == NULL)
220 		mp = &dummymount;
221 
222 	/*
223 	 * Placing the vnode on the mount point's queue makes it visible.
224 	 * VNON prevents it from being messed with, however.
225 	 */
226 	insmntque(vp, mp);
227 
228 	/*
229 	 * A VX locked & refd vnode is returned.
230 	 */
231 	*vpp = vp;
232 	return (0);
233 }
234 
235 /*
236  * Interlock against an unmount, return 0 on success, non-zero on failure.
237  *
238  * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
239  * is in-progress.
240  *
241  * If no unmount is in-progress, LK_NOWAIT is ignored.  No other flag bits
242  * are used.  A shared lock will be obtained and the filesystem will not
243  * be unmountable until the lock is released.
244  */
245 int
246 vfs_busy(struct mount *mp, int flags)
247 {
248 	int lkflags;
249 
250 	atomic_add_int(&mp->mnt_refs, 1);
251 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
252 		if (flags & LK_NOWAIT) {
253 			atomic_add_int(&mp->mnt_refs, -1);
254 			return (ENOENT);
255 		}
256 		/* XXX not MP safe */
257 		mp->mnt_kern_flag |= MNTK_MWAIT;
258 		/*
259 		 * Since all busy locks are shared except the exclusive
260 		 * lock granted when unmounting, the only place that a
261 		 * wakeup needs to be done is at the release of the
262 		 * exclusive lock at the end of dounmount.
263 		 */
264 		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
265 		atomic_add_int(&mp->mnt_refs, -1);
266 		return (ENOENT);
267 	}
268 	lkflags = LK_SHARED;
269 	if (lockmgr(&mp->mnt_lock, lkflags))
270 		panic("vfs_busy: unexpected lock failure");
271 	return (0);
272 }
273 
274 /*
275  * Free a busy filesystem.
276  *
277  * Decrement refs before releasing the lock so e.g. a pending umount
278  * doesn't give us an unexpected busy error.
279  */
280 void
281 vfs_unbusy(struct mount *mp)
282 {
283 	atomic_add_int(&mp->mnt_refs, -1);
284 	lockmgr(&mp->mnt_lock, LK_RELEASE);
285 }
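
/*
 * Illustrative sketch (not part of the source): a typical vfs_busy()/
 * vfs_unbusy() pairing.  The VFS_STATFS() call is only a stand-in for
 * whatever work must be protected against a concurrent unmount; the
 * interlock pattern itself follows the two functions above.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) != 0)
 *		return (ENOENT);	(unmount in progress, skip it)
 *	error = VFS_STATFS(mp, &mp->mnt_stat, cred);
 *	vfs_unbusy(mp);
 */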
286 
287 /*
288  * Lookup a filesystem type, and if found allocate and initialize
289  * a mount structure for it.
290  *
291  * Devname is usually updated by mount(8) after booting.
292  */
293 int
294 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
295 {
296 	struct vfsconf *vfsp;
297 	struct mount *mp;
298 
299 	if (fstypename == NULL)
300 		return (ENODEV);
301 
302 	vfsp = vfsconf_find_by_name(fstypename);
303 	if (vfsp == NULL)
304 		return (ENODEV);
305 	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
306 	mount_init(mp);
307 	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
308 
309 	vfs_busy(mp, 0);
310 	mp->mnt_vfc = vfsp;
311 	mp->mnt_op = vfsp->vfc_vfsops;
312 	vfsp->vfc_refcount++;
313 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
314 	mp->mnt_flag |= MNT_RDONLY;
315 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
316 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
317 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
318 	*mpp = mp;
319 	return (0);
320 }
321 
322 /*
323  * Basic mount structure initialization
324  */
325 void
326 mount_init(struct mount *mp)
327 {
328 	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
329 	lwkt_token_init(&mp->mnt_token, "permnt");
330 
331 	TAILQ_INIT(&mp->mnt_nvnodelist);
332 	TAILQ_INIT(&mp->mnt_reservedvnlist);
333 	TAILQ_INIT(&mp->mnt_jlist);
334 	mp->mnt_nvnodelistsize = 0;
335 	mp->mnt_flag = 0;
336 	mp->mnt_iosize_max = MAXPHYS;
337 }
338 
339 /*
340  * Lookup a mount point by filesystem identifier.
341  */
342 struct mount *
343 vfs_getvfs(fsid_t *fsid)
344 {
345 	struct mount *mp;
346 
347 	lwkt_gettoken(&mountlist_token);
348 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
349 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
350 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
351 			break;
352 		}
353 	}
354 	lwkt_reltoken(&mountlist_token);
355 	return (mp);
356 }
357 
358 /*
359  * Get a new unique fsid.  Try to make its val[0] unique, since this value
360  * will be used to create fake device numbers for stat().  Also try (but
361  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
362  * support 16-bit device numbers.  We end up with unique val[0]'s for the
363  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
364  *
365  * Keep in mind that several mounts may be running in parallel.  Starting
366  * the search one past where the previous search terminated is both a
367  * micro-optimization and a defense against returning the same fsid to
368  * different mounts.
369  */
370 void
371 vfs_getnewfsid(struct mount *mp)
372 {
373 	static u_int16_t mntid_base;
374 	fsid_t tfsid;
375 	int mtype;
376 
377 	lwkt_gettoken(&mntid_token);
378 	mtype = mp->mnt_vfc->vfc_typenum;
379 	tfsid.val[1] = mtype;
380 	mtype = (mtype & 0xFF) << 24;
381 	for (;;) {
382 		tfsid.val[0] = makeudev(255,
383 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
384 		mntid_base++;
385 		if (vfs_getvfs(&tfsid) == NULL)
386 			break;
387 	}
388 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
389 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
390 	lwkt_reltoken(&mntid_token);
391 }
392 
393 /*
394  * Set the FSID for a new mount point to the template.  Adjust
395  * the FSID to avoid collisions.
396  */
397 int
398 vfs_setfsid(struct mount *mp, fsid_t *template)
399 {
400 	int didmunge = 0;
401 
402 	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
403 	for (;;) {
404 		if (vfs_getvfs(template) == NULL)
405 			break;
406 		didmunge = 1;
407 		++template->val[1];
408 	}
409 	mp->mnt_stat.f_fsid = *template;
410 	return(didmunge);
411 }
412 
413 /*
414  * This routine is called when we have too many vnodes.  It attempts
415  * to free <count> vnodes and will potentially free vnodes that still
416  * have VM backing store (VM backing store is typically the cause
417  * of a vnode blowout so we want to do this).  Therefore, this operation
418  * is not considered cheap.
419  *
420  * A number of conditions may prevent a vnode from being reclaimed:
421  * the buffer cache may have references on the vnode, a directory
422  * vnode may still have references due to the namei cache representing
423  * underlying files, or the vnode may be in active use.  It is not
424  * desirable to reuse such vnodes.  These conditions may cause the
425  * number of vnodes to reach some minimum value regardless of what
426  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
427  */
428 
429 /*
430  * This is a quick non-blocking check to determine if the vnode is a good
431  * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
432  * not a good candidate, 1 if it is.
433  */
434 static __inline int
435 vmightfree(struct vnode *vp, int page_count, int pass)
436 {
437 	if (vp->v_flag & VRECLAIMED)
438 		return (0);
439 #if 0
440 	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
441 		return (0);
442 #endif
443 	if (sysref_isactive(&vp->v_sysref))
444 		return (0);
445 	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
446 		return (0);
447 
448 	/*
449 	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
450 	 * makes a larger set of vnodes eligible.  For now what this really
451 	 * means is that we try to recycle files opened only once before
452 	 * recycling files opened multiple times.
453 	 */
454 	switch(vp->v_flag & (VAGE0 | VAGE1)) {
455 	case 0:
456 		if (pass < 3)
457 			return(0);
458 		break;
459 	case VAGE0:
460 		if (pass < 2)
461 			return(0);
462 		break;
463 	case VAGE1:
464 		if (pass < 1)
465 			return(0);
466 		break;
467 	case VAGE0 | VAGE1:
468 		break;
469 	}
470 	return (1);
471 }
472 
473 /*
474  * The vnode was found to be possibly vgone()able and the caller has locked it
475  * (thus the usecount should be 1 now).  Determine if the vnode is actually
476  * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
477  * can be vgone()'d, 0 otherwise.
478  *
479  * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
480  * in the namecache topology and (B) this vnode has buffer cache bufs.
481  * We cannot remove vnodes with non-leaf namecache associations.  We do a
482  * tentative leaf check prior to attempting to flush out any buffers, but the
483  * 'real' test when all is said and done is that v_auxrefs must become 0 for
484  * the vnode to be freeable.
485  *
486  * We could theoretically just unconditionally flush when v_auxrefs != 0,
487  * but flushing data associated with non-leaf nodes (which are always
488  * directories) just throws it away for no benefit.  It is the buffer
489  * cache's responsibility to choose buffers to recycle from the cached
490  * data point of view.
491  */
492 static int
493 visleaf(struct vnode *vp)
494 {
495 	struct namecache *ncp;
496 
497 	spin_lock(&vp->v_spin);
498 	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
499 		if (!TAILQ_EMPTY(&ncp->nc_list)) {
500 			spin_unlock(&vp->v_spin);
501 			return(0);
502 		}
503 	}
504 	spin_unlock(&vp->v_spin);
505 	return(1);
506 }
507 
508 /*
509  * Try to clean up the vnode to the point where it can be vgone()'d, returning
510  * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
511  * vmightfree() this routine may flush the vnode and block.  Vnodes marked
512  * VFREE are still candidates for vgone()ing because they may hold namecache
513  * resources and could be blocking the namecache directory hierarchy (and
514  * related vnodes) from being freed.
515  */
516 static int
517 vtrytomakegoneable(struct vnode *vp, int page_count)
518 {
519 	if (vp->v_flag & VRECLAIMED)
520 		return (0);
521 	if (vp->v_sysref.refcnt > 1)
522 		return (0);
523 	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
524 		return (0);
525 	if (vp->v_auxrefs && visleaf(vp)) {
526 		vinvalbuf(vp, V_SAVE, 0, 0);
527 #if 0	/* DEBUG */
528 		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
529 			"vrecycle: vp %p succeeded: %s\n"), vp,
530 			(TAILQ_FIRST(&vp->v_namecache) ?
531 			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
532 #endif
533 	}
534 
535 	/*
536 	 * This sequence may seem a little strange, but we need to optimize
537 	 * the critical path a bit.  We can't recycle vnodes with other
538 	 * references and because we are trying to recycle an otherwise
539 	 * perfectly fine vnode we have to invalidate the namecache in a
540 	 * way that avoids possible deadlocks (since the vnode lock is being
541 	 * held here).  Finally, we have to check for other references one
542 	 * last time in case something snuck in during the inval.
543 	 */
544 	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
545 		return (0);
546 	if (cache_inval_vp_nonblock(vp))
547 		return (0);
548 	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
549 }
550 
551 /*
552  * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
553  * to avoid vnodes which have lots of resident pages (we are trying to free
554  * vnodes, not memory).
555  *
556  * This routine is a callback from the mountlist scan.  The mount point
557  * in question will be busied.
558  *
559  * NOTE: The 1/10 reclamation also ensures that the inactive data set
560  *	 (the vnodes being recycled by the one-time use) does not degenerate
561  *	 into too-small a set.  This is important because once a vnode is
562  *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
563  *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
564  *	 be cleaned/freed by the pageout daemon.
565  */
566 static int
567 vlrureclaim(struct mount *mp, void *data)
568 {
569 	struct vnlru_info *info = data;
570 	struct vnode *vp;
571 	int done;
572 	int trigger;
573 	int usevnodes;
574 	int count;
575 	int trigger_mult = vnlru_nowhere;
576 
577 	/*
578 	 * Calculate the trigger point for the resident pages check.  The
579 	 * minimum trigger value is approximately the number of pages in
580 	 * the system divded by the number of vnodes.  However, due to
581 	 * various other system memory overheads unrelated to data caching
582 	 * it is a good idea to double the trigger (at least).
583 	 *
584 	 * trigger_mult starts at 0.  If the recycler is having problems
585 	 * finding enough freeable vnodes it will increase trigger_mult.
586 	 * This should not happen in normal operation, even on machines with
587 	 * low amounts of memory, but extraordinary memory use by the system
588 	 * verses the amount of cached data can trigger it.
589 	 */
590 	usevnodes = desiredvnodes;
591 	if (usevnodes <= 0)
592 		usevnodes = 1;
593 	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;
594 
595 	done = 0;
596 	lwkt_gettoken(&mntvnode_token);
597 	count = mp->mnt_nvnodelistsize / 10 + 1;
598 
599 	while (count && mp->mnt_syncer) {
600 		/*
601 		 * Next vnode.  Use the special syncer vnode to placemark
602 		 * the LRU.  This way the LRU code does not interfere with
603 		 * vmntvnodescan().
604 		 */
605 		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
606 		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
607 		if (vp) {
608 			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
609 					   mp->mnt_syncer, v_nmntvnodes);
610 		} else {
611 			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
612 					  v_nmntvnodes);
613 			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
614 			if (vp == NULL)
615 				break;
616 		}
617 
618 		/*
619 		 * __VNODESCAN__
620 		 *
621 		 * The VP will stick around while we hold mntvnode_token,
622 		 * at least until we block, so we can safely do an initial
623 		 * check, and then must check again after we lock the vnode.
624 		 */
625 		if (vp->v_type == VNON ||	/* syncer or indeterminate */
626 		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
627 		) {
628 			--count;
629 			continue;
630 		}
631 
632 		/*
633 		 * VX get the candidate vnode.  If the VX get fails the
634 		 * vnode might still be on the mountlist.  Our loop depends
635 		 * on us at least cycling the vnode to the end of the
636 		 * mountlist.
637 		 */
638 		if (vx_get_nonblock(vp) != 0) {
639 			--count;
640 			continue;
641 		}
642 
643 		/*
644 		 * Since we blocked locking the vp, make sure it is still
645 		 * a candidate for reclamation.  That is, it has not already
646 		 * been reclaimed and only has our VX reference associated
647 		 * with it.
648 		 */
649 		if (vp->v_type == VNON ||	/* syncer or indeterminate */
650 		    (vp->v_flag & VRECLAIMED) ||
651 		    vp->v_mount != mp ||
652 		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
653 		) {
654 			--count;
655 			vx_put(vp);
656 			continue;
657 		}
658 
659 		/*
660 		 * All right, we are good, move the vp to the end of the
661 		 * mountlist and clean it out.  The vget will have returned
662 		 * an error if the vnode was destroyed (VRECLAIMED set), so we
663 		 * do not have to check again.  The vput() will move the
664 		 * vnode to the free list if the vgone() was successful.
665 		 */
666 		KKASSERT(vp->v_mount == mp);
667 		vgone_vxlocked(vp);
668 		vx_put(vp);
669 		++done;
670 		--count;
671 	}
672 	lwkt_reltoken(&mntvnode_token);
673 	return (done);
674 }
675 
676 /*
677  * Attempt to recycle vnodes in a context that is always safe to block.
678  * Calling vlrureclaim() from the bowels of file system code has some
679  * interesting deadlock problems.
680  */
681 static struct thread *vnlruthread;
682 
683 static void
684 vnlru_proc(void)
685 {
686 	struct thread *td = curthread;
687 	struct vnlru_info info;
688 	int done;
689 
690 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
691 			      SHUTDOWN_PRI_FIRST);
692 
693 	for (;;) {
694 		kproc_suspend_loop();
695 
696 		/*
697 		 * Do some opportunistic roving.
698 		 */
699 		if (numvnodes > 100000)
700 			vnode_free_rover_scan(50);
701 		else if (numvnodes > 10000)
702 			vnode_free_rover_scan(20);
703 		else
704 			vnode_free_rover_scan(5);
705 
706 		/*
707 		 * Try to free some vnodes if we have too many
708 		 */
709 		if (numvnodes > desiredvnodes &&
710 		    freevnodes > desiredvnodes * 2 / 10) {
711 			int count = numvnodes - desiredvnodes;
712 
713 			if (count > freevnodes / 100)
714 				count = freevnodes / 100;
715 			if (count < 5)
716 				count = 5;
717 			freesomevnodes(count);
718 		}
719 
720 		/*
721 		 * Nothing to do if most of our vnodes are already on
722 		 * the free list.
723 		 */
724 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
725 			tsleep(vnlruthread, 0, "vlruwt", hz);
726 			continue;
727 		}
728 		cache_hysteresis();
729 
730 		/*
731 		 * The pass iterates through the four combinations of
732 		 * VAGE0/VAGE1.  We want to get rid of aged small files
733 		 * first.
734 		 */
735 		info.pass = 0;
736 		done = 0;
737 		while (done == 0 && info.pass < 4) {
738 			done = mountlist_scan(vlrureclaim, &info,
739 					      MNTSCAN_FORWARD);
740 			++info.pass;
741 		}
742 
743 		/*
744 		 * The vlrureclaim() call only processes 1/10 of the vnodes
745  * on each mount.  If we couldn't find any, repeat the loop
746  * at least enough times to cover all available vnodes before
747  * we start sleeping.  Complain if the failure extends past
748  * 30 seconds, every 30 seconds.
749 		 */
750 		if (done == 0) {
751 			++vnlru_nowhere;
752 			if (vnlru_nowhere % 10 == 0)
753 				tsleep(vnlruthread, 0, "vlrup", hz * 3);
754 			if (vnlru_nowhere % 100 == 0)
755 				kprintf("vnlru_proc: vnode recycler stopped working!\n");
756 			if (vnlru_nowhere == 1000)
757 				vnlru_nowhere = 900;
758 		} else {
759 			vnlru_nowhere = 0;
760 		}
761 	}
762 }
763 
764 /*
765  * MOUNTLIST FUNCTIONS
766  */
767 
768 /*
769  * mountlist_insert (MP SAFE)
770  *
771  * Add a new mount point to the mount list.
772  */
773 void
774 mountlist_insert(struct mount *mp, int how)
775 {
776 	lwkt_gettoken(&mountlist_token);
777 	if (how == MNTINS_FIRST)
778 	    TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
779 	else
780 	    TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
781 	lwkt_reltoken(&mountlist_token);
782 }
783 
784 /*
785  * mountlist_interlock (MP SAFE)
786  *
787  * Execute the specified interlock function with the mountlist token
788  * held.  The function will be called in a serialized fashion versus
789  * other functions called through this mechanism.
790  */
791 int
792 mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
793 {
794 	int error;
795 
796 	lwkt_gettoken(&mountlist_token);
797 	error = callback(mp);
798 	lwkt_reltoken(&mountlist_token);
799 	return (error);
800 }
801 
802 /*
803  * mountlist_boot_getfirst (DURING BOOT ONLY)
804  *
805  * This function returns the first mount on the mountlist, which is
806  * expected to be the root mount.  Since no interlocks are obtained
807  * this function is only safe to use during booting.
808  */
809 
810 struct mount *
811 mountlist_boot_getfirst(void)
812 {
813 	return(TAILQ_FIRST(&mountlist));
814 }
815 
816 /*
817  * mountlist_remove (MP SAFE)
818  *
819  * Remove a node from the mountlist.  If this node is the next scan node
820  * for any active mountlist scans, the active mountlist scan will be
821  * adjusted to skip the node, thus allowing removals during mountlist
822  * scans.
823  */
824 void
825 mountlist_remove(struct mount *mp)
826 {
827 	struct mountscan_info *msi;
828 
829 	lwkt_gettoken(&mountlist_token);
830 	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
831 		if (msi->msi_node == mp) {
832 			if (msi->msi_how & MNTSCAN_FORWARD)
833 				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
834 			else
835 				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
836 		}
837 	}
838 	TAILQ_REMOVE(&mountlist, mp, mnt_list);
839 	lwkt_reltoken(&mountlist_token);
840 }
841 
842 /*
843  * mountlist_exists (MP SAFE)
844  *
845  * Checks if a node exists in the mountlist.
846  * This function is mainly used by VFS quota code to check if a
847  * cached nullfs struct mount pointer is still valid at use time.
848  *
849  * FIXME: there is no guarantee that the mp passed to this function
850  * will be the same one used by VFS_ACCOUNT() later.
851  */
852 int
853 mountlist_exists(struct mount *mp)
854 {
855 	int node_exists = 0;
856 	struct mount* lmp;
857 
858 	lwkt_gettoken(&mountlist_token);
859 	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
860 		if (lmp == mp) {
861 			node_exists = 1;
862 			break;
863 		}
864 	}
865 	lwkt_reltoken(&mountlist_token);
866 	return(node_exists);
867 }
868 
869 /*
870  * mountlist_scan (MP SAFE)
871  *
872  * Safely scan the mount points on the mount list.  Unless otherwise
873  * specified each mount point will be busied prior to the callback and
874  * unbusied afterwards.  The callback may safely remove any mount point
875  * without interfering with the scan.  If the current callback
876  * mount is removed the scanner will not attempt to unbusy it.
877  *
878  * If a mount node cannot be busied it is silently skipped.
879  *
880  * The callback return value is aggregated and a total is returned.  A return
881  * value of < 0 is not aggregated and will terminate the scan.
882  *
883  * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
884  * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
885  * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
886  *			  the mount node.
887  */
888 int
889 mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
890 {
891 	struct mountscan_info info;
892 	struct mount *mp;
893 	int count;
894 	int res;
895 
896 	lwkt_gettoken(&mountlist_token);
897 
898 	info.msi_how = how;
899 	info.msi_node = NULL;	/* paranoia */
900 	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
901 
902 	res = 0;
903 
904 	if (how & MNTSCAN_FORWARD) {
905 		info.msi_node = TAILQ_FIRST(&mountlist);
906 		while ((mp = info.msi_node) != NULL) {
907 			if (how & MNTSCAN_NOBUSY) {
908 				count = callback(mp, data);
909 			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
910 				count = callback(mp, data);
911 				if (mp == info.msi_node)
912 					vfs_unbusy(mp);
913 			} else {
914 				count = 0;
915 			}
916 			if (count < 0)
917 				break;
918 			res += count;
919 			if (mp == info.msi_node)
920 				info.msi_node = TAILQ_NEXT(mp, mnt_list);
921 		}
922 	} else if (how & MNTSCAN_REVERSE) {
923 		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
924 		while ((mp = info.msi_node) != NULL) {
925 			if (how & MNTSCAN_NOBUSY) {
926 				count = callback(mp, data);
927 			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
928 				count = callback(mp, data);
929 				if (mp == info.msi_node)
930 					vfs_unbusy(mp);
931 			} else {
932 				count = 0;
933 			}
934 			if (count < 0)
935 				break;
936 			res += count;
937 			if (mp == info.msi_node)
938 				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
939 		}
940 	}
941 	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
942 	lwkt_reltoken(&mountlist_token);
943 	return(res);
944 }
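
/*
 * Illustrative sketch (not part of the source): a minimal mountlist_scan()
 * callback.  Counting read-write mounts is a made-up purpose; the callback
 * signature, the busied-mount guarantee and the aggregated return value
 * follow the description above.
 *
 *	static int
 *	count_rw_callback(struct mount *mp, void *data)
 *	{
 *		if ((mp->mnt_flag & MNT_RDONLY) == 0)
 *			return (1);	(aggregated into the scan total)
 *		return (0);
 *	}
 *
 *	n = mountlist_scan(count_rw_callback, NULL, MNTSCAN_FORWARD);
 */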
945 
946 /*
947  * MOUNT RELATED VNODE FUNCTIONS
948  */
949 
950 static struct kproc_desc vnlru_kp = {
951 	"vnlru",
952 	vnlru_proc,
953 	&vnlruthread
954 };
955 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);
956 
957 /*
958  * Move a vnode from one mount queue to another.
959  *
960  * MPSAFE
961  */
962 void
963 insmntque(struct vnode *vp, struct mount *mp)
964 {
965 	lwkt_gettoken(&mntvnode_token);
966 	/*
967 	 * Delete from old mount point vnode list, if on one.
968 	 */
969 	if (vp->v_mount != NULL) {
970 		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
971 			("bad mount point vnode list size"));
972 		vremovevnodemnt(vp);
973 		vp->v_mount->mnt_nvnodelistsize--;
974 	}
975 	/*
976 	 * Insert into list of vnodes for the new mount point, if available.
977 	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
978 	 */
979 	if ((vp->v_mount = mp) == NULL) {
980 		lwkt_reltoken(&mntvnode_token);
981 		return;
982 	}
983 	if (mp->mnt_syncer) {
984 		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
985 	} else {
986 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
987 	}
988 	mp->mnt_nvnodelistsize++;
989 	lwkt_reltoken(&mntvnode_token);
990 }
991 
992 
993 /*
994  * Scan the vnodes under a mount point and issue appropriate callbacks.
995  *
996  * The fastfunc() callback is called with just the mntvnode_token held
997  * (no vnode lock).  It may not block and the vnode may be undergoing
998  * modifications while the caller is processing it.  The vnode will
999  * not be entirely destroyed, however, because the mntvnode_token is
1000  * held.  A return value < 0 skips to the next vnode without calling
1001  * the slowfunc(); a return value > 0 terminates the loop.
1002  *
1003  * The slowfunc() callback is called after the vnode has been successfully
1004  * locked based on passed flags.  The vnode is skipped if it gets rearranged
1005  * or destroyed while blocking on the lock.  A non-zero return value from
1006  * the slow function terminates the loop.  The slow function is allowed to
1007  * arbitrarily block.  The scanning code guarantees consistency of operation
1008  * even if the slow function deletes or moves the node, or blocks and some
1009  * other thread deletes or moves the node.
1010  *
1011  * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
1012  *	 out from under the fastfunc()'s vnode test.  It will not prevent
1013  *	 v_object from getting NULL'd out but it will ensure that the
1014  *	 pointer (if we race) will remain stable.
1015  */
1016 int
1017 vmntvnodescan(
1018     struct mount *mp,
1019     int flags,
1020     int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
1021     int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
1022     void *data
1023 ) {
1024 	struct vmntvnodescan_info info;
1025 	struct vnode *vp;
1026 	int r = 0;
1027 	int maxcount = mp->mnt_nvnodelistsize * 2;
1028 	int stopcount = 0;
1029 	int count = 0;
1030 
1031 	lwkt_gettoken(&mntvnode_token);
1032 	lwkt_gettoken(&vmobj_token);
1033 
1034 	/*
1035 	 * If asked to do one pass, stop after iterating available vnodes.
1036 	 * Under heavy loads new vnodes can be added while we are scanning,
1037 	 * so this isn't perfect.  Create a slop factor of 2x.
1038 	 */
1039 	if (flags & VMSC_ONEPASS)
1040 		stopcount = mp->mnt_nvnodelistsize;
1041 
1042 	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
1043 	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
1044 	while ((vp = info.vp) != NULL) {
1045 		if (--maxcount == 0) {
1046 			kprintf("Warning: excessive fssync iteration\n");
1047 			maxcount = mp->mnt_nvnodelistsize * 2;
1048 		}
1049 
1050 		/*
1051 		 * Skip if visible but not ready, or special (e.g.
1052 		 * mp->mnt_syncer)
1053 		 */
1054 		if (vp->v_type == VNON)
1055 			goto next;
1056 		KKASSERT(vp->v_mount == mp);
1057 
1058 		/*
1059 		 * Quick test.  A negative return continues the loop without
1060 		 * calling the slow test.  0 continues onto the slow test.
1061 		 * A positive number aborts the loop.
1062 		 */
1063 		if (fastfunc) {
1064 			if ((r = fastfunc(mp, vp, data)) < 0) {
1065 				r = 0;
1066 				goto next;
1067 			}
1068 			if (r)
1069 				break;
1070 		}
1071 
1072 		/*
1073 		 * Get a vxlock on the vnode, retry if it has moved or isn't
1074 		 * in the mountlist where we expect it.
1075 		 */
1076 		if (slowfunc) {
1077 			int error;
1078 
1079 			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
1080 			case VMSC_GETVP:
1081 				error = vget(vp, LK_EXCLUSIVE);
1082 				break;
1083 			case VMSC_GETVP|VMSC_NOWAIT:
1084 				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
1085 				break;
1086 			case VMSC_GETVX:
1087 				vx_get(vp);
1088 				error = 0;
1089 				break;
1090 			default:
1091 				error = 0;
1092 				break;
1093 			}
1094 			if (error)
1095 				goto next;
1096 			/*
1097 			 * Do not call the slow function if the vnode is
1098 			 * invalid or if it was ripped out from under us
1099 			 * while we (potentially) blocked.
1100 			 */
1101 			if (info.vp == vp && vp->v_type != VNON)
1102 				r = slowfunc(mp, vp, data);
1103 
1104 			/*
1105 			 * Cleanup
1106 			 */
1107 			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
1108 			case VMSC_GETVP:
1109 			case VMSC_GETVP|VMSC_NOWAIT:
1110 				vput(vp);
1111 				break;
1112 			case VMSC_GETVX:
1113 				vx_put(vp);
1114 				break;
1115 			default:
1116 				break;
1117 			}
1118 			if (r != 0)
1119 				break;
1120 		}
1121 
1122 next:
1123 		/*
1124 		 * Yield after some processing.  Depending on the number
1125 		 * of vnodes, we might wind up running for a long time.
1126 		 * Because threads are not preemptable, time critical
1127 		 * userland processes might starve.  Give them a chance
1128 		 * now and then.
1129 		 */
1130 		if (++count == 10000) {
1131 			/* We really want to yield a bit, so we simply sleep a tick */
1132 			tsleep(mp, 0, "vnodescn", 1);
1133 			count = 0;
1134 		}
1135 
1136 		/*
1137 		 * If doing one pass this decrements to zero.  If it starts
1138 		 * at zero it is effectively unlimited for the purposes of
1139 		 * this loop.
1140 		 */
1141 		if (--stopcount == 0)
1142 			break;
1143 
1144 		/*
1145 		 * Iterate.  If the vnode was ripped out from under us
1146 		 * info.vp will already point to the next vnode, otherwise
1147 		 * we have to obtain the next valid vnode ourselves.
1148 		 */
1149 		if (info.vp == vp)
1150 			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
1151 	}
1152 	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
1153 	lwkt_reltoken(&vmobj_token);
1154 	lwkt_reltoken(&mntvnode_token);
1155 	return(r);
1156 }
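
/*
 * Illustrative sketch (not part of the source): a typical vmntvnodescan()
 * caller.  The dirty-buffer test and the VOP_FSYNC() arguments are
 * assumptions for illustration only; the fastfunc/slowfunc split and the
 * VMSC_GETVP locking mode follow the contract described above.
 *
 *	static int
 *	sync_fastfunc(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (RB_EMPTY(&vp->v_rbdirty_tree))
 *			return (-1);	(clean, skip without locking)
 *		return (0);		(fall through to the slowfunc)
 *	}
 *
 *	static int
 *	sync_slowfunc(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (VOP_FSYNC(vp, MNT_NOWAIT, 0));
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP | VMSC_NOWAIT,
 *		      sync_fastfunc, sync_slowfunc, NULL);
 */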
1157 
1158 /*
1159  * Remove any vnodes in the vnode table belonging to mount point mp.
1160  *
1161  * If FORCECLOSE is not specified, there should not be any active ones;
1162  * return an error if any are found (nb: this is a user error, not a
1163  * system error). If FORCECLOSE is specified, detach any active vnodes
1164  * that are found.
1165  *
1166  * If WRITECLOSE is set, only flush out regular file vnodes open for
1167  * writing.
1168  *
1169  * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
1170  *
1171  * `rootrefs' specifies the base reference count for the root vnode
1172  * of this filesystem. The root vnode is considered busy if its
1173  * v_sysref.refcnt exceeds this value. On a successful return, vflush()
1174  * will call vrele() on the root vnode exactly rootrefs times.
1175  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
1176  * be zero.
1177  */
1178 #ifdef DIAGNOSTIC
1179 static int busyprt = 0;		/* print out busy vnodes */
1180 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1181 #endif
1182 
1183 static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);
1184 
1185 struct vflush_info {
1186 	int flags;
1187 	int busy;
1188 	thread_t td;
1189 };
1190 
1191 int
1192 vflush(struct mount *mp, int rootrefs, int flags)
1193 {
1194 	struct thread *td = curthread;	/* XXX */
1195 	struct vnode *rootvp = NULL;
1196 	int error;
1197 	struct vflush_info vflush_info;
1198 
1199 	if (rootrefs > 0) {
1200 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
1201 		    ("vflush: bad args"));
1202 		/*
1203 		 * Get the filesystem root vnode. We can vput() it
1204 		 * immediately, since with rootrefs > 0, it won't go away.
1205 		 */
1206 		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
1207 			if ((flags & FORCECLOSE) == 0)
1208 				return (error);
1209 			rootrefs = 0;
1210 			/* continue anyway */
1211 		}
1212 		if (rootrefs)
1213 			vput(rootvp);
1214 	}
1215 
1216 	vflush_info.busy = 0;
1217 	vflush_info.flags = flags;
1218 	vflush_info.td = td;
1219 	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);
1220 
1221 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
1222 		/*
1223 		 * If just the root vnode is busy, and if its refcount
1224 		 * is equal to `rootrefs', then go ahead and kill it.
1225 		 */
1226 		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
1227 		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
1228 		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
1229 			vx_lock(rootvp);
1230 			vgone_vxlocked(rootvp);
1231 			vx_unlock(rootvp);
1232 			vflush_info.busy = 0;
1233 		}
1234 	}
1235 	if (vflush_info.busy)
1236 		return (EBUSY);
1237 	for (; rootrefs > 0; rootrefs--)
1238 		vrele(rootvp);
1239 	return (0);
1240 }
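
/*
 * Illustrative sketch (not part of the source): how a filesystem unmount
 * path typically uses vflush().  The rootrefs value of 1 assumes the
 * filesystem holds exactly one long-term reference on its root vnode,
 * and "mntflags" is the hypothetical unmount flags argument.
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *	if (error)
 *		return (error);		(busy vnodes remain, abort unmount)
 */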
1241 
1242 /*
1243  * The scan callback is made with an VX locked vnode.
1244  */
1245 static int
1246 vflush_scan(struct mount *mp, struct vnode *vp, void *data)
1247 {
1248 	struct vflush_info *info = data;
1249 	struct vattr vattr;
1250 	int flags = info->flags;
1251 
1252 	/*
1253 	 * Skip over vnodes marked VSYSTEM.
1254 	 */
1255 	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1256 		return(0);
1257 	}
1258 
1259 	/*
1260 	 * Do not force-close VCHR or VBLK vnodes
1261 	 */
1262 	if (vp->v_type == VCHR || vp->v_type == VBLK)
1263 		flags &= ~(WRITECLOSE|FORCECLOSE);
1264 
1265 	/*
1266 	 * If WRITECLOSE is set, flush out unlinked but still open
1267 	 * files (even if open only for reading) and regular file
1268 	 * vnodes open for writing.
1269 	 */
1270 	if ((flags & WRITECLOSE) &&
1271 	    (vp->v_type == VNON ||
1272 	    (VOP_GETATTR(vp, &vattr) == 0 &&
1273 	    vattr.va_nlink > 0)) &&
1274 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1275 		return(0);
1276 	}
1277 
1278 	/*
1279 	 * If we are the only holder (refcnt of 1) or the vnode is in
1280 	 * termination (refcnt < 0), we can vgone the vnode.
1281 	 */
1282 	if (vp->v_sysref.refcnt <= 1) {
1283 		vgone_vxlocked(vp);
1284 		return(0);
1285 	}
1286 
1287 	/*
1288 	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
1289 	 * it to a dummymount structure so vop_*() functions don't deref
1290 	 * a NULL pointer.
1291 	 */
1292 	if (flags & FORCECLOSE) {
1293 		vhold(vp);
1294 		vgone_vxlocked(vp);
1295 		if (vp->v_mount == NULL)
1296 			insmntque(vp, &dummymount);
1297 		vdrop(vp);
1298 		return(0);
1299 	}
1300 	if (vp->v_type == VCHR || vp->v_type == VBLK)
1301 		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
1302 #ifdef DIAGNOSTIC
1303 	if (busyprt)
1304 		vprint("vflush: busy vnode", vp);
1305 #endif
1306 	++info->busy;
1307 	return(0);
1308 }
1309 
1310 void
1311 add_bio_ops(struct bio_ops *ops)
1312 {
1313 	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
1314 }
1315 
1316 void
1317 rem_bio_ops(struct bio_ops *ops)
1318 {
1319 	TAILQ_REMOVE(&bio_ops_list, ops, entry);
1320 }
1321 
1322 /*
1323  * This calls the bio_ops io_sync function either for a mount point
1324  * or generally.
1325  *
1326  * WARNING: softdeps is weirdly coded and just isn't happy unless
1327  * io_sync is called with a NULL mount from the general syncing code.
1328  */
1329 void
1330 bio_ops_sync(struct mount *mp)
1331 {
1332 	struct bio_ops *ops;
1333 
1334 	if (mp) {
1335 		if ((ops = mp->mnt_bioops) != NULL)
1336 			ops->io_sync(mp);
1337 	} else {
1338 		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
1339 			ops->io_sync(NULL);
1340 		}
1341 	}
1342 }
1343 
1344 /*
1345  * Lookup a mount point by nch
1346  */
1347 struct mount *
1348 mount_get_by_nc(struct namecache *ncp)
1349 {
1350 	struct mount *mp = NULL;
1351 
1352 	lwkt_gettoken(&mountlist_token);
1353 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1354 		if (ncp == mp->mnt_ncmountpt.ncp)
1355 			break;
1356 	}
1357 	lwkt_reltoken(&mountlist_token);
1358 	return (mp);
1359 }
1360 
1361