/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
			mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and then vx_put() the vnode, or vx_unlock() it to
 * leave a vref, or call vx_downgrade(vp) to downgrade the VX lock to a
 * normal VN lock and return a normal locked vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
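
/*
 * Illustrative usage sketch (editorial, not code from this file): a
 * filesystem's vget-style routine might create a vnode roughly as
 * follows.  The tag, v_type, and "ip" private data are hypothetical.
 *
 *	struct vnode *vp;
 *
 *	getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *	vp->v_type = VREG;
 *	vp->v_data = ip;		(hypothetical private inode)
 *	vx_downgrade(vp);		(VX lock -> normal VN lock)
 *	*vpp = vp;
 */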

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
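
/*
 * Illustrative usage sketch (editorial, not code from this file):
 * callers pair vfs_busy()/vfs_unbusy() to hold off an unmount while
 * they operate on the mount point.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		...operate on mp; dounmount() is blocked by the
 *		   shared mnt_lock until we release it...
 *		vfs_unbusy(mp);
 *	}
 */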

/*
 * Lookup a filesystem type, and if found, allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_op = ops;
	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
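
/*
 * Illustrative usage sketch (editorial, not code from this file): the
 * hold returned by vfs_getvfs() must be dropped by the caller.
 *
 *	struct mount *mp;
 *
 *	mp = vfs_getvfs(&fsid);
 *	if (mp) {
 *		...use mp; it cannot be freed out from under us...
 *		mount_drop(mp);
 *	}
 */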

/*
 * Generate an FSID based on the mountpt.  The FSID will be adjusted to
 * avoid collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}
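
/*
 * Editorial note (assuming the standard makeudev() encoding): the
 * expression above fakes a device number with major 255 and a minor
 * taken from the CRC of the mount path.  makeudev(255, 0) has only the
 * major-field bits set, so masking the CRC with ~makeudev(255, 0)
 * confines it to the minor-field bits before the two are merged.
 */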

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  The filesystem should attempt
 * to supply a unique fsid, but if a duplicate occurs the fsid is adjusted
 * to ensure uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}
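
/*
 * Worked example of the collision remap above (editorial sketch): the
 * split 24-bit minor field (mask 0xFFFF00FF) is compressed into one
 * contiguous counter, incremented, and expanded again.  Starting from
 * val = 0x00010045:
 *
 *	compress:  ((val & 0xFFFF0000) >> 8) | (val & 0xFF) = 0x00000145
 *	increment: 0x00000146
 *	expand:    ((val << 8) & 0xFFFF0000) | (val & 0xFF) = 0x00010046
 *
 * so repeated collisions simply count through the minor-number space.
 */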

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly
 * used by the VFS quota code to check if a cached nullfs struct mount
 * pointer is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
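
/*
 * Illustrative usage sketch (editorial, not code from this file):
 * count read-only mounts with a forward scan.  The callback runs with
 * the mount held and busied; a negative return would abort the scan.
 *
 *	static int
 *	count_rdonly_callback(struct mount *mp, void *data)
 *	{
 *		return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
 *	}
 *
 *	n = mountlist_scan(count_rdonly_callback, NULL, MNTSCAN_FORWARD);
 */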

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without
 * calling the slowfunc(); a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time-critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
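
/*
 * Illustrative usage sketch (editorial, not code from this file): a
 * sync-style pass over every vnode under a mount, with each vnode
 * vget()'d across the callback.  A non-zero return stops the scan.
 *
 *	static int
 *	sync_one_callback(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		...vp is locked and referenced here...
 *		return (0);
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP | VMSC_NOWAIT,
 *		      NULL, sync_one_callback, NULL);
 */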

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
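
/*
 * Illustrative usage sketch (editorial, based on typical unmount paths
 * rather than code from this file): a VFS unmount op usually flushes
 * its vnodes with one root reference outstanding.
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *	if (error)
 *		return (error);
 */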

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking, try to deactivate on 0 refs (catch-all).
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}
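
/*
 * Illustrative registration sketch (editorial; the exact layout of
 * struct bio_ops is assumed from the usage above): a client such as
 * softdep supplies an io_sync handler and registers itself, after
 * which it participates in generic syncs.
 *
 *	static struct bio_ops my_bio_ops = {
 *		.io_sync = my_io_sync,		(hypothetical handler)
 *	};
 *
 *	add_bio_ops(&my_bio_ops);
 */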

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}
1245