xref: /dragonfly/sys/kern/vfs_mount.c (revision bbb35c81)
/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
			mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount, NULL);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.  Or, if he wishes to return a normal locked
 * vnode, call vx_downgrade(vp) to downgrade the VX lock to a normal
 * VN lock.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
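
/*
 * Example (hypothetical sketch, not compiled): typical filesystem glue
 * around getnewvnode().  The 'myfs_inode' type and the field setup are
 * illustrative assumptions; only getnewvnode(), vx_downgrade() and the
 * VX-locked return convention come from the code above.
 */
#if 0
static int
myfs_alloc_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);	/* VX locked, refd */
	if (error)
		return (error);
	vp->v_data = ip;		/* attach fs-private data */
	vp->v_type = VREG;		/* vnode was VNON until now */
	vx_downgrade(vp);		/* VX lock -> normal VN lock */
	*vpp = vp;
	return (0);
}
#endif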

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
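
/*
 * Example (hypothetical sketch, not compiled): the canonical busy/unbusy
 * bracket around access to a mount.  While the shared busy lock is held
 * the filesystem cannot be unmounted.
 */
#if 0
static int
example_touch_mount(struct mount *mp)
{
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (ENOENT);	/* unmount in progress */
	/* ... safely reference mp->mnt_stat, issue VFS ops, etc ... */
	vfs_unbusy(mp);
	return (0);
}
#endif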

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp, vfsp->vfc_vfsops);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
	lockinit(&mp->mnt_renlock, "renamlk", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp, struct vfsops *ops)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lockinit(&mp->mnt_renlock, "renamlk", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	mp->mnt_op = ops;
	if (ops == NULL || (ops->vfs_flags & VFSOPSF_NOSYNCERTHR) == 0)
		vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
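
/*
 * Example (hypothetical sketch, not compiled): an fsid lookup as an
 * NFS-style file handle path might use it.  The hold returned by
 * vfs_getvfs() must be balanced with mount_drop().
 */
#if 0
static int
example_fsid_lookup(fsid_t *fsid)
{
	struct mount *mp;

	mp = vfs_getvfs(fsid);
	if (mp == NULL)
		return (ESTALE);
	/* ... mp cannot be freed while the hold is active ... */
	mount_drop(mp);
	return (0);
}
#endif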

/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		/*kprintf("getnewfsid %08x %08x %s\n", tfsid.val[0], tfsid.val[1], retbuf);*/
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}
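
/*
 * Worked example of the fsid composition above, assuming the conventional
 * makeudev(major, minor) = (major << 8) | minor layout: makeudev(255, 0)
 * is 0x0000FF00, so ~makeudev(255, 0) is the minor mask 0xFFFF00FF (the
 * same mask mountlist_insert() rotates on collision).  A path whose crc32
 * is 0x1234ABCD then yields
 *
 *	val[0] = 0xFF00 | (0x1234ABCD & 0xFFFF00FF) = 0x1234FFCD
 *
 * i.e. major 255 with a path-derived minor.  (Numbers illustrative only.)
 */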

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}
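
/*
 * Worked example of the recycling trigger above (illustrative numbers):
 * with maxvnodes = 100000, recycling requires numvnodes >= 90000 and at
 * least 50000 cached-and-inactive vnodes.  At numvnodes = 95000 the raw
 * request is 95000 - 90000 = 5000 vnodes, clamped down to 1% of the
 * cached-and-inactive count (600 for 60000) and up to a floor of 5.
 */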

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  The filesystem should attempt
 * to supply a unique fsid, but if a duplicate occurs the fsid is adjusted
 * here to ensure uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}
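
/*
 * Worked example of the collision adjustment above: the minor field of
 * val[0] is split across bits 0-7 and 16-31 (mask 0xFFFF00FF), so the
 * code packs it contiguous, increments it, and spreads it back out.
 * For an illustrative val[0] of 0x001200FF:
 *
 *	pack:      ((0x001200FF & 0xFFFF0000) >> 8) | 0xFF = 0x000012FF
 *	increment: 0x000012FF + 1                          = 0x00001300
 *	spread:    ((0x00001300 << 8) & 0xFFFF0000) | 0x00 = 0x00130000
 */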

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
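
/*
 * Example (hypothetical sketch, not compiled): a short-lived callback
 * run serialized under mountlist_token via mountlist_interlock().
 */
#if 0
static int
example_interlock_cb(struct mount *mp)
{
	return ((mp->mnt_kern_flag & MNTK_UNMOUNT) ? EBUSY : 0);
}

/* caller: error = mountlist_interlock(example_interlock_cb, mp); */
#endif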

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by the VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 * MNTSCAN_NOUNLOCK	- Do not unlock mountlist_token across callback
 *
 * NOTE: mountlist_token is not held across the callback (unless
 *	 MNTSCAN_NOUNLOCK is specified).
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;
	int dounlock = ((how & MNTSCAN_NOUNLOCK) == 0);

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				if (dounlock)
					lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				if (dounlock)
					lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
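
/*
 * Example (hypothetical sketch, not compiled): a mountlist_scan()
 * callback.  Non-negative returns are summed into the scan total;
 * a negative return terminates the scan.
 */
#if 0
static int
example_count_rdonly(struct mount *mp, void *data)
{
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

/* caller: n = mountlist_scan(example_count_rdonly, NULL, MNTSCAN_FORWARD); */
#endif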

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mount's mnt_token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mnt_token
 * is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(); a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptible, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
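
/*
 * Example (hypothetical sketch, not compiled): a fastfunc/slowfunc pair
 * for vmntvnodescan().  The fast test cheaply skips non-regular vnodes
 * without locking; the slow function runs with the vnode VX locked
 * (VMSC_GETVX) and may block.  The VOP_FSYNC() call is an illustrative
 * assumption about a typical slow-path operation.
 */
#if 0
static int
example_fastfunc(struct mount *mp, struct vnode *vp, void *data)
{
	/* < 0 skips the vnode, 0 proceeds to slowfunc, > 0 aborts */
	return (vp->v_type == VREG ? 0 : -1);
}

static int
example_slowfunc(struct mount *mp, struct vnode *vp, void *data)
{
	/* vnode is VX locked here; blocking is allowed */
	return (VOP_FSYNC(vp, MNT_WAIT, 0));
}

/*
 * caller:
 *	r = vmntvnodescan(mp, VMSC_GETVX, example_fastfunc,
 *			  example_slowfunc, NULL);
 */
#endif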

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
		    ("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
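
/*
 * Example (hypothetical sketch, not compiled): typical VFS_UNMOUNT()
 * usage with one base reference held on the filesystem root vnode,
 * adding FORCECLOSE for forced unmounts.
 */
#if 0
static int
example_unmount(struct mount *mp, int mntflags)
{
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, 1, flags));	/* rootrefs == 1 */
}
#endif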

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}
1244