xref: /dragonfly/sys/kern/vfs_mount.c (revision 2ee85085)
1 /*
2  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1989, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  * (c) UNIX System Laboratories, Inc.
37  * All or some portions of this file are derived from material licensed
38  * to the University of California by American Telephone and Telegraph
39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40  * the permission of UNIX System Laboratories, Inc.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by the University of
53  *	California, Berkeley and its contributors.
54  * 4. Neither the name of the University nor the names of its contributors
55  *    may be used to endorse or promote products derived from this software
56  *    without specific prior written permission.
57  *
58  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
59  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
60  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
61  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
62  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
63  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
64  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
65  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
66  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
67  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68  * SUCH DAMAGE.
69  *
70  * $DragonFly: src/sys/kern/vfs_mount.c,v 1.11 2005/06/06 15:02:28 dillon Exp $
71  */
72 
73 /*
74  * External virtual filesystem routines
75  */
76 #include "opt_ddb.h"
77 
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/kernel.h>
81 #include <sys/malloc.h>
82 #include <sys/mount.h>
83 #include <sys/proc.h>
84 #include <sys/vnode.h>
85 #include <sys/buf.h>
86 #include <sys/eventhandler.h>
87 #include <sys/kthread.h>
88 #include <sys/sysctl.h>
89 
90 #include <machine/limits.h>
91 
92 #include <sys/buf2.h>
93 #include <sys/thread2.h>
94 
95 #include <vm/vm.h>
96 #include <vm/vm_object.h>
97 
98 struct mountscan_info {
99 	TAILQ_ENTRY(mountscan_info) msi_entry;
100 	int msi_how;
101 	struct mount *msi_node;
102 };
103 
104 struct vmntvnodescan_info {
105 	TAILQ_ENTRY(vmntvnodescan_info) entry;
106 	struct vnode *vp;
107 };
108 
109 static int vnlru_nowhere = 0;
110 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
111 	    &vnlru_nowhere, 0,
112 	    "Number of times the vnlru process ran without success");
113 
114 
115 static struct lwkt_token mntid_token;
116 
117 static struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
118 static TAILQ_HEAD(,mountscan_info) mountscan_list;
119 static struct lwkt_token mountlist_token;
120 static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
121 struct lwkt_token mntvnode_token;
122 
123 /*
124  * Called from vfsinit()
125  */
126 void
127 vfs_mount_init(void)
128 {
129 	lwkt_token_init(&mountlist_token);
130 	lwkt_token_init(&mntvnode_token);
131 	lwkt_token_init(&mntid_token);
132 	TAILQ_INIT(&mountscan_list);
133 	TAILQ_INIT(&mntvnodescan_list);
134 }
135 
136 /*
137  * Support function called with mntvnode_token held to remove a vnode
138  * from the mountlist.  We must update any list scans which are in progress.
139  */
140 static void
141 vremovevnodemnt(struct vnode *vp)
142 {
143 	struct vmntvnodescan_info *info;
144 
145 	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
146 		if (info->vp == vp)
147 			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
148 	}
149 	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
150 }
151 
152 /*
153  * Support function called with mntvnode_token held to move a vnode to
154  * the end of the list.
155  */
156 static void
157 vmovevnodetoend(struct mount *mp, struct vnode *vp)
158 {
159 	vremovevnodemnt(vp);
160 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
161 }
162 
163 
164 /*
165  * Allocate a new vnode and associate it with a tag, mount point, and
166  * operations vector.
167  *
168  * A VX locked and refd vnode is returned.  The caller should setup the
169  * remaining fields and vx_put() or, if he wishes to leave a vref,
170  * vx_unlock() the vnode.
171  */
172 int
173 getnewvnode(enum vtagtype tag, struct mount *mp,
174 		struct vnode **vpp, int lktimeout, int lkflags)
175 {
176 	struct vnode *vp;
177 
178 	KKASSERT(mp != NULL);
179 
180 	vp = allocvnode(lktimeout, lkflags);
181 	vp->v_tag = tag;
182 	vp->v_data = NULL;
183 
184 	/*
185 	 * By default the vnode is assigned the mount point's normal
186 	 * operations vector.
187 	 */
188 	vp->v_ops = &mp->mnt_vn_use_ops;
189 
190 	/*
191 	 * Placing the vnode on the mount point's queue makes it visible.
192 	 * VNON prevents it from being messed with, however.
193 	 */
194 	insmntque(vp, mp);
195 	vfs_object_create(vp, curthread);
196 
197 	/*
198 	 * A VX locked & refd vnode is returned.
199 	 */
200 	*vpp = vp;
201 	return (0);
202 }
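/*
 * Usage sketch (illustrative only, not part of the original source):
 * a hypothetical filesystem allocates a vnode for one of its inodes,
 * finishes initializing it, and leaves a vref for the caller.  The
 * myfs_* names are invented for this example; lktimeout/lkflags of 0
 * are placeholder values.
 */
#if 0	/* EXAMPLE */
static int
myfs_get_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
	if (error)
		return (error);
	vp->v_data = ip;	/* set up the remaining fields */
	vp->v_type = VREG;	/* the vnode stays inert while VNON */
	vx_unlock(vp);		/* leave the vref for the caller */
	*vpp = vp;
	return (0);
}
#endif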
203 
204 /*
205  * This function creates vnodes with special operations vectors.  The
206  * mount point is optional.
207  *
208  * This routine is being phased out.
209  */
210 int
211 getspecialvnode(enum vtagtype tag, struct mount *mp,
212 		struct vop_ops **ops_pp,
213 		struct vnode **vpp, int lktimeout, int lkflags)
214 {
215 	struct vnode *vp;
216 
217 	vp = allocvnode(lktimeout, lkflags);
218 	vp->v_tag = tag;
219 	vp->v_data = NULL;
220 	vp->v_ops = ops_pp;
221 
222 	/*
223 	 * Placing the vnode on the mount point's queue makes it visible.
224 	 * VNON prevents it from being messed with, however.
225 	 */
226 	insmntque(vp, mp);
227 	vfs_object_create(vp, curthread);
228 
229 	/*
230 	 * A VX locked & refd vnode is returned.
231 	 */
232 	*vpp = vp;
233 	return (0);
234 }
235 
236 /*
237  * Interlock against an unmount, return 0 on success, non-zero on failure.
238  *
239  * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
240  * is in-progress.
241  *
242  * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
243  * are used.  A shared locked will be obtained and the filesystem will not
244  * be unmountable until the lock is released.
245  */
246 int
247 vfs_busy(struct mount *mp, int flags, struct thread *td)
248 {
249 	int lkflags;
250 
251 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
252 		if (flags & LK_NOWAIT)
253 			return (ENOENT);
254 		/* XXX not MP safe */
255 		mp->mnt_kern_flag |= MNTK_MWAIT;
256 		/*
257 		 * Since all busy locks are shared except the exclusive
258 		 * lock granted when unmounting, the only place that a
259 		 * wakeup needs to be done is at the release of the
260 		 * exclusive lock at the end of dounmount.
261 		 */
262 		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
263 		return (ENOENT);
264 	}
265 	lkflags = LK_SHARED | LK_NOPAUSE;
266 	if (lockmgr(&mp->mnt_lock, lkflags, NULL, td))
267 		panic("vfs_busy: unexpected lock failure");
268 	return (0);
269 }
270 
271 /*
272  * Free a busy filesystem.
273  */
274 void
275 vfs_unbusy(struct mount *mp, struct thread *td)
276 {
277 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
278 }
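/*
 * Usage sketch (illustrative only): interlock against an unmount while
 * working on a mount point.  With LK_NOWAIT the caller skips the mount
 * rather than sleeping if an unmount is in progress.
 * do_work_on_mount() is a hypothetical helper.
 */
#if 0	/* EXAMPLE */
	if (vfs_busy(mp, LK_NOWAIT, curthread) == 0) {
		/* mp cannot finish unmounting until we vfs_unbusy() it */
		do_work_on_mount(mp);
		vfs_unbusy(mp, curthread);
	}
#endif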
279 
280 /*
281  * Lookup a filesystem type, and if found allocate and initialize
282  * a mount structure for it.
283  *
284  * Devname is usually updated by mount(8) after booting.
285  */
286 int
287 vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
288 {
289 	struct thread *td = curthread;	/* XXX */
290 	struct vfsconf *vfsp;
291 	struct mount *mp;
292 
293 	if (fstypename == NULL)
294 		return (ENODEV);
295 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
296 		if (!strcmp(vfsp->vfc_name, fstypename))
297 			break;
298 	}
299 	if (vfsp == NULL)
300 		return (ENODEV);
301 	mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
302 	bzero((char *)mp, (u_long)sizeof(struct mount));
303 	lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
304 	vfs_busy(mp, LK_NOWAIT, td);
305 	TAILQ_INIT(&mp->mnt_nvnodelist);
306 	TAILQ_INIT(&mp->mnt_reservedvnlist);
307 	TAILQ_INIT(&mp->mnt_jlist);
308 	mp->mnt_nvnodelistsize = 0;
309 	mp->mnt_vfc = vfsp;
310 	mp->mnt_op = vfsp->vfc_vfsops;
311 	mp->mnt_flag = MNT_RDONLY;
312 	mp->mnt_vnodecovered = NULLVP;
313 	vfsp->vfc_refcount++;
314 	mp->mnt_iosize_max = DFLTPHYS;
315 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
316 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
317 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
318 	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
319 	*mpp = mp;
320 	return (0);
321 }
322 
323 /*
324  * Lookup a mount point by filesystem identifier.
325  */
326 struct mount *
327 vfs_getvfs(fsid_t *fsid)
328 {
329 	struct mount *mp;
330 	lwkt_tokref ilock;
331 
332 	lwkt_gettoken(&ilock, &mountlist_token);
333 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
334 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
335 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
336 			break;
337 		}
338 	}
339 	lwkt_reltoken(&ilock);
340 	return (mp);
341 }
342 
343 /*
344  * Get a new unique fsid.  Try to make its val[0] unique, since this value
345  * will be used to create fake device numbers for stat().  Also try (but
346  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
347  * support 16-bit device numbers.  We end up with unique val[0]'s for the
348  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
349  *
350  * Keep in mind that several mounts may be running in parallel.  Starting
351  * the search one past where the previous search terminated is both a
352  * micro-optimization and a defense against returning the same fsid to
353  * different mounts.
354  */
355 void
356 vfs_getnewfsid(struct mount *mp)
357 {
358 	static u_int16_t mntid_base;
359 	lwkt_tokref ilock;
360 	fsid_t tfsid;
361 	int mtype;
362 
363 	lwkt_gettoken(&ilock, &mntid_token);
364 	mtype = mp->mnt_vfc->vfc_typenum;
365 	tfsid.val[1] = mtype;
366 	mtype = (mtype & 0xFF) << 24;
367 	for (;;) {
368 		tfsid.val[0] = makeudev(255,
369 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
370 		mntid_base++;
371 		if (vfs_getvfs(&tfsid) == NULL)
372 			break;
373 	}
374 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
375 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
376 	lwkt_reltoken(&ilock);
377 }
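/*
 * Worked example of the minor number constructed above (a sketch
 * derived from the code, not original commentary):
 *
 *	bits 24-31	filesystem type (vfc_typenum & 0xFF)
 *	bits 16-23	high byte of mntid_base
 *	bits  8-15	zero
 *	bits  0-7	low byte of mntid_base
 *
 * All 16 bits of mntid_base land in val[0], giving unique fsids for
 * the first 2^16 mounts; an emulator that truncates device numbers to
 * 16 bits sees only the low byte, hence uniqueness mod 2^16 only holds
 * for the first 2^8 mounts.
 */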
378 
379 /*
380  * This routine is called when we have too many vnodes.  It attempts
381  * to free <count> vnodes and will potentially free vnodes that still
382  * have VM backing store (VM backing store is typically the cause
383  * of a vnode blowout so we want to do this).  Therefore, this operation
384  * is not considered cheap.
385  *
386  * A number of conditions may prevent a vnode from being reclaimed:
387  * the buffer cache may have references on the vnode, a directory
388  * vnode may still have references due to the namei cache representing
389  * underlying files, or the vnode may be in active use.   It is not
390  * desireable to reuse such vnodes.  These conditions may cause the
391  * number of vnodes to reach some minimum value regardless of what
392  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
393  */
394 
395 /*
396  * This is a quick non-blocking check to determine if the vnode is a good
397  * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
398  * not a good candidate, 1 if it is.
399  *
400  * vnodes marked VFREE are already on the free list, but may still need
401  * to be recycled due to eating namecache resources and potentially blocking
402  * the namecache directory chain and related vnodes from being freed.
403  */
404 static __inline int
405 vmightfree(struct vnode *vp, int page_count)
406 {
407 	if (vp->v_flag & VRECLAIMED)
408 		return (0);
409 	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
410 		return (0);
411 	if (vp->v_usecount != 0)
412 		return (0);
413 	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
414 		return (0);
415 	return (1);
416 }
417 
418 /*
419  * The vnode was found to be possibly vgone()able and the caller has locked it
420  * (thus the usecount should be 1 now).  Determine if the vnode is actually
421  * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
422  * can be vgone()'d, 0 otherwise.
423  *
424  * Note that v_holdcnt may be non-zero because (A) this vnode is not a leaf
425  * in the namecache topology and (B) this vnode has buffer cache bufs.
426  * We cannot remove vnodes with non-leaf namecache associations.  We do a
427  * tentitive leaf check prior to attempting to flush out any buffers but the
428  * 'real' test when all is said in done is that v_holdcnt must become 0 for
429  * the vnode to be freeable.
430  *
431  * We could theoretically just unconditionally flush when v_holdcnt != 0,
432  * but flushing data associated with non-leaf nodes (which are always
433  * directories) just throws it away for no benefit.  It is the buffer
434  * cache's responsibility to choose buffers to recycle from the cached
435  * data point of view.
436  */
437 static int
438 visleaf(struct vnode *vp)
439 {
440 	struct namecache *ncp;
441 
442 	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
443 		if (!TAILQ_EMPTY(&ncp->nc_list))
444 			return(0);
445 	}
446 	return(1);
447 }
448 
449 /*
450  * Try to clean up the vnode to the point where it can be vgone()'d, returning
451  * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
452  * vmightfree() this routine may flush the vnode and block.  Vnodes marked
453  * VFREE are still candidates for vgone()ing because they may hold namecache
454  * resources and could be blocking the namecache directory hierarchy (and
455  * related vnodes) from being freed.
456  */
457 static int
458 vtrytomakegoneable(struct vnode *vp, int page_count)
459 {
460 	if (vp->v_flag & VRECLAIMED)
461 		return (0);
462 	if (vp->v_usecount != 1)
463 		return (0);
464 	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
465 		return (0);
466 	if (vp->v_holdcnt && visleaf(vp)) {
467 		vinvalbuf(vp, V_SAVE, NULL, 0, 0);
468 #if 0	/* DEBUG */
469 		printf((vp->v_holdcnt ? "vrecycle: vp %p failed: %s\n" :
470 			"vrecycle: vp %p succeeded: %s\n"), vp,
471 			(TAILQ_FIRST(&vp->v_namecache) ?
472 			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
473 #endif
474 	}
475 	return(vp->v_usecount == 1 && vp->v_holdcnt == 0);
476 }
477 
478 /*
479  * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
480  * to avoid vnodes which have lots of resident pages (we are trying to free
481  * vnodes, not memory).
482  *
483  * This routine is a callback from the mountlist scan.  The mount point
484  * in question will be busied.
485  */
486 static int
487 vlrureclaim(struct mount *mp, void *data)
488 {
489 	struct vnode *vp;
490 	lwkt_tokref ilock;
491 	int done;
492 	int trigger;
493 	int usevnodes;
494 	int count;
495 	int trigger_mult = vnlru_nowhere;
496 
497 	/*
498 	 * Calculate the trigger point for the resident pages check.  The
499 	 * minimum trigger value is approximately the number of pages in
500 	 * the system divded by the number of vnodes.  However, due to
501 	 * various other system memory overheads unrelated to data caching
502 	 * it is a good idea to double the trigger (at least).
503 	 *
504 	 * trigger_mult starts at 0.  If the recycler is having problems
505 	 * finding enough freeable vnodes it will increase trigger_mult.
506 	 * This should not happen in normal operation, even on machines with
507 	 * low amounts of memory, but extraordinary memory use by the system
508 	 * verses the amount of cached data can trigger it.
509 	 */
510 	usevnodes = desiredvnodes;
511 	if (usevnodes <= 0)
512 		usevnodes = 1;
513 	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;
514 
515 	done = 0;
516 	lwkt_gettoken(&ilock, &mntvnode_token);
517 	count = mp->mnt_nvnodelistsize / 10 + 1;
518 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
519 		/*
520 		 * __VNODESCAN__
521 		 *
522 		 * The VP will stick around while we hold mntvnode_token,
523 		 * at least until we block, so we can safely do an initial
524 		 * check, and then must check again after we lock the vnode.
525 		 */
526 		if (vp->v_type == VNON ||	/* XXX */
527 		    vp->v_type == VBAD ||	/* XXX */
528 		    !vmightfree(vp, trigger)	/* critical path opt */
529 		) {
530 			vmovevnodetoend(mp, vp);
531 			--count;
532 			continue;
533 		}
534 
535 		/*
536 		 * VX get the candidate vnode.  If the VX get fails the
537 		 * vnode might still be on the mountlist.  Our loop depends
538 		 * on us at least cycling the vnode to the end of the
539 		 * mountlist.
540 		 */
541 		if (vx_get_nonblock(vp) != 0) {
542 			if (vp->v_mount == mp)
543 				vmovevnodetoend(mp, vp);
544 			--count;
545 			continue;
546 		}
547 
548 		/*
549 		 * Since we blocked locking the vp, make sure it is still
550 		 * a candidate for reclamation.  That is, it has not already
551 		 * been reclaimed and only has our VX reference associated
552 		 * with it.
553 		 */
554 		if (vp->v_type == VNON ||	/* XXX */
555 		    vp->v_type == VBAD ||	/* XXX */
556 		    (vp->v_flag & VRECLAIMED) ||
557 		    vp->v_mount != mp ||
558 		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
559 		) {
560 			if (vp->v_mount == mp)
561 				vmovevnodetoend(mp, vp);
562 			--count;
563 			vx_put(vp);
564 			continue;
565 		}
566 
567 		/*
568 		 * All right, we are good, move the vp to the end of the
569 		 * mountlist and clean it out.  The vx_get_nonblock() will have
570 		 * returned an error if the vnode was destroyed (VRECLAIMED set),
571 		 * so we do not have to check again.  The vx_put() will move the
572 		 * vnode to the free list if the vgone() was successful.
573 		 */
574 		KKASSERT(vp->v_mount == mp);
575 		vmovevnodetoend(mp, vp);
576 		vgone(vp);
577 		vx_put(vp);
578 		++done;
579 		--count;
580 	}
581 	lwkt_reltoken(&ilock);
582 	return (done);
583 }
584 
585 /*
586  * Attempt to recycle vnodes in a context that is always safe to block.
587  * Calling vlrureclaim() from the bowels of file system code has some
588  * interesting deadlock problems.
589  */
590 static struct thread *vnlruthread;
591 static int vnlruproc_sig;
592 
593 void
594 vnlru_proc_wait(void)
595 {
596 	if (vnlruproc_sig == 0) {
597 		vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
598 		wakeup(vnlruthread);
599 	}
600 	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
601 }
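/*
 * Usage sketch (illustrative only): a vnode allocation path that finds
 * the vnode table over-committed can kick the recycler and wait for it
 * instead of calling vlrureclaim() itself from deep inside filesystem
 * code, avoiding the deadlocks described above.
 */
#if 0	/* EXAMPLE */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();	/* wakes vnlru, sleeps up to 1 second */
#endif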
602 
603 static void
604 vnlru_proc(void)
605 {
606 	struct thread *td = curthread;
607 	int done;
608 
609 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
610 	    SHUTDOWN_PRI_FIRST);
611 
612 	crit_enter();
613 	for (;;) {
614 		kproc_suspend_loop();
615 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
616 			vnlruproc_sig = 0;
617 			wakeup(&vnlruproc_sig);
618 			tsleep(td, 0, "vlruwt", hz);
619 			continue;
620 		}
621 		cache_cleanneg(0);
622 		done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);
623 
624 		/*
625 		 * The vlrureclaim() call only processes 1/10 of the vnodes
626 		 * on each mount.  If we couldn't find any repeat the loop
627 		 * at least enough times to cover all available vnodes before
628 		 * we start sleeping.  Complain if the failure extends past
629 		 * 30 second, every 30 seconds.
630 		 */
631 		if (done == 0) {
632 			++vnlru_nowhere;
633 			if (vnlru_nowhere % 10 == 0)
634 				tsleep(td, 0, "vlrup", hz * 3);
635 			if (vnlru_nowhere % 100 == 0)
636 				printf("vnlru_proc: vnode recycler stopped working!\n");
637 			if (vnlru_nowhere == 1000)
638 				vnlru_nowhere = 900;
639 		} else {
640 			vnlru_nowhere = 0;
641 		}
642 	}
643 	crit_exit();
644 }
645 
646 /*
647  * MOUNTLIST FUNCTIONS
648  */
649 
650 /*
651  * mountlist_insert (MP SAFE)
652  *
653  * Add a new mount point to the mount list.
654  */
655 void
656 mountlist_insert(struct mount *mp, int how)
657 {
658 	lwkt_tokref ilock;
659 
660 	lwkt_gettoken(&ilock, &mountlist_token);
661 	if (how == MNTINS_FIRST)
662 	    TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
663 	else
664 	    TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
665 	lwkt_reltoken(&ilock);
666 }
667 
668 /*
669  * mountlist_interlock (MP SAFE)
670  *
671  * Execute the specified interlock function with the mountlist token
672  * held.  The function will be called in a serialized fashion versus
673  * other functions called through this mechanism.
674  */
675 int
676 mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
677 {
678 	lwkt_tokref ilock;
679 	int error;
680 
681 	lwkt_gettoken(&ilock, &mountlist_token);
682 	error = callback(mp);
683 	lwkt_reltoken(&ilock);
684 	return (error);
685 }
686 
687 /*
688  * mountlist_boot_getfirst (DURING BOOT ONLY)
689  *
690  * This function returns the first mount on the mountlist, which is
691  * expected to be the root mount.  Since no interlocks are obtained
692  * this function is only safe to use during booting.
693  */
694 
695 struct mount *
696 mountlist_boot_getfirst(void)
697 {
698 	return(TAILQ_FIRST(&mountlist));
699 }
700 
701 /*
702  * mountlist_remove (MP SAFE)
703  *
704  * Remove a node from the mountlist.  If this node is the next scan node
705  * for any active mountlist scans, the active mountlist scan will be
706  * adjusted to skip the node, thus allowing removals during mountlist
707  * scans.
708  */
709 void
710 mountlist_remove(struct mount *mp)
711 {
712 	struct mountscan_info *msi;
713 	lwkt_tokref ilock;
714 
715 	lwkt_gettoken(&ilock, &mountlist_token);
716 	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
717 		if (msi->msi_node == mp) {
718 			if (msi->msi_how & MNTSCAN_FORWARD)
719 				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
720 			else
721 				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
722 		}
723 	}
724 	TAILQ_REMOVE(&mountlist, mp, mnt_list);
725 	lwkt_reltoken(&ilock);
726 }
727 
728 /*
729  * mountlist_scan (MP SAFE)
730  *
731  * Safely scan the mount points on the mount list.  Unless otherwise
732  * specified each mount point will be busied prior to the callback and
733  * unbusied afterwards.  The callback may safely remove any mount point
734  * without interfering with the scan.  If the current callback's
735  * mount is removed, the scanner will not attempt to unbusy it.
736  *
737  * If a mount node cannot be busied it is silently skipped.
738  *
739  * The callback return value is aggregated and a total is returned.  A return
740  * value of < 0 is not aggregated and will terminate the scan.
741  *
742  * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
743  * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
744  * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
745  *			  the mount node.
746  */
747 int
748 mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
749 {
750 	struct mountscan_info info;
751 	lwkt_tokref ilock;
752 	struct mount *mp;
753 	thread_t td;
754 	int count;
755 	int res;
756 
757 	lwkt_gettoken(&ilock, &mountlist_token);
758 
759 	info.msi_how = how;
760 	info.msi_node = NULL;	/* paranoia */
761 	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
762 
763 	res = 0;
764 	td = curthread;
765 
766 	if (how & MNTSCAN_FORWARD) {
767 		info.msi_node = TAILQ_FIRST(&mountlist);
768 		while ((mp = info.msi_node) != NULL) {
769 			if (how & MNTSCAN_NOBUSY) {
770 				count = callback(mp, data);
771 			} else if (vfs_busy(mp, LK_NOWAIT, td) == 0) {
772 				count = callback(mp, data);
773 				if (mp == info.msi_node)
774 					vfs_unbusy(mp, td);
775 			} else {
776 				count = 0;
777 			}
778 			if (count < 0)
779 				break;
780 			res += count;
781 			if (mp == info.msi_node)
782 				info.msi_node = TAILQ_NEXT(mp, mnt_list);
783 		}
784 	} else if (how & MNTSCAN_REVERSE) {
785 		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
786 		while ((mp = info.msi_node) != NULL) {
787 			if (how & MNTSCAN_NOBUSY) {
788 				count = callback(mp, data);
789 			} else if (vfs_busy(mp, LK_NOWAIT, td) == 0) {
790 				count = callback(mp, data);
791 				if (mp == info.msi_node)
792 					vfs_unbusy(mp, td);
793 			} else {
794 				count = 0;
795 			}
796 			if (count < 0)
797 				break;
798 			res += count;
799 			if (mp == info.msi_node)
800 				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
801 		}
802 	}
803 	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
804 	lwkt_reltoken(&ilock);
805 	return(res);
806 }
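/*
 * Usage sketch (illustrative only): tally the vnodes on all mounts.
 * Each callback runs with the mount busied (MNTSCAN_NOBUSY was not
 * given), its return values are summed by the scanner, and a negative
 * return would abort the scan.
 */
#if 0	/* EXAMPLE */
static int
count_mount_vnodes(struct mount *mp, void *data)
{
	return (mp->mnt_nvnodelistsize);
}

	/* from some caller: */
	total = mountlist_scan(count_mount_vnodes, NULL, MNTSCAN_FORWARD);
#endif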
807 
808 /*
809  * MOUNT RELATED VNODE FUNCTIONS
810  */
811 
812 static struct kproc_desc vnlru_kp = {
813 	"vnlru",
814 	vnlru_proc,
815 	&vnlruthread
816 };
817 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
818 
819 /*
820  * Move a vnode from one mount queue to another.
821  */
822 void
823 insmntque(struct vnode *vp, struct mount *mp)
824 {
825 	lwkt_tokref ilock;
826 
827 	lwkt_gettoken(&ilock, &mntvnode_token);
828 	/*
829 	 * Delete from old mount point vnode list, if on one.
830 	 */
831 	if (vp->v_mount != NULL) {
832 		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
833 			("bad mount point vnode list size"));
834 		vremovevnodemnt(vp);
835 		vp->v_mount->mnt_nvnodelistsize--;
836 	}
837 	/*
838 	 * Insert into list of vnodes for the new mount point, if available.
839 	 */
840 	if ((vp->v_mount = mp) == NULL) {
841 		lwkt_reltoken(&ilock);
842 		return;
843 	}
844 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
845 	mp->mnt_nvnodelistsize++;
846 	lwkt_reltoken(&ilock);
847 }
848 
849 
850 /*
851  * Scan the vnodes under a mount point and issue appropriate callbacks.
852  *
853  * The fastfunc() callback is called with just the mntvnode_token held
854  * (no vnode lock).  It may not block and the vnode may be undergoing
855  * modifications while the caller is processing it.  The vnode will
856  * not be entirely destroyed, however, due to the fact that the
857  * mntvnode_token is held.  A return value < 0 skips to the next vnode
858  * without calling the slowfunc(); a return value > 0 terminates the loop.
859  *
860  * The slowfunc() callback is called after the vnode has been successfully
861  * locked based on passed flags.  The vnode is skipped if it gets rearranged
862  * or destroyed while blocking on the lock.  A non-zero return value from
863  * the slow function terminates the loop.  The slow function is allowed to
864  * arbitrarily block.  The scanning code guarantees consistency of operation
865  * even if the slow function deletes or moves the node, or blocks and some
866  * other thread deletes or moves the node.
867  */
868 int
869 vmntvnodescan(
870     struct mount *mp,
871     int flags,
872     int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
873     int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
874     void *data
875 ) {
876 	struct vmntvnodescan_info info;
877 	lwkt_tokref ilock;
878 	struct vnode *vp;
879 	int r = 0;
880 	int maxcount = 1000000;
881 
882 	lwkt_gettoken(&ilock, &mntvnode_token);
883 
884 	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
885 	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
886 	while ((vp = info.vp) != NULL) {
887 		if (--maxcount == 0)
888 			panic("maxcount reached during vmntvnodescan");
889 
890 		if (vp->v_type == VNON)		/* visible but not ready */
891 			goto next;
892 		KKASSERT(vp->v_mount == mp);
893 
894 		/*
895 		 * Quick test.  A negative return continues the loop without
896 		 * calling the slow test.  0 continues onto the slow test.
897 		 * A positive number aborts the loop.
898 		 */
899 		if (fastfunc) {
900 			if ((r = fastfunc(mp, vp, data)) < 0)
901 				goto next;
902 			if (r)
903 				break;
904 		}
905 
906 		/*
907 		 * Get a vxlock on the vnode, retry if it has moved or isn't
908 		 * in the mountlist where we expect it.
909 		 */
910 		if (slowfunc) {
911 			int error;
912 
913 			switch(flags) {
914 			case VMSC_GETVP:
915 				error = vget(vp, LK_EXCLUSIVE, curthread);
916 				break;
917 			case VMSC_GETVP|VMSC_NOWAIT:
918 				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
919 						curthread);
920 				break;
921 			case VMSC_GETVX:
922 				error = vx_get(vp);
923 				break;
924 			case VMSC_REFVP:
925 				vref(vp);
926 				/* fall through */
927 			default:
928 				error = 0;
929 				break;
930 			}
931 			if (error)
932 				goto next;
933 			/*
934 			 * Do not call the slow function if the vnode is
935 			 * invalid or if it was ripped out from under us
936 			 * while we (potentially) blocked.
937 			 */
938 			if (info.vp == vp && vp->v_type != VNON)
939 				r = slowfunc(mp, vp, data);
940 
941 			/*
942 			 * Cleanup
943 			 */
944 			switch(flags) {
945 			case VMSC_GETVP:
946 			case VMSC_GETVP|VMSC_NOWAIT:
947 				vput(vp);
948 				break;
949 			case VMSC_GETVX:
950 				vx_put(vp);
951 				break;
952 			case VMSC_REFVP:
953 				vrele(vp);
954 				/* fall through */
955 			default:
956 				break;
957 			}
958 			if (r != 0)
959 				break;
960 		}
961 
962 		/*
963 		 * Iterate.  If the vnode was ripped out from under us
964 		 * info.vp will already point to the next vnode, otherwise
965 		 * we have to obtain the next valid vnode ourselves.
966 		 */
967 next:
968 		if (info.vp == vp)
969 			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
970 	}
971 	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
972 	lwkt_reltoken(&ilock);
973 	return(r);
974 }
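/*
 * Usage sketch (illustrative only): scan a mount's vnodes with a
 * non-blocking fastfunc that rejects uninteresting vnodes and a
 * slowfunc that runs with the vnode vget()'d per VMSC_GETVP and may
 * block.  myfs_flush_vnode() is a hypothetical helper.
 */
#if 0	/* EXAMPLE */
static int
myfs_scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	if (vp->v_type != VREG)
		return (-1);		/* skip, don't call the slowfunc */
	return (0);			/* proceed to the slowfunc */
}

static int
myfs_scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	myfs_flush_vnode(vp);		/* may block; vp is locked here */
	return (0);			/* non-zero would end the scan */
}

	/* from some caller: */
	error = vmntvnodescan(mp, VMSC_GETVP, myfs_scan_fast,
			      myfs_scan_slow, NULL);
#endif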
975 
976 /*
977  * Remove any vnodes in the vnode table belonging to mount point mp.
978  *
979  * If FORCECLOSE is not specified, there should not be any active ones,
980  * return error if any are found (nb: this is a user error, not a
981  * system error). If FORCECLOSE is specified, detach any active vnodes
982  * that are found.
983  *
984  * If WRITECLOSE is set, only flush out regular file vnodes open for
985  * writing.
986  *
987  * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
988  *
989  * `rootrefs' specifies the base reference count for the root vnode
990  * of this filesystem. The root vnode is considered busy if its
991  * v_usecount exceeds this value. On a successful return, vflush()
992  * will call vrele() on the root vnode exactly rootrefs times.
993  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
994  * be zero.
995  */
996 #ifdef DIAGNOSTIC
997 static int busyprt = 0;		/* print out busy vnodes */
998 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
999 #endif
1000 
1001 static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);
1002 
1003 struct vflush_info {
1004 	int flags;
1005 	int busy;
1006 	thread_t td;
1007 };
1008 
1009 int
1010 vflush(struct mount *mp, int rootrefs, int flags)
1011 {
1012 	struct thread *td = curthread;	/* XXX */
1013 	struct vnode *rootvp = NULL;
1014 	int error;
1015 	struct vflush_info vflush_info;
1016 
1017 	if (rootrefs > 0) {
1018 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
1019 		    ("vflush: bad args"));
1020 		/*
1021 		 * Get the filesystem root vnode. We can vput() it
1022 		 * immediately, since with rootrefs > 0, it won't go away.
1023 		 */
1024 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
1025 			return (error);
1026 		vput(rootvp);
1027 	}
1028 
1029 	vflush_info.busy = 0;
1030 	vflush_info.flags = flags;
1031 	vflush_info.td = td;
1032 	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);
1033 
1034 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
1035 		/*
1036 		 * If just the root vnode is busy, and if its refcount
1037 		 * is equal to `rootrefs', then go ahead and kill it.
1038 		 */
1039 		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
1040 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
1041 		if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
1042 			if (vx_lock(rootvp) == 0) {
1043 				vgone(rootvp);
1044 				vx_unlock(rootvp);
1045 				vflush_info.busy = 0;
1046 			}
1047 		}
1048 	}
1049 	if (vflush_info.busy)
1050 		return (EBUSY);
1051 	for (; rootrefs > 0; rootrefs--)
1052 		vrele(rootvp);
1053 	return (0);
1054 }
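/*
 * Usage sketch (illustrative only): an unmount path for a filesystem
 * that holds one cached reference to its root vnode would pass
 * rootrefs == 1; on success vflush() then vrele()s the root vnode
 * exactly once.  The mntflags handling mirrors the FORCECLOSE
 * description above.
 */
#if 0	/* EXAMPLE */
	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
	error = vflush(mp, 1, flags);	/* 1 == our cached root vnode ref */
	if (error)
		return (error);
#endif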
1055 
1056 /*
1057  * The scan callback is made with a VX locked vnode.
1058  */
1059 static int
1060 vflush_scan(struct mount *mp, struct vnode *vp, void *data)
1061 {
1062 	struct vflush_info *info = data;
1063 	struct vattr vattr;
1064 
1065 	/*
1066 	 * Skip over vnodes marked VSYSTEM.
1067 	 */
1068 	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1069 		return(0);
1070 	}
1071 
1072 	/*
1073 	 * If WRITECLOSE is set, flush out unlinked but still open
1074 	 * files (even if open only for reading) and regular file
1075 	 * vnodes open for writing.
1076 	 */
1077 	if ((info->flags & WRITECLOSE) &&
1078 	    (vp->v_type == VNON ||
1079 	    (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
1080 	    vattr.va_nlink > 0)) &&
1081 	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1082 		return(0);
1083 	}
1084 
1085 	/*
1086 	 * If our VX reference is the only one (v_usecount == 1), all we
1087 	 * need to do is clear out the vnode data structures and we are done.
1088 	 */
1089 	if (vp->v_usecount == 1) {
1090 		vgone(vp);
1091 		return(0);
1092 	}
1093 
1094 	/*
1095 	 * If FORCECLOSE is set, forcibly close the vnode. For block
1096 	 * or character devices, revert to an anonymous device. For
1097 	 * all other files, just kill them.
1098 	 */
1099 	if (info->flags & FORCECLOSE) {
1100 		if (vp->v_type != VBLK && vp->v_type != VCHR) {
1101 			vgone(vp);
1102 		} else {
1103 			vclean(vp, 0, info->td);
1104 			vp->v_ops = &spec_vnode_vops;
1105 			insmntque(vp, NULL);
1106 		}
1107 		return(0);
1108 	}
1109 #ifdef DIAGNOSTIC
1110 	if (busyprt)
1111 		vprint("vflush: busy vnode", vp);
1112 #endif
1113 	++info->busy;
1114 	return(0);
1115 }
1116 
1117