xref: /dragonfly/sys/kern/vfs_lock.c (revision cecb9aae)
1 /*
2  * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * External virtual filesystem routines
37  */
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/mount.h>
44 #include <sys/proc.h>
45 #include <sys/vnode.h>
46 #include <sys/buf.h>
47 #include <sys/sysctl.h>
48 
49 #include <machine/limits.h>
50 
51 #include <vm/vm.h>
52 #include <vm/vm_object.h>
53 
54 #include <sys/buf2.h>
55 #include <sys/thread2.h>
56 #include <sys/sysref2.h>
57 
58 static void vnode_terminate(struct vnode *vp);
59 static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
60 static void vnode_dtor(void *obj, void *private);
61 
62 static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
63 static struct sysref_class vnode_sysref_class = {
64 	.name =		"vnode",
65 	.mtype =	M_VNODE,
66 	.proto =	SYSREF_PROTO_VNODE,
67 	.offset =	offsetof(struct vnode, v_sysref),
68 	.objsize =	sizeof(struct vnode),
69 	.nom_cache =	256,
70 	.flags =	SRC_MANAGEDINIT,
71 	.ctor =		vnode_ctor,
72 	.dtor =		vnode_dtor,
73 	.ops = {
74 		.terminate = (sysref_terminate_func_t)vnode_terminate,
75 		.lock = (sysref_terminate_func_t)vx_lock,
76 		.unlock = (sysref_terminate_func_t)vx_unlock
77 	}
78 };
79 
80 /*
81  * The vnode free list holds inactive vnodes.  Aged inactive vnodes
82  * are inserted prior to the mid point, and otherwise inserted
83  * at the tail.
84  */
85 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
86 static struct vnode	vnode_free_mid1;
87 static struct vnode	vnode_free_mid2;
88 static struct vnode	vnode_free_rover;
89 static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
90 static enum { ROVER_MID1, ROVER_MID2 } rover_state = ROVER_MID2;
91 
92 int  freevnodes = 0;
93 SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
94 	&freevnodes, 0, "Number of free vnodes");
95 static int wantfreevnodes = 25;
96 SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
97 	&wantfreevnodes, 0, "Desired number of free vnodes");
98 static int batchfreevnodes = 5;
99 SYSCTL_INT(_debug, OID_AUTO, batchfreevnodes, CTLFLAG_RW,
100 	&batchfreevnodes, 0, "Number of vnodes to free at once");
101 #ifdef TRACKVNODE
102 static ulong trackvnode;
103 SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
104 		&trackvnode, 0, "");
105 #endif
106 
107 /*
108  * Called from vfsinit()
109  */
110 void
111 vfs_lock_init(void)
112 {
113 	TAILQ_INIT(&vnode_free_list);
114 	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid1, v_freelist);
115 	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid2, v_freelist);
116 	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_rover, v_freelist);
117 	spin_init(&vfs_spin);
118 	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
119 }
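
/*
 * Rough sketch of the resulting free list layout, as implied by __vfree()
 * and the rover scan further below (illustrative only):
 *
 *	HEAD -> reclaimed / no cached data -> mid1 ->
 *		swap-backed data only      -> mid2 ->
 *		resident VM pages          -> TAIL
 *
 * vnode_free_rover roves between mid1 and the tail and re-sorts vnodes
 * whose cache status has changed since they were freed.
 */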
120 
121 /*
122  * Misc functions
123  */
124 static __inline
125 void
126 _vsetflags(struct vnode *vp, int flags)
127 {
128 	atomic_set_int(&vp->v_flag, flags);
129 }
130 
131 static __inline
132 void
133 _vclrflags(struct vnode *vp, int flags)
134 {
135 	atomic_clear_int(&vp->v_flag, flags);
136 }
137 
138 void
139 vsetflags(struct vnode *vp, int flags)
140 {
141 	_vsetflags(vp, flags);
142 }
143 
144 void
145 vclrflags(struct vnode *vp, int flags)
146 {
147 	_vclrflags(vp, flags);
148 }
149 
150 /*
151  * Inline helper functions.
152  *
153  * WARNING: vbusy() may only be called while the vnode lock or VX lock
154  *	    is held.  The vnode spinlock need not be held.
155  *
156  * MPSAFE
157  */
158 static __inline
159 void
160 __vbusy_interlocked(struct vnode *vp)
161 {
162 	KKASSERT(vp->v_flag & VFREE);
163 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
164 	freevnodes--;
165 	_vclrflags(vp, VFREE);
166 }
167 
168 static __inline
169 void
170 __vbusy(struct vnode *vp)
171 {
172 #ifdef TRACKVNODE
173 	if ((ulong)vp == trackvnode)
174 		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
175 #endif
176 	spin_lock(&vfs_spin);
177 	__vbusy_interlocked(vp);
178 	spin_unlock(&vfs_spin);
179 }
180 
181 /*
182  * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
183  * implied sysref related to having removed the vnode from the freelist
184  * (and VCACHED is already clear in that case).
185  *
186  * MPSAFE
187  */
188 static __inline
189 void
190 __vfree(struct vnode *vp)
191 {
192 #ifdef TRACKVNODE
193 	if ((ulong)vp == trackvnode) {
194 		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
195 		print_backtrace(-1);
196 	}
197 #endif
198 	spin_lock(&vfs_spin);
199 	KKASSERT((vp->v_flag & VFREE) == 0);
200 
201 	/*
202 	 * Distinguish between basically dead vnodes, vnodes with cached
203 	 * data, and vnodes without cached data.  A rover will shift the
204 	 * vnodes around as their cache status is lost.
205 	 */
206 	if (vp->v_flag & VRECLAIMED) {
207 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
208 	} else if (vp->v_object && vp->v_object->resident_page_count) {
209 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
210 	} else if (vp->v_object && vp->v_object->swblock_count) {
211 		TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
212 	} else {
213 		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
214 	}
215 	freevnodes++;
216 	_vsetflags(vp, VFREE);
217 	spin_unlock(&vfs_spin);
218 }
219 
220 /*
221  * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
222  * implied sysref related to having removed the vnode from the freelist
223  * (and VCACHED is already clear in that case).
224  *
225  * MPSAFE
226  */
227 static __inline
228 void
229 __vfreetail(struct vnode *vp)
230 {
231 #ifdef TRACKVNODE
232 	if ((ulong)vp == trackvnode)
233 		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
234 #endif
235 	spin_lock(&vfs_spin);
236 	KKASSERT((vp->v_flag & VFREE) == 0);
237 	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
238 	freevnodes++;
239 	_vsetflags(vp, VFREE);
240 	spin_unlock(&vfs_spin);
241 }
242 
243 /*
244  * Return TRUE if we should put the vnode on the freelist (VFREE),
245  * FALSE if we should leave it alone / mark it as VCACHED.
246  *
247  * This routine is only valid if the vnode is already either VFREE or
248  * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
249  *
250  * WARNING!  We used to indicate FALSE if the vnode had an object with
251  *	     resident pages but we no longer do that because it makes
252  *	     managing kern.maxvnodes difficult.  Instead we rely on vfree()
253  *	     to place the vnode properly on the list.
254  *
255  * WARNING!  This function is typically called with v_spin held.
256  *
257  * MPSAFE
258  */
259 static __inline boolean_t
260 vshouldfree(struct vnode *vp)
261 {
262 	return (vp->v_auxrefs == 0);
263 #if 0
264 	 && (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
265 #endif
266 }
267 
268 /*
269  * Add a ref to an active vnode.  This function should never be called
270  * with an inactive vnode (use vget() instead).
271  *
272  * MPSAFE
273  */
274 void
275 vref(struct vnode *vp)
276 {
277 	KKASSERT(vp->v_sysref.refcnt > 0 &&
278 		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
279 	sysref_get(&vp->v_sysref);
280 }
281 
282 /*
283  * Release a ref on an active or inactive vnode.  The sysref termination
284  * function will be called when the last active reference is released,
285  * and the vnode is returned to the objcache when the last inactive
286  * reference is released.
287  */
288 void
289 vrele(struct vnode *vp)
290 {
291 	sysref_put(&vp->v_sysref);
292 }
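
/*
 * Illustrative sketch (hypothetical caller, not taken from this file):
 * vref() is only legal on a vnode known to be active, e.g. because the
 * caller already holds another reference on it; otherwise vget() must
 * be used.
 *
 *	vref(vp);		-- additional ref on an active vnode
 *	...
 *	vrele(vp);		-- dropping the last active ref runs
 *				   vnode_terminate() via sysref
 */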
293 
294 /*
295  * Add an auxiliary data structure reference to the vnode.  Auxiliary
296  * references do not change the state of the vnode or prevent it
297  * from being deactivated, reclaimed, or placed on or removed from
298  * the free list.
299  *
300  * An auxiliary reference DOES prevent the vnode from being destroyed,
301  * allowing you to vx_lock() it, test state, etc.
302  *
303  * An auxiliary reference DOES NOT move a vnode out of the VFREE state
304  * once it has entered it.
305  *
306  * WARNING!  vhold() and vhold_interlocked() must not acquire v_spin.
307  *	     The spinlock may or may not already be held by the caller.
308  *	     vdrop() will clean up the free list state.
309  *
310  * MPSAFE
311  */
312 void
313 vhold(struct vnode *vp)
314 {
315 	KKASSERT(vp->v_sysref.refcnt != 0);
316 	atomic_add_int(&vp->v_auxrefs, 1);
317 }
318 
319 void
320 vhold_interlocked(struct vnode *vp)
321 {
322 	atomic_add_int(&vp->v_auxrefs, 1);
323 }
324 
325 /*
326  * Remove an auxiliary reference from the vnode.
327  *
328  * vdrop needs to check for a VCACHED->VFREE transition to catch cases
329  * where a vnode is held past its reclamation.  We use v_spin to
330  * interlock VCACHED -> !VCACHED transitions.
331  *
332  * MPSAFE
333  */
334 void
335 vdrop(struct vnode *vp)
336 {
337 	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
338 	spin_lock(&vp->v_spin);
339 	atomic_subtract_int(&vp->v_auxrefs, 1);
340 	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
341 		_vclrflags(vp, VCACHED);
342 		__vfree(vp);
343 	}
344 	spin_unlock(&vp->v_spin);
345 }
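
/*
 * Minimal usage sketch for auxiliary references (hypothetical caller):
 * the holder pins the vnode against destruction without keeping it
 * active or moving it off the free list.
 *
 *	vhold(vp);		-- vp cannot be destroyed while held
 *	...			-- e.g. stash vp in a side structure
 *	vdrop(vp);		-- may move a VCACHED vnode to VFREE
 */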
346 
347 /*
348  * This function is called when the last active reference on the vnode
349  * is released, typically via vrele().  SYSREF will VX lock the vnode
350  * and then give the vnode a negative ref count, indicating that it is
351  * undergoing termination or is being set aside for the cache, and one
352  * final sysref_put() is required to actually return it to the memory
353  * subsystem.
354  *
355  * Additional inactive sysrefs may race us but that's ok.  Reactivations
356  * cannot race us because the sysref code is interlocked with the VX lock
357  * (which is held on call).
358  *
359  * MPSAFE
360  */
361 void
362 vnode_terminate(struct vnode *vp)
363 {
364 	/*
365 	 * We own the VX lock; it should not be possible for someone else
366 	 * to have reactivated the vp.
367 	 */
368 	KKASSERT(sysref_isinactive(&vp->v_sysref));
369 
370 	/*
371 	 * Deactivate the vnode by marking it VFREE or VCACHED.
372 	 * The vnode can be reactivated from either state until
373 	 * reclaimed.  These states inherit the 'last' sysref on the
374 	 * vnode.
375 	 *
376 	 * NOTE: There may be additional inactive references from
377 	 * other entities blocking on the VX lock while we hold it,
378 	 * but this does not prevent us from changing the vnode's
379 	 * state.
380 	 *
381 	 * NOTE: The vnode could already be marked inactive.  XXX
382 	 *	 how?
383 	 *
384 	 * NOTE: v_mount may be NULL due to assignment to
385 	 *	 dead_vnode_vops
386 	 *
387 	 * NOTE: The vnode may be marked inactive with dirty buffers
388 	 *	 or dirty pages in its cached VM object still present.
389 	 *
390 	 * NOTE: VCACHED should not be set on entry.  We lose control
391 	 *	 of the sysref the instant the vnode is placed on the
392 	 *	 free list or when VCACHED is set.
393 	 *
394 	 *	 The VX lock is required when transitioning to
395 	 *	 +VCACHED but is not sufficient for the vshouldfree()
396 	 *	 interlocked test or when transitioning to -VCACHED.
397 	 */
398 	if ((vp->v_flag & VINACTIVE) == 0) {
399 		_vsetflags(vp, VINACTIVE);
400 		if (vp->v_mount)
401 			VOP_INACTIVE(vp);
402 	}
403 	spin_lock(&vp->v_spin);
404 	KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
405 	if (vshouldfree(vp))
406 		__vfree(vp);
407 	else
408 		_vsetflags(vp, VCACHED); /* inactive but not yet free */
409 	spin_unlock(&vp->v_spin);
410 	vx_unlock(vp);
411 }
412 
413 /*
414  * Physical vnode constructor / destructor.  These are only executed on
415  * the backend of the objcache.  They are NOT executed on every vnode
416  * allocation or deallocation.
417  *
418  * MPSAFE
419  */
420 boolean_t
421 vnode_ctor(void *obj, void *private, int ocflags)
422 {
423 	struct vnode *vp = obj;
424 
425 	lwkt_token_init(&vp->v_token, "vnode");
426 	lockinit(&vp->v_lock, "vnode", 0, 0);
427 	TAILQ_INIT(&vp->v_namecache);
428 	RB_INIT(&vp->v_rbclean_tree);
429 	RB_INIT(&vp->v_rbdirty_tree);
430 	RB_INIT(&vp->v_rbhash_tree);
431 	spin_init(&vp->v_spin);
432 	return(TRUE);
433 }
434 
435 /*
436  * MPSAFE
437  */
438 void
439 vnode_dtor(void *obj, void *private)
440 {
441 	struct vnode *vp __debugvar = obj;
442 
443 	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
444 }
445 
446 /****************************************************************
447  *			VX LOCKING FUNCTIONS			*
448  ****************************************************************
449  *
450  * These functions lock vnodes for reclamation and deactivation related
451  * activities.  The caller must already be holding some sort of reference
452  * on the vnode.
453  *
454  * MPSAFE
455  */
456 void
457 vx_lock(struct vnode *vp)
458 {
459 	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
460 }
461 
462 /*
463  * The non-blocking version also uses a slightly different mechanic.
464  * This function will explicitly fail not only if it cannot acquire
465  * the lock normally, but also if the caller already holds a lock.
466  *
467  * The adjusted mechanic is used to close a loophole where complex
468  * VOP_RECLAIM code can circle around recursively and allocate the
469  * same vnode it is trying to destroy from the freelist.
470  *
471  * Any filesystem (e.g. UFS) which puts LK_CANRECURSE in lk_flags can
472  * cause the incorrect behavior to occur.  If not for that, lockmgr()
473  * would do the right thing.
474  */
475 static int
476 vx_lock_nonblock(struct vnode *vp)
477 {
478 	if (lockcountnb(&vp->v_lock))
479 		return(EBUSY);
480 	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
481 }
482 
483 void
484 vx_unlock(struct vnode *vp)
485 {
486 	lockmgr(&vp->v_lock, LK_RELEASE);
487 }
488 
489 /****************************************************************
490  *			VNODE ACQUISITION FUNCTIONS		*
491  ****************************************************************
492  *
493  * These functions must be used when accessing a vnode via an auxiliary
494  * reference such as the namecache or free list, or when you wish to
495  * do a combo ref+lock sequence.
496  *
497  * These functions are MANDATORY for any code chain accessing a vnode
498  * whose activation state is not known.
499  *
500  * vget() can be called with LK_NOWAIT and will return EBUSY if the
501  * lock cannot be immediately acquired.
502  *
503  * vget()/vput() are used when reactivation is desired.
504  *
505  * vx_get() and vx_put() are used when reactivation is not desired.
506  */
507 int
508 vget(struct vnode *vp, int flags)
509 {
510 	int error;
511 
512 	/*
513 	 * A lock type must be passed
514 	 */
515 	if ((flags & LK_TYPE_MASK) == 0) {
516 		panic("vget() called with no lock specified!");
517 		/* NOT REACHED */
518 	}
519 
520 	/*
521 	 * Reference the structure and then acquire the lock.  0->1
522 	 * transitions and refs during termination are allowed here so
523 	 * call sysref directly.
524 	 *
525 	 * NOTE: The requested lock might be a shared lock and does
526 	 *	 not protect our access to the refcnt or other fields.
527 	 */
528 	sysref_get(&vp->v_sysref);
529 	if ((error = vn_lock(vp, flags)) != 0) {
530 		/*
531 		 * The lock failed, undo and return an error.
532 		 */
533 		sysref_put(&vp->v_sysref);
534 	} else if (vp->v_flag & VRECLAIMED) {
535 		/*
536 		 * The node is being reclaimed and cannot be reactivated
537 		 * any more, undo and return ENOENT.
538 		 */
539 		vn_unlock(vp);
540 		vrele(vp);
541 		error = ENOENT;
542 	} else {
543 		/*
544 		 * If the vnode is marked VFREE or VCACHED it needs to be
545 		 * reactivated, otherwise it had better already be active.
546 		 * VINACTIVE must also be cleared.
547 		 *
548 		 * In the VFREE/VCACHED case we have to throw away the
549 		 * sysref that was earmarking those cases and preventing
550 		 * the vnode from being destroyed.  Our sysref is still held.
551 		 *
552 		 * We are allowed to reactivate the vnode while we hold
553 		 * the VX lock, assuming it can be reactivated.
554 		 */
555 		spin_lock(&vp->v_spin);
556 		if (vp->v_flag & VFREE) {
557 			__vbusy(vp);
558 			sysref_activate(&vp->v_sysref);
559 			spin_unlock(&vp->v_spin);
560 			sysref_put(&vp->v_sysref);
561 		} else if (vp->v_flag & VCACHED) {
562 			_vclrflags(vp, VCACHED);
563 			sysref_activate(&vp->v_sysref);
564 			spin_unlock(&vp->v_spin);
565 			sysref_put(&vp->v_sysref);
566 		} else {
567 			if (sysref_isinactive(&vp->v_sysref)) {
568 				sysref_activate(&vp->v_sysref);
569 				kprintf("Warning vp %p reactivation race\n",
570 					vp);
571 			}
572 			spin_unlock(&vp->v_spin);
573 		}
574 		_vclrflags(vp, VINACTIVE);
575 		error = 0;
576 	}
577 	return(error);
578 }
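
/*
 * Typical calling pattern (illustrative sketch only):
 *
 *	if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
 *		...		-- vp is referenced, locked, and not
 *				   VRECLAIMED; VINACTIVE is cleared
 *		vput(vp);	-- unlock + vrele in one call
 *	}
 */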
579 
580 #ifdef DEBUG_VPUT
581 
582 void
583 debug_vput(struct vnode *vp, const char *filename, int line)
584 {
585 	kprintf("vput(%p) %s:%d\n", vp, filename, line);
586 	vn_unlock(vp);
587 	vrele(vp);
588 }
589 
590 #else
591 
592 /*
593  * MPSAFE
594  */
595 void
596 vput(struct vnode *vp)
597 {
598 	vn_unlock(vp);
599 	vrele(vp);
600 }
601 
602 #endif
603 
604 /*
605  * XXX The vx_*() locks should use auxrefs, not the main reference counter.
606  *
607  * MPSAFE
608  */
609 void
610 vx_get(struct vnode *vp)
611 {
612 	sysref_get(&vp->v_sysref);
613 	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
614 }
615 
616 /*
617  * MPSAFE
618  */
619 int
620 vx_get_nonblock(struct vnode *vp)
621 {
622 	int error;
623 
624 	sysref_get(&vp->v_sysref);
625 	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
626 	if (error)
627 		sysref_put(&vp->v_sysref);
628 	return(error);
629 }
630 
631 /*
632  * Release a VX lock that also held a ref on the vnode.
633  *
634  * vx_put needs to check for a VCACHED->VFREE transition to catch the
635  * case where e.g. vnlru issues a vgone*().
636  *
637  * MPSAFE
638  */
639 void
640 vx_put(struct vnode *vp)
641 {
642 	spin_lock(&vp->v_spin);
643 	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
644 		_vclrflags(vp, VCACHED);
645 		__vfree(vp);
646 	}
647 	spin_unlock(&vp->v_spin);
648 	lockmgr(&vp->v_lock, LK_RELEASE);
649 	sysref_put(&vp->v_sysref);
650 }
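
/*
 * Illustrative sketch of the vx_get()/vx_put() pattern used by
 * reclamation-related code (hypothetical caller):
 *
 *	vx_get(vp);		-- ref + exclusive VX lock, works even on
 *				   inactive vnodes and does not reactivate
 *	...			-- inspect or tear down vnode state
 *	vx_put(vp);		-- may transition VCACHED -> VFREE
 */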
651 
652 /*
653  * The rover looks for vnodes past the midline with no cached data and
654  * moves them to before the midline.  If we do not do this the midline
655  * can wind up in a degenerate state.
656  */
657 static
658 void
659 vnode_free_rover_scan_locked(void)
660 {
661 	struct vnode *vp;
662 
663 	/*
664 	 * Get the vnode after the rover.  The rover roves between mid1 and
665 	 * the end so the only special vnode it can encounter is mid2.
666 	 */
667 	vp = TAILQ_NEXT(&vnode_free_rover, v_freelist);
668 	if (vp == &vnode_free_mid2) {
669 		vp = TAILQ_NEXT(vp, v_freelist);
670 		rover_state = ROVER_MID2;
671 	}
672 	KKASSERT(vp != &vnode_free_mid1);
673 
674 	/*
675 	 * Start over if we finished the scan.
676 	 */
677 	TAILQ_REMOVE(&vnode_free_list, &vnode_free_rover, v_freelist);
678 	if (vp == NULL) {
679 		TAILQ_INSERT_AFTER(&vnode_free_list, &vnode_free_mid1,
680 				   &vnode_free_rover, v_freelist);
681 		rover_state = ROVER_MID1;
682 		return;
683 	}
684 	TAILQ_INSERT_AFTER(&vnode_free_list, vp, &vnode_free_rover, v_freelist);
685 
686 	/*
687 	 * Shift vp if appropriate.
688 	 */
689 	if (vp->v_object && vp->v_object->resident_page_count) {
690 		/*
691 		 * Promote vnode with resident pages to section 3.
692 		 */
693 		if (rover_state == ROVER_MID1) {
694 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
695 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
696 		}
697 	} else if (vp->v_object && vp->v_object->swblock_count) {
698 		/*
699 		 * Demote vnode with only swap pages to section 2
700 		 */
701 		if (rover_state == ROVER_MID2) {
702 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
703 			TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
704 		}
705 	} else {
706 		/*
707 		 * Demote vnode with no cached data to section 1
708 		 */
709 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
710 		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
711 	}
712 }
713 
714 void
715 vnode_free_rover_scan(int count)
716 {
717 	spin_lock(&vfs_spin);
718 	while (count > 0) {
719 		--count;
720 		vnode_free_rover_scan_locked();
721 	}
722 	spin_unlock(&vfs_spin);
723 }
724 
725 /*
726  * Try to reuse a vnode from the free list.  This function is somewhat
727  * advisory in that NULL can be returned as a normal case, even if free
728  * vnodes are present.
729  *
730  * The scan is limited because it can result in excessive CPU use during
731  * periods of extreme vnode use.
732  *
733  * NOTE: The returned vnode is not completely initialized.
734  *
735  * MPSAFE
736  */
737 static
738 struct vnode *
739 allocfreevnode(int maxcount)
740 {
741 	struct vnode *vp;
742 	int count;
743 
744 	for (count = 0; count < maxcount; count++) {
745 		/*
746 		 * Try to lock the first vnode on the free list.
747 		 * Cycle if we can't.
748 		 *
749 		 * We use a bad hack in vx_lock_nonblock() which avoids
750 		 * the lock order reversal between vfs_spin and v_spin.
751 		 * This is very fragile code and I don't want to use
752 		 * vhold here.
753 		 */
754 		spin_lock(&vfs_spin);
755 		vnode_free_rover_scan_locked();
756 		vnode_free_rover_scan_locked();
757 		vp = TAILQ_FIRST(&vnode_free_list);
758 		while (vp == &vnode_free_mid1 || vp == &vnode_free_mid2 ||
759 		       vp == &vnode_free_rover) {
760 			vp = TAILQ_NEXT(vp, v_freelist);
761 		}
762 		if (vp == NULL) {
763 			spin_unlock(&vfs_spin);
764 			break;
765 		}
766 		if (vx_lock_nonblock(vp)) {
767 			KKASSERT(vp->v_flag & VFREE);
768 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
769 			TAILQ_INSERT_TAIL(&vnode_free_list,
770 					  vp, v_freelist);
771 			spin_unlock(&vfs_spin);
772 			continue;
773 		}
774 
775 		/*
776 		 * We inherit the sysref associated with the vnode on the free
777 		 * list.  Because VCACHED is clear the vnode will not
778 		 * be placed back on the free list.  We own the sysref
779 		 * free and clear and thus control the disposition of
780 		 * the vnode.
781 		 */
782 		__vbusy_interlocked(vp);
783 		spin_unlock(&vfs_spin);
784 #ifdef TRACKVNODE
785 		if ((ulong)vp == trackvnode)
786 			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
787 #endif
788 		/*
789 		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
790 		 * This includes namecache refs due to a related ncp being
791 		 * locked or having children, a VM object association, or
792 		 * other hold users.
793 		 *
794 		 * We will make this test several times as auxrefs can
795 		 * get incremented on us without any spinlocks being held
796 		 * until we have removed all namecache and inode references
797 		 * to the vnode.
798 		 *
799 		 * Because VCACHED is already in the correct state (cleared)
800 		 * we cannot race other vdrop()s occurring at the same time
801 		 * and can safely place vp on the free list.
802 		 *
803 		 * The free list association reinherits the sysref.
804 		 */
805 		if (vp->v_auxrefs) {
806 			__vfreetail(vp);
807 			vx_unlock(vp);
808 			continue;
809 		}
810 
811 		/*
812 		 * We inherit the reference that was previously associated
813 		 * with the vnode being on the free list.  VCACHED had better
814 		 * not be set because the reference and VX lock prevent
815 		 * the sysref from transitioning to an active state.
816 		 */
817 		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
818 		KKASSERT(sysref_isinactive(&vp->v_sysref));
819 
820 		/*
821 		 * Holding the VX lock on an inactive vnode prevents it
822 		 * from being reactivated or reused.  New namecache
823 		 * associations can only be made using active vnodes.
824 		 *
825 		 * Another thread may be blocked on our vnode lock while
826 		 * holding a namecache lock.  We can only reuse this vnode
827 		 * if we can clear all namecache associations without
828 		 * blocking.
829 		 *
830 		 * Because VCACHED is already in the correct state (cleared)
831 		 * we cannot race other vdrop()s occurring at the same time
832 		 * and can safely place vp on the free list.
833 		 */
834 		if ((vp->v_flag & VRECLAIMED) == 0) {
835 			if (cache_inval_vp_nonblock(vp)) {
836 				__vfreetail(vp);
837 				vx_unlock(vp);
838 				continue;
839 			}
840 			vgone_vxlocked(vp);
841 			/* vnode is still VX locked */
842 		}
843 
844 		/*
845 		 * We can reuse the vnode if no primary or auxiliary
846 		 * references remain other than ours, else put it
847 		 * back on the free list and keep looking.
848 		 *
849 		 * Either the free list inherits the last reference
850 		 * or we fall through and sysref_activate() the last
851 		 * reference.
852 		 *
853 		 * Since the vnode is in a VRECLAIMED state, no new
854 		 * namecache associations could have been made.
855 		 */
856 		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
857 		if (vp->v_auxrefs ||
858 		    !sysref_islastdeactivation(&vp->v_sysref)) {
859 			__vfreetail(vp);
860 			vx_unlock(vp);
861 			continue;
862 		}
863 
864 		/*
865 		 * Return a VX locked vnode suitable for reuse.  The caller
866 		 * inherits the sysref.
867 		 */
868 		return(vp);
869 	}
870 	return(NULL);
871 }
872 
873 /*
874  * Obtain a new vnode.  The returned vnode is VX locked & vrefd.
875  *
876  * All new vnodes have the VAGE flags set.  An open() of the vnode will
877  * decrement the (2-bit) flags.  Vnodes which are opened several times
878  * are thus retained in the cache over vnodes which are merely stat()d.
879  *
880  * We always allocate the vnode.  Attempting to recycle existing vnodes
881  * here can lead to numerous deadlocks, particularly with softupdates.
882  */
883 struct vnode *
884 allocvnode(int lktimeout, int lkflags)
885 {
886 	struct vnode *vp;
887 
888 	/*
889 	 * Do not flag for recycling unless there are enough free vnodes
890 	 * to recycle and the number of vnodes has exceeded our target.
891 	 */
892 	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes) {
893 		struct thread *td = curthread;
894 		if (td->td_lwp)
895 			atomic_set_int(&td->td_lwp->lwp_mpflags, LWP_MP_VNLRU);
896 	}
897 	vp = sysref_alloc(&vnode_sysref_class);
898 	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
899 	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
900 	atomic_add_int(&numvnodes, 1);
901 
902 	/*
903 	 * We are using a managed sysref class, so vnode fields are only
904 	 * zeroed on initial allocation from the backing store, not
905 	 * on reallocation.  Thus we have to clear these fields for both
906 	 * reallocation and reuse.
907 	 */
908 #ifdef INVARIANTS
909 	if (vp->v_data)
910 		panic("cleaned vnode isn't");
911 	if (bio_track_active(&vp->v_track_read) ||
912 	    bio_track_active(&vp->v_track_write)) {
913 		panic("Clean vnode has pending I/O's");
914 	}
915 	if (vp->v_flag & VONWORKLST)
916 		panic("Clean vnode still pending on syncer worklist!");
917 	if (!RB_EMPTY(&vp->v_rbdirty_tree))
918 		panic("Clean vnode still has dirty buffers!");
919 	if (!RB_EMPTY(&vp->v_rbclean_tree))
920 		panic("Clean vnode still has clean buffers!");
921 	if (!RB_EMPTY(&vp->v_rbhash_tree))
922 		panic("Clean vnode still on hash tree!");
923 	KKASSERT(vp->v_mount == NULL);
924 #endif
925 	vp->v_flag = VAGE0 | VAGE1;
926 	vp->v_lastw = 0;
927 	vp->v_lasta = 0;
928 	vp->v_cstart = 0;
929 	vp->v_clen = 0;
930 	vp->v_socket = 0;
931 	vp->v_opencount = 0;
932 	vp->v_writecount = 0;	/* XXX */
933 
934 	/*
935 	 * lktimeout only applies when LK_TIMELOCK is used, and only
936 	 * the pageout daemon uses it.  The timeout may not be zero
937 	 * or the pageout daemon can deadlock in low-VM situations.
938 	 */
939 	if (lktimeout == 0)
940 		lktimeout = hz / 10;
941 	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
942 	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
943 	/* exclusive lock still held */
944 
945 	/*
946 	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
947 	 * The -0x40000000 comes from the last ref on reuse, and from
948 	 * sysref_init() on allocate.
949 	 */
950 	sysref_activate(&vp->v_sysref);
951 	vp->v_filesize = NOOFFSET;
952 	vp->v_type = VNON;
953 	vp->v_tag = 0;
954 	vp->v_ops = NULL;
955 	vp->v_data = NULL;
956 	vp->v_pfsmp = NULL;
957 	KKASSERT(vp->v_mount == NULL);
958 
959 	return (vp);
960 }
961 
962 /*
963  * Called after a process has allocated a vnode via allocvnode()
964  * and we detected that too many vnodes were present.
965  *
966  * Try to reuse vnodes if we hit the max.  This situation only
967  * occurs in certain large-memory (2G+) situations on 32 bit systems,
968  * or if kern.maxvnodes is set to very low values.
969  *
970  * This function is called just prior to a return to userland if the
971  * process at some point had to allocate a new vnode during the last
972  * system call and the vnode count was found to be excessive.
973  *
974  * WARNING: Sometimes numvnodes can blow out due to children being
975  *	    present under directory vnodes in the namecache.  For the
976  *	    moment use an if() instead of a while() and note that if
977  *	    we were to use a while() we would still have to break out
978  *	    if freesomevnodes() returned 0.
979  */
980 void
981 allocvnode_gc(void)
982 {
983 	if (numvnodes > desiredvnodes && freevnodes > wantfreevnodes) {
984 		freesomevnodes(batchfreevnodes);
985 	}
986 }
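
/*
 * Sketch of the intended flow (the userland-return call site lives
 * outside this file; shown only for illustration):
 *
 *	allocvnode()		-- sets LWP_MP_VNLRU on the lwp when the
 *				   vnode count exceeds the target
 *	...			-- system call completes
 *	allocvnode_gc()		-- invoked on the return-to-userland path,
 *				   recycles up to batchfreevnodes vnodes
 */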
987 
988 /*
989  * MPSAFE
990  */
991 int
992 freesomevnodes(int n)
993 {
994 	struct vnode *vp;
995 	int count = 0;
996 
997 	while (n) {
998 		if ((vp = allocfreevnode(n * 2)) == NULL)
999 			break;
1000 		--n;
1001 		++count;
1002 		vx_put(vp);
1003 		atomic_add_int(&numvnodes, -1);
1004 	}
1005 	return(count);
1006 }
1007