/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
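
/*
 * The sysref class ties vnode allocation to the sysref/objcache
 * subsystem: vnode_ctor()/vnode_dtor() run only when a vnode is
 * allocated from or returned to the backing store, while the
 * terminate op (vnode_terminate) runs whenever the last active
 * reference is dropped.  The lock/unlock ops hand the sysref code
 * the VX lock.
 */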
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.nom_cache =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate,
		.lock = (sysref_terminate_func_t)vx_lock,
		.unlock = (sysref_terminate_func_t)vx_unlock
	}
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
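/*
 * Rough free list layout (see __vfree() and vnode_rover_locked()):
 *
 *	HEAD - reclaimed / no cached data ........ vnode_free_mid1
 *	       swap-backed data only ............. vnode_free_mid2
 *	       resident VM pages ................. TAIL
 *
 * The rover marker roves between mid1 and the tail, shifting vnodes
 * whose cache status has changed into the appropriate section.
 */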
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid1;
static struct vnode	vnode_free_mid2;
static struct vnode	vnode_free_rover;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
static enum { ROVER_MID1, ROVER_MID2 } rover_state = ROVER_MID2;

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "Number of free vnodes");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "Desired number of free vnodes");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
		&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid1, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid2, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_rover, v_freelist);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

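/*
 * Non-inline, exported versions of the flag helpers above.  Both
 * atomically set or clear bits in v_flag.
 */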
void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}

/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy_interlocked(struct vnode *vp)
{
	KKASSERT(vp->v_flag & VFREE);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}

static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace(-1);
	}
#endif
	spin_lock(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);

	/*
	 * Distinguish between basically dead vnodes, vnodes with cached
	 * data, and vnodes without cached data.  A rover will shift the
	 * vnodes around as their cache status is lost.
	 */
	if (vp->v_flag & VRECLAIMED) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->resident_page_count) {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->swblock_count) {
		TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
	} else {
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING!  This function is typically called with v_spin held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spin.
 *	     The spinlock may or may not already be held by the caller.
 *	     vdrop() will clean up the free list state.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spin to
 * interlock VCACHED -> !VCACHED transitions.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock(&vp->v_spin);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock(&vp->v_spin);
}
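
/*
 * Usage sketch (illustrative only): pin a vnode with an auxiliary
 * reference while examining it from an external structure, without
 * reactivating it:
 *
 *	vhold(vp);
 *	...vp cannot be destroyed, but may still be VFREE/VCACHED...
 *	vdrop(vp);
 */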

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will VX lock the vnode
 * and then give the vnode a negative ref count, indicating that it is
 * undergoing termination or is being set aside for the cache, and one
 * final sysref_put() is required to actually return it to the memory
 * subsystem.
 *
 * Additional inactive sysrefs may race us but that's ok.  Reactivations
 * cannot race us because the sysref code is interlocked with the VX lock
 * (which is held on call).
 *
 * MPSAFE
 */
void
vnode_terminate(struct vnode *vp)
{
	/*
	 * We own the VX lock; it should not be possible for someone else
	 * to have reactivated the vp.
	 */
	KKASSERT(sysref_isinactive(&vp->v_sysref));

	/*
	 * Deactivate the vnode by marking it VFREE or VCACHED.
	 * The vnode can be reactivated from either state until
	 * reclaimed.  These states inherit the 'last' sysref on the
	 * vnode.
	 *
	 * NOTE: There may be additional inactive references from
	 * other entities blocking on the VX lock while we hold it,
	 * but this does not prevent us from changing the vnode's
	 * state.
	 *
	 * NOTE: The vnode could already be marked inactive.  XXX
	 *	 how?
	 *
	 * NOTE: v_mount may be NULL due to assignment to
	 *	 dead_vnode_vops
	 *
	 * NOTE: The vnode may be marked inactive with dirty buffers
	 *	 or dirty pages in its cached VM object still present.
	 *
	 * NOTE: VCACHED should not be set on entry.  We lose control
	 *	 of the sysref the instant the vnode is placed on the
	 *	 free list or when VCACHED is set.
	 *
	 *	 The VX lock is required when transitioning to
	 *	 +VCACHED but is not sufficient for the vshouldfree()
	 *	 interlocked test or when transitioning to -VCACHED.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
	}
	spin_lock(&vp->v_spin);
	KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
	if (vshouldfree(vp))
		__vfree(vp);
	else
		_vsetflags(vp, VCACHED); /* inactive but not yet free */
	spin_unlock(&vp->v_spin);
	vx_unlock(vp);
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token, "vnode");
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	spin_init(&vp->v_spin);
	return(TRUE);
}

/*
 * MPSAFE
 */
void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (e.g. UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that, lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * We are allowed to reactivate the vnode while we hold
		 * the VX lock, assuming it can be reactivated.
		 */
		spin_lock(&vp->v_spin);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock(&vp->v_spin);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock(&vp->v_spin);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock(&vp->v_spin);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return(error);
}

#ifdef DEBUG_VPUT

void
debug_vput(struct vnode *vp, const char *filename, int line)
{
	kprintf("vput(%p) %s:%d\n", vp, filename, line);
	vn_unlock(vp);
	vrele(vp);
}

#else

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}

#endif

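/*
 * Usage sketch (illustrative only): ref+lock a vnode whose activation
 * state is unknown, then release it.  A lock type must be supplied
 * (LK_EXCLUSIVE, or LK_SHARED where a shared lock suffices):
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		...vp is referenced, locked, and active...
 *		vput(vp);
 *	}
 */
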
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
	spin_lock(&vp->v_spin);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock(&vp->v_spin);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
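
/*
 * Usage sketch (illustrative only): reference and VX lock a vnode
 * without reactivating it, e.g. for deactivation or reclamation
 * related work:
 *
 *	vx_get(vp);
 *	...inspect or tear down state; vp cannot be reused...
 *	vx_put(vp);
 */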

/*
 * The rover looks for vnodes past the midline with no cached data and
 * moves them to before the midline.  If we do not do this the midline
 * can wind up in a degenerate state.
 */
static
void
vnode_rover_locked(void)
{
	struct vnode *vp;

	/*
	 * Get the vnode after the rover.  The rover roves between mid1 and
	 * the end so the only special vnode it can encounter is mid2.
	 */
	vp = TAILQ_NEXT(&vnode_free_rover, v_freelist);
	if (vp == &vnode_free_mid2) {
		vp = TAILQ_NEXT(vp, v_freelist);
		rover_state = ROVER_MID2;
	}
	KKASSERT(vp != &vnode_free_mid1);

	/*
	 * Start over if we finished the scan.
	 */
	TAILQ_REMOVE(&vnode_free_list, &vnode_free_rover, v_freelist);
	if (vp == NULL) {
		TAILQ_INSERT_AFTER(&vnode_free_list, &vnode_free_mid1,
				   &vnode_free_rover, v_freelist);
		rover_state = ROVER_MID1;
		return;
	}
	TAILQ_INSERT_AFTER(&vnode_free_list, vp, &vnode_free_rover, v_freelist);

	/*
	 * Shift vp if appropriate.
	 */
	if (vp->v_object && vp->v_object->resident_page_count) {
		/*
		 * Promote vnode with resident pages to section 3.
		 * (This case shouldn't happen).
		 */
		if (rover_state == ROVER_MID1) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
	} else if (vp->v_object && vp->v_object->swblock_count) {
		/*
		 * Demote vnode with only swap pages to section 2
		 */
		if (rover_state == ROVER_MID2) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
		}
	} else {
		/*
		 * Demote vnode with no cached data to section 1
		 */
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
}

/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spin.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock(&vfs_spin);
		vnode_rover_locked();
		vnode_rover_locked();
		vp = TAILQ_FIRST(&vnode_free_list);
		while (vp == &vnode_free_mid1 || vp == &vnode_free_mid2 ||
		       vp == &vnode_free_rover) {
			vp = TAILQ_NEXT(vp, v_freelist);
		}
		if (vp == NULL)
			break;
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock(&vfs_spin);
			continue;
		}

		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear, the vnode will
		 * not be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock(&vfs_spin);
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevent
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & vrefd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This case only occurs
	 * in certain large-memory (2G+) configurations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, so vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}

/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;		/* number of vnodes actually freed */
	}
	return(count);
}