/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point; all others are inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid;

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
		&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
		&wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
		&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
}

/*
 * Inline helper functions.  vbusy() and vfree() must be called while in a
 * critical section.
 *
 * Warning: these functions must remain callable while the caller holds a
 * read spinlock on something else, which means we cannot take any read
 * spinlocks ourselves here.
 */
static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	vp->v_flag &= ~VFREE;
}

static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace();
	}
#endif
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else if (vp->v_flag & (VAGE0 | VAGE1))
		TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}

static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}
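
/*
 * Free list layout sketch (illustrative summary, not part of the
 * original source).  __vfree() maintains an LRU with an aging midpoint:
 *
 *	HEAD -> [VRECLAIMED] -> [VAGE0/VAGE1 aged] -> &vnode_free_mid
 *	     -> [other inactive vnodes] -> TAIL
 *
 * allocfreevnode() consumes from the head (skipping the midpoint
 * marker), so reclaimed and aged vnodes are reused first.
 */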

/*
 * Return TRUE if we should place the vnode on the freelist (VFREE),
 * or FALSE to leave it alone / mark it VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
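
/*
 * Example (illustrative sketch, not part of the original source): a
 * caller that already holds an active reference may stack additional
 * refs around a blocking operation:
 *
 *	vref(vp);		(vp known active; otherwise use vget())
 *	... use vp ...
 *	vrele(vp);		(last active ref fires vnode_terminate())
 */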

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
}
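
/*
 * Example (illustrative sketch, not part of the original source):
 * auxiliary references are typically used to pin a vnode found via a
 * data structure such as the namecache while its state is probed:
 *
 *	vhold(vp);		(vnode cannot be destroyed under us)
 *	vx_lock(vp);
 *	... test VRECLAIMED, etc ...
 *	vx_unlock(vp);
 *	vdrop(vp);		(may move a VCACHED vnode to VFREE)
 */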

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we are blocked on the lock.
 */
void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on the
		 * vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 * other entities blocking on the VX lock while we hold it,
		 * but this does not prevent us from changing the vnode's
		 * state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 * how?
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 * or dirty pages in its cached VM object still present.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			vp->v_flag |= VINACTIVE;
			VOP_INACTIVE(vp);
		}
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			vp->v_flag |= VCACHED;	/* inactive but not yet free */
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on the
		 * VX lock.  Release the VX lock and release the (now active)
		 * last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}
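
/*
 * Vnode lifecycle sketch (illustrative summary, not part of the
 * original source):
 *
 *	active --(last vrele)--> vnode_terminate()
 *	   --> VINACTIVE + VFREE    (no auxrefs, no resident pages)
 *	   --> VINACTIVE + VCACHED  (otherwise)
 *
 * Either state can be reactivated via vget() until the vnode is
 * reclaimed and reused by allocfreevnode().
 */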

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */

void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 */

	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 */
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			vp->v_flag &= ~VCACHED;
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else {
			KKASSERT(sysref_isactive(&vp->v_sysref));
		}
		vp->v_flag &= ~VINACTIVE;
		error = 0;
	}
	return(error);
}

void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
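
/*
 * Example (illustrative sketch, not part of the original source): the
 * canonical ref+lock sequence for a vnode whose activation state is
 * unknown:
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... vnode is active, locked, and not VRECLAIMED ...
 *		vput(vp);	(unlock + vrele in one call)
 *	}
 *
 * A non-zero return (e.g. ENOENT) means the vnode was reclaimed or the
 * lock failed; the ref taken by vget() has already been undone.
 */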

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
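
/*
 * Example (illustrative sketch, not part of the original source):
 * vx_get()/vx_put() bracket work that must not reactivate the vnode,
 * such as inspecting or tearing down an inactive vnode:
 *
 *	vx_get(vp);		(ref + VX lock, no reactivation)
 *	... examine or reclaim ...
 *	vx_put(vp);		(may transition VCACHED -> VFREE)
 */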

/*
 * Misc functions
 */

void
vsetflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag |= flags;
	crit_exit();
}

void
vclrflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag &= ~flags;
	crit_exit();
}
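
/*
 * Example (illustrative sketch, not part of the original source): the
 * crit_enter()/crit_exit() bracket interlocks the read-modify-write of
 * v_flag against preemption on the local cpu, so callers simply write:
 *
 *	vsetflags(vp, VOBJDIRTY);
 *	...
 *	vclrflags(vp, VOBJDIRTY);
 */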

/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Note that regardless of how we block in this loop,
		 * we only get here if freevnodes != 0 so there
		 * had better be something on the list.
		 *
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * XXX NOT MP SAFE
		 */
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == &vnode_free_mid)
			vp = TAILQ_NEXT(vp, v_freelist);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			continue;
		}
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif

		/*
		 * With the vnode locked we can safely remove it
		 * from the free list.  We inherit the reference
		 * that was previously associated with the vnode
		 * being on the free list.
		 */
		KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
			  (VFREE|VINACTIVE));
		KKASSERT(sysref_isinactive(&vp->v_sysref));
		__vbusy(vp);

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes are created with the VAGE flags set.  An open() of the
 * vnode will decrement the (2-bit) age counter.  Vnodes which are opened
 * several times are thus retained in the cache over vnodes which are
 * merely stat()d.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This case only arises
	 * in certain large-memory (2G+) situations.  We cannot attempt
	 * to directly reclaim vnodes due to nasty recursion problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, so vnode fields are only
	 * zeroed on initial allocation from the backing store, not on
	 * reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_FIRST(&vp->v_namecache) == NULL);
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
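
/*
 * Example (illustrative sketch, not part of the original source): a
 * filesystem normally obtains a fresh vnode through a wrapper such as
 * getnewvnode(), which calls allocvnode() and then attaches the vnode
 * ops and per-fs data before releasing the VX lock:
 *
 *	vp = allocvnode(0, 0);		(returned VX locked + refd)
 *	vp->v_type = VREG;
 *	vp->v_data = ip;		(hypothetical per-fs inode)
 *	... associate with the mount, then unlock once visible ...
 */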

int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;	/* report how many vnodes were actually freed */
	}
	return(count);
}
704