xref: /dragonfly/sys/kern/vfs_lock.c (revision cc93b0eb)
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
		&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
		&wantfreevnodes, 0, "");

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
}

/*
 * Inline helper functions.  __vbusy() and __vfree() must be called while
 * in a critical section.
 *
 * Warning: these must be callable if the caller holds a read spinlock to
 * something else, meaning we can't use read spinlocks here.
 */
static __inline
void
__vbusy(struct vnode *vp)
{
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	vp->v_flag &= ~(VFREE|VAGE);
}

static __inline
void
__vfree(struct vnode *vp)
{
	if (vp->v_flag & (VAGE|VRECLAIMED))
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
}

static __inline
void
__vfreetail(struct vnode *vp)
{
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}

/*
 * Return a C boolean indicating whether the vnode should be placed on
 * the freelist (VFREE), as opposed to being left alone / marked VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
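
/*
 * Hypothetical usage sketch: a caller that already holds an active
 * reference (e.g. from a prior vget()) can take and drop extra refs
 * without touching the vnode lock.  vp here is a stand-in for any
 * known-active vnode.
 *
 *	vref(vp);		add a ref; vp must not be VFREE/VINACTIVE
 *	... use vp ...
 *	vrele(vp);		dropping the last active ref runs
 *				vnode_terminate()
 */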

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode and do not prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		/*vp->v_flag |= VAGE;*/
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
}
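
/*
 * Hypothetical usage sketch: pin a vnode with an auxiliary reference
 * while stashing it in a side structure.  The hold does not keep the
 * vnode active, it only prevents it from being destroyed.
 *
 *	vhold(vp);		vp may still be deactivated/reclaimed,
 *				but cannot be destroyed
 *	... record vp in an auxiliary data structure ...
 *	vdrop(vp);		may complete a VCACHED->VFREE transition
 */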

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for the vnode to have been
 * reactivated while we were blocked on the lock.
 */
void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on the
		 * vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 * other entities blocking on the VX lock while we hold it,
		 * but this does not prevent us from changing the vnode's
		 * state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 * how?
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 * or dirty pages in its cached VM object still present.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			vp->v_flag |= VINACTIVE;
			VOP_INACTIVE(vp);
		}
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			vp->v_flag |= VCACHED;	/* inactive but not yet free */
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on the
		 * VX lock.  Release the VX lock and release the (now active)
		 * last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_pollinfo.vpi_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */

void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
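
/*
 * Hypothetical usage sketch: with an auxiliary reference held, the VX
 * lock freezes a vnode so its state can be examined without the vnode
 * being reactivated, reclaimed, or reused underneath us.
 *
 *	vhold(vp);
 *	vx_lock(vp);
 *	... test vp->v_flag (VRECLAIMED etc), inspect vp->v_data ...
 *	vx_unlock(vp);
 *	vdrop(vp);
 */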

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 */

	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 */
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			vp->v_flag &= ~VCACHED;
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else {
			KKASSERT(sysref_isactive(&vp->v_sysref));
		}
		vp->v_flag &= ~VINACTIVE;
		error = 0;
	}
	return(error);
}

void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
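
/*
 * Hypothetical usage sketch: reactivate a vnode of unknown state found
 * via the namecache or free list.  vget() both refs and locks the
 * vnode; vput() undoes both.
 *
 *	if ((error = vget(vp, LK_SHARED)) == 0) {
 *		... vp is referenced, locked, and active ...
 *		vput(vp);
 *	}
 *
 * vget() returns ENOENT if the vnode was reclaimed while we slept on
 * the vnode lock.
 */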

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		/*vp->v_flag |= VAGE;*/
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
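
/*
 * Hypothetical usage sketch: ref and VX-lock a vnode without
 * reactivating it, e.g. to examine or tear down an inactive vnode.
 *
 *	vx_get(vp);
 *	... vp cannot be reactivated, reclaimed, or reused by others ...
 *	vx_put(vp);		may complete a VCACHED->VFREE transition
 */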

/*
 * Misc functions
 */

void
vsetflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag |= flags;
	crit_exit();
}

void
vclrflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag &= ~flags;
	crit_exit();
}

/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Note that regardless of how we block in this loop,
		 * we only get here if freevnodes != 0 so there
		 * had better be something on the list.
		 *
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * XXX NOT MP SAFE
		 */
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			continue;
		}

		/*
		 * With the vnode locked we can safely remove it
		 * from the free list.  We inherit the reference
		 * that was previously associated with the vnode
		 * being on the free list.
		 */
		KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
			  (VFREE|VINACTIVE));
		KKASSERT(sysref_isinactive(&vp->v_sysref));
		__vbusy(vp);

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, so vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (vp->v_track_read.bk_active + vp->v_track_write.bk_active)
		panic("Clean vnode has pending I/O's");
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = 0;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_FIRST(&vp->v_namecache) == NULL);
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
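
/*
 * Hypothetical caller sketch (details are filesystem-specific; the
 * field values below are stand-ins): allocvnode() returns a VX locked,
 * referenced vnode which the caller must finish initializing before
 * exposing it to the rest of the system.
 *
 *	vp = allocvnode(0, 0);
 *	vp->v_type = VREG;
 *	vp->v_data = my_inode;		my_inode is a hypothetical pointer
 *	... associate vp with a mount and vop vector ...
 */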

int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		++count;		/* count vnodes actually freed */
		vx_put(vp);
		--numvnodes;
	}
	return(count);
}