xref: /dragonfly/sys/kern/vfs_cache.c (revision 23265324)
1 /*
2  * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * Copyright (c) 1989, 1993, 1995
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * This code is derived from software contributed to Berkeley by
38  * Poul-Henning Kamp of the FreeBSD Project.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *	This product includes software developed by the University of
51  *	California, Berkeley and its contributors.
52  * 4. Neither the name of the University nor the names of its contributors
53  *    may be used to endorse or promote products derived from this software
54  *    without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66  * SUCH DAMAGE.
67  *
68  *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
69  * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
70  * $DragonFly: src/sys/kern/vfs_cache.c,v 1.80 2006/12/23 00:35:04 swildner Exp $
71  */
72 
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/mount.h>
78 #include <sys/vnode.h>
79 #include <sys/malloc.h>
80 #include <sys/sysproto.h>
81 #include <sys/proc.h>
82 #include <sys/namei.h>
83 #include <sys/nlookup.h>
84 #include <sys/filedesc.h>
85 #include <sys/fnv_hash.h>
86 #include <sys/globaldata.h>
87 #include <sys/kern_syscall.h>
88 #include <sys/dirent.h>
89 #include <ddb/ddb.h>
90 
91 #define MAX_RECURSION_DEPTH	64
92 
93 /*
94  * Random lookups in the cache are accomplished with a hash table using
95  * a hash key of (nc_parent, name).
96  *
97  * Negative entries may exist and correspond to structures where nc_vp
98  * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
99  * corresponds to a whited-out directory entry (versus simply not finding the
100  * entry at all).
101  *
102  * Upon reaching the last segment of a path, if the reference is for DELETE,
103  * or NOCACHE is set (rewrite), and the name is located in the cache, it
104  * will be dropped.
105  */
106 
107 /*
108  * Structures associated with name caching.
109  */
110 #define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
111 #define MINNEG		1024
112 
113 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
114 
115 static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
116 static struct namecache_list	ncneglist;		/* negative entries (no vnode) */
117 
118 /*
119  * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
120  * to create the namecache infrastructure leading to a dangling vnode.
121  *
122  * 0	Only errors are reported
123  * 1	Successes are reported
124  * 2	Successes + the whole directory scan is reported
125  * 3	Force the directory scan code to run as if the parent vnode did not
126  *	have a namecache record, even if it does have one.
127  */
128 static int	ncvp_debug;
129 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
130 
131 static u_long	nchash;			/* size of hash table */
132 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
133 
134 static u_long	ncnegfactor = 16;	/* ratio of negative entries */
135 SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
136 
137 static int	nclockwarn;		/* ticks to wait before warning on a locked entry */
138 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");
139 
140 static u_long	numneg;		/* number of negative cache entries allocated */
141 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
142 
143 static u_long	numcache;		/* number of cache entries allocated */
144 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
145 
146 static u_long	numunres;		/* number of unresolved entries */
147 SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");
148 
149 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
150 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
151 
152 static int cache_resolve_mp(struct mount *mp);
153 static void _cache_rehash(struct namecache *ncp);
154 static void _cache_lock(struct namecache *ncp);
155 static void _cache_setunresolved(struct namecache *ncp);
156 
157 /*
158  * The new name cache statistics
159  */
160 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
161 #define STATNODE(mode, name, var) \
162 	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
163 STATNODE(CTLFLAG_RD, numneg, &numneg);
164 STATNODE(CTLFLAG_RD, numcache, &numcache);
165 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
166 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
167 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
168 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
169 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
170 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
171 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
172 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
173 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
174 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
175 
176 struct nchstats nchstats[SMP_MAXCPU];
177 /*
178  * Export VFS cache effectiveness statistics to user-land.
179  *
180  * The statistics are left for aggregation to user-land so
181  * neat things can be achieved, like observing per-CPU cache
182  * distribution.
183  */
184 static int
185 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
186 {
187 	struct globaldata *gd;
188 	int i, error;
189 
190 	error = 0;
191 	for (i = 0; i < ncpus; ++i) {
192 		gd = globaldata_find(i);
193 		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
194 			sizeof(struct nchstats))))
195 			break;
196 	}
197 
198 	return (error);
199 }
200 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
201   0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
202 
203 static void cache_zap(struct namecache *ncp);
204 
205 /*
206  * cache_hold() and cache_drop() prevent the premature deletion of a
207  * namecache entry but do not prevent operations (such as zapping) on
208  * that namecache entry.
209  *
210  * This routine may only be called from outside this source module if
211  * nc_refs is already at least 1.
212  *
213  * This is a rare case where callers are allowed to hold a spinlock,
214  * so we can't use one ourselves.
215  */
216 static __inline
217 struct namecache *
218 _cache_hold(struct namecache *ncp)
219 {
220 	atomic_add_int(&ncp->nc_refs, 1);
221 	return(ncp);
222 }
223 
224 /*
225  * When dropping an entry, if only one ref remains and the entry has not
226  * been resolved, zap it.  Since the one reference is being dropped the
227  * entry had better not be locked.
228  */
229 static __inline
230 void
231 _cache_drop(struct namecache *ncp)
232 {
233 	KKASSERT(ncp->nc_refs > 0);
234 	if (ncp->nc_refs == 1 &&
235 	    (ncp->nc_flag & NCF_UNRESOLVED) &&
236 	    TAILQ_EMPTY(&ncp->nc_list)
237 	) {
238 		KKASSERT(ncp->nc_exlocks == 0);
239 		_cache_lock(ncp);
240 		cache_zap(ncp);
241 	} else {
242 		atomic_subtract_int(&ncp->nc_refs, 1);
243 	}
244 }
245 
246 /*
247  * Link a new namecache entry to its parent.  Be careful to avoid races
248  * if vhold() blocks in the future.
249  */
250 static void
251 cache_link_parent(struct namecache *ncp, struct namecache *par)
252 {
253 	KKASSERT(ncp->nc_parent == NULL);
254 	ncp->nc_parent = par;
255 	if (TAILQ_EMPTY(&par->nc_list)) {
256 		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
257 		/*
258 		 * Any vp associated with an ncp which has children must
259 		 * be held to prevent it from being recycled.
260 		 */
261 		if (par->nc_vp)
262 			vhold(par->nc_vp);
263 	} else {
264 		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
265 	}
266 }
267 
268 /*
269  * Remove the parent association from a namecache structure.  If this is
270  * the last child of the parent, the cache_drop(par) call will attempt to
271  * recursively zap the parent.
272  */
273 static void
274 cache_unlink_parent(struct namecache *ncp)
275 {
276 	struct namecache *par;
277 
278 	if ((par = ncp->nc_parent) != NULL) {
279 		ncp->nc_parent = NULL;
280 		par = _cache_hold(par);
281 		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
282 		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
283 			vdrop(par->nc_vp);
284 		_cache_drop(par);
285 	}
286 }
287 
288 /*
289  * Allocate a new namecache structure.  Most of the code does not require
290  * zero-termination of the string but it makes vop_compat_ncreate() easier.
291  */
292 static struct namecache *
293 cache_alloc(int nlen)
294 {
295 	struct namecache *ncp;
296 
297 	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
298 	if (nlen)
299 		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
300 	ncp->nc_nlen = nlen;
301 	ncp->nc_flag = NCF_UNRESOLVED;
302 	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
303 	ncp->nc_refs = 1;
304 
305 	/*
306 	 * Construct a fake FSMID based on the time of day and a 32 bit
307 	 * roller for uniqueness.  This is used to generate a useful
308 	 * FSMID for filesystems which do not support it.
309 	 */
310 	ncp->nc_fsmid = cache_getnewfsmid();
311 	TAILQ_INIT(&ncp->nc_list);
312 	_cache_lock(ncp);
313 	return(ncp);
314 }
315 
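/*
 * Free a namecache structure allocated by cache_alloc().  The caller
 * must be holding the last remaining reference and the sole exclusive
 * lock, as asserted below.
 */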
316 static void
317 _cache_free(struct namecache *ncp)
318 {
319 	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
320 	if (ncp->nc_name)
321 		kfree(ncp->nc_name, M_VFSCACHE);
322 	kfree(ncp, M_VFSCACHE);
323 }
324 
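/*
 * Initialize an nchandle to the empty state.
 */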
325 void
326 cache_zero(struct nchandle *nch)
327 {
328 	nch->ncp = NULL;
329 	nch->mount = NULL;
330 }
331 
332 /*
333  * Ref and deref a namecache structure.
334  *
335  * Warning: caller may hold an unrelated read spinlock, which means we can't
336  * use read spinlocks here.
337  */
338 struct nchandle *
339 cache_hold(struct nchandle *nch)
340 {
341 	_cache_hold(nch->ncp);
342 	++nch->mount->mnt_refs;
343 	return(nch);
344 }
345 
346 void
347 cache_copy(struct nchandle *nch, struct nchandle *target)
348 {
349 	*target = *nch;
350 	_cache_hold(target->ncp);
351 	++nch->mount->mnt_refs;
352 }
353 
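/*
 * Re-target an nchandle at a different mount, moving the mount
 * reference from the old mount to the new one.
 */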
354 void
355 cache_changemount(struct nchandle *nch, struct mount *mp)
356 {
357 	--nch->mount->mnt_refs;
358 	nch->mount = mp;
359 	++nch->mount->mnt_refs;
360 }
361 
362 void
363 cache_drop(struct nchandle *nch)
364 {
365 	--nch->mount->mnt_refs;
366 	_cache_drop(nch->ncp);
367 	nch->ncp = NULL;
368 	nch->mount = NULL;
369 }
370 
371 /*
372  * Namespace locking.  The caller must already hold a reference to the
373  * namecache structure in order to lock/unlock it.  This function prevents
374  * the namespace from being created or destroyed by accessors other than
375  * the lock holder.
376  *
377  * Note that holding a locked namecache structure prevents other threads
378  * from making namespace changes (e.g. deleting or creating), prevents
379  * vnode association state changes by other threads, and prevents the
380  * namecache entry from being resolved or unresolved by other threads.
381  *
382  * The lock owner has full authority to associate/disassociate vnodes
383  * and resolve/unresolve the locked ncp.
384  *
385  * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
386  * or recycled, but it does NOT help you if the vnode had already initiated
387  * a recyclement.  If this is important, use cache_get() rather than
388  * cache_lock() (and deal with the differences in the way the refs counter
389  * is handled).  Or, alternatively, make an unconditional call to
390  * cache_validate() or cache_resolve() after cache_lock() returns.
391  */
392 static
393 void
394 _cache_lock(struct namecache *ncp)
395 {
396 	thread_t td;
397 	int didwarn;
398 
399 	KKASSERT(ncp->nc_refs != 0);
400 	didwarn = 0;
401 	td = curthread;
402 
403 	for (;;) {
404 		if (ncp->nc_exlocks == 0) {
405 			ncp->nc_exlocks = 1;
406 			ncp->nc_locktd = td;
407 			/*
408 			 * The vp associated with a locked ncp must be held
409 			 * to prevent it from being recycled (which would
410 			 * cause the ncp to become unresolved).
411 			 *
412 			 * WARNING!  If VRECLAIMED is set the vnode could
413 			 * already be in the middle of a recycle.  Callers
414 			 * should not assume that nc_vp is usable when
415 			 * not NULL.  cache_vref() or cache_vget() must be
416 			 * called.
417 			 *
418 			 * XXX loop on race for later MPSAFE work.
419 			 */
420 			if (ncp->nc_vp)
421 				vhold(ncp->nc_vp);
422 			break;
423 		}
424 		if (ncp->nc_locktd == td) {
425 			++ncp->nc_exlocks;
426 			break;
427 		}
428 		ncp->nc_flag |= NCF_LOCKREQ;
429 		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
430 			if (didwarn)
431 				continue;
432 			didwarn = 1;
433 			kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
434 			kprintf(" \"%*.*s\"\n",
435 				ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
436 		}
437 	}
438 
439 	if (didwarn == 1) {
440 		kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
441 			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
442 	}
443 }
444 
445 void
446 cache_lock(struct nchandle *nch)
447 {
448 	_cache_lock(nch->ncp);
449 }
450 
451 static
452 int
453 _cache_lock_nonblock(struct namecache *ncp)
454 {
455 	thread_t td;
456 
457 	KKASSERT(ncp->nc_refs != 0);
458 	td = curthread;
459 	if (ncp->nc_exlocks == 0) {
460 		ncp->nc_exlocks = 1;
461 		ncp->nc_locktd = td;
462 		/*
463 		 * The vp associated with a locked ncp must be held
464 		 * to prevent it from being recycled (which would
465 		 * cause the ncp to become unresolved).
466 		 *
467 		 * WARNING!  If VRECLAIMED is set the vnode could
468 		 * already be in the middle of a recycle.  Callers
469 		 * should not assume that nc_vp is usable when
470 		 * not NULL.  cache_vref() or cache_vget() must be
471 		 * called.
472 		 *
473 		 * XXX loop on race for later MPSAFE work.
474 		 */
475 		if (ncp->nc_vp)
476 			vhold(ncp->nc_vp);
477 		return(0);
478 	} else {
479 		return(EWOULDBLOCK);
480 	}
481 }
482 
483 int
484 cache_lock_nonblock(struct nchandle *nch)
485 {
486 	return(_cache_lock_nonblock(nch->ncp));
487 }
488 
489 static
490 void
491 _cache_unlock(struct namecache *ncp)
492 {
493 	thread_t td = curthread;
494 
495 	KKASSERT(ncp->nc_refs > 0);
496 	KKASSERT(ncp->nc_exlocks > 0);
497 	KKASSERT(ncp->nc_locktd == td);
498 	if (--ncp->nc_exlocks == 0) {
499 		if (ncp->nc_vp)
500 			vdrop(ncp->nc_vp);
501 		ncp->nc_locktd = NULL;
502 		if (ncp->nc_flag & NCF_LOCKREQ) {
503 			ncp->nc_flag &= ~NCF_LOCKREQ;
504 			wakeup(ncp);
505 		}
506 	}
507 }
508 
509 void
510 cache_unlock(struct nchandle *nch)
511 {
512 	_cache_unlock(nch->ncp);
513 }
514 
515 /*
516  * ref-and-lock, unlock-and-deref functions.
517  *
518  * This function is primarily used by nlookup.  Even though cache_lock
519  * holds the vnode, it is possible that the vnode may have already
520  * initiated a recyclement.  We want cache_get() to return a definitively
521  * usable vnode or a definitively unresolved ncp.
522  */
523 static
524 struct namecache *
525 _cache_get(struct namecache *ncp)
526 {
527 	_cache_hold(ncp);
528 	_cache_lock(ncp);
529 	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
530 		_cache_setunresolved(ncp);
531 	return(ncp);
532 }
533 
534 /*
535  * note: the same nchandle can be passed for both arguments.
536  */
537 void
538 cache_get(struct nchandle *nch, struct nchandle *target)
539 {
540 	target->mount = nch->mount;
541 	target->ncp = _cache_get(nch->ncp);
542 	++target->mount->mnt_refs;
543 }
544 
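/*
 * Non-blocking version of _cache_get().  Returns 0 on success or
 * EWOULDBLOCK if the entry is exclusively locked by another thread.
 */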
545 static int
546 _cache_get_nonblock(struct namecache *ncp)
547 {
548 	/* XXX MP */
549 	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
550 		_cache_hold(ncp);
551 		_cache_lock(ncp);
552 		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
553 			_cache_setunresolved(ncp);
554 		return(0);
555 	}
556 	return(EWOULDBLOCK);
557 }
558 
559 int
560 cache_get_nonblock(struct nchandle *nch)
561 {
562 	return(_cache_get_nonblock(nch->ncp));
563 }
564 
565 static __inline
566 void
567 _cache_put(struct namecache *ncp)
568 {
569 	_cache_unlock(ncp);
570 	_cache_drop(ncp);
571 }
572 
573 void
574 cache_put(struct nchandle *nch)
575 {
576 	--nch->mount->mnt_refs;
577 	_cache_put(nch->ncp);
578 	nch->ncp = NULL;
579 	nch->mount = NULL;
580 }
581 
582 /*
583  * Resolve an unresolved ncp by associating a vnode with it.  If the
584  * vnode is NULL, a negative cache entry is created.
585  *
586  * The ncp should be locked on entry and will remain locked on return.
587  */
588 static
589 void
590 _cache_setvp(struct namecache *ncp, struct vnode *vp)
591 {
592 	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
593 	ncp->nc_vp = vp;
594 	if (vp != NULL) {
595 		/*
596 		 * Any vp associated with an ncp which has children must
597 		 * be held.  Any vp associated with a locked ncp must be held.
598 		 */
599 		if (!TAILQ_EMPTY(&ncp->nc_list))
600 			vhold(vp);
601 		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
602 		if (ncp->nc_exlocks)
603 			vhold(vp);
604 
605 		/*
606 		 * Set auxiliary flags
607 		 */
608 		switch(vp->v_type) {
609 		case VDIR:
610 			ncp->nc_flag |= NCF_ISDIR;
611 			break;
612 		case VLNK:
613 			ncp->nc_flag |= NCF_ISSYMLINK;
614 			/* XXX cache the contents of the symlink */
615 			break;
616 		default:
617 			break;
618 		}
619 		++numcache;
620 		ncp->nc_error = 0;
621 	} else {
622 		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
623 		++numneg;
624 		ncp->nc_error = ENOENT;
625 	}
626 	ncp->nc_flag &= ~NCF_UNRESOLVED;
627 }
628 
629 void
630 cache_setvp(struct nchandle *nch, struct vnode *vp)
631 {
632 	_cache_setvp(nch->ncp, vp);
633 }
634 
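/*
 * Arm an expiration timeout, in ticks, on a resolved namecache entry.
 * Since a stored nc_timeout of 0 means 'no timeout', a computed value
 * of 0 is bumped to 1.
 */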
635 void
636 cache_settimeout(struct nchandle *nch, int nticks)
637 {
638 	struct namecache *ncp = nch->ncp;
639 
640 	if ((ncp->nc_timeout = ticks + nticks) == 0)
641 		ncp->nc_timeout = 1;
642 }
643 
644 /*
645  * Disassociate the vnode or negative-cache association and mark a
646  * namecache entry as unresolved again.  Note that the ncp is still
647  * left in the hash table and still linked to its parent.
648  *
649  * The ncp should be locked and refd on entry and will remain locked and refd
650  * on return.
651  *
652  * This routine is normally never called on a directory containing children.
653  * However, NFS often does just that in its rename() code as a cop-out to
654  * avoid complex namespace operations.  This disconnects a directory vnode
655  * from its namecache and can cause the OLDAPI and NEWAPI to get out of
656  * sync.
657  *
658  * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
659  * in a create, properly propagates the flag up the chain.
660  */
661 static
662 void
663 _cache_setunresolved(struct namecache *ncp)
664 {
665 	struct vnode *vp;
666 
667 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
668 		ncp->nc_flag |= NCF_UNRESOLVED;
669 		ncp->nc_timeout = 0;
670 		ncp->nc_error = ENOTCONN;
671 		++numunres;
672 		if ((vp = ncp->nc_vp) != NULL) {
673 			--numcache;
674 			ncp->nc_vp = NULL;
675 			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
676 
677 			/*
678 			 * Any vp associated with an ncp with children is
679 			 * held by that ncp.  Any vp associated with a locked
680 			 * ncp is held by that ncp.  These conditions must be
681 			 * undone when the vp is cleared out from the ncp.
682 			 */
683 			if (ncp->nc_flag & NCF_FSMID)
684 				vupdatefsmid(vp);
685 			if (!TAILQ_EMPTY(&ncp->nc_list))
686 				vdrop(vp);
687 			if (ncp->nc_exlocks)
688 				vdrop(vp);
689 		} else {
690 			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
691 			--numneg;
692 		}
693 		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
694 				  NCF_FSMID);
695 	}
696 }
697 
698 void
699 cache_setunresolved(struct nchandle *nch)
700 {
701 	_cache_setunresolved(nch->ncp);
702 }
703 
704 /*
705  * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
706  * looking for matches.  This flag tells the lookup code when it must
707  * check for a mount linkage and also prevents the directories in question
708  * from being deleted or renamed.
709  */
710 static
711 int
712 cache_clrmountpt_callback(struct mount *mp, void *data)
713 {
714 	struct nchandle *nch = data;
715 
716 	if (mp->mnt_ncmounton.ncp == nch->ncp)
717 		return(1);
718 	if (mp->mnt_ncmountpt.ncp == nch->ncp)
719 		return(1);
720 	return(0);
721 }
722 
723 void
724 cache_clrmountpt(struct nchandle *nch)
725 {
726 	int count;
727 
728 	count = mountlist_scan(cache_clrmountpt_callback, nch,
729 			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
730 	if (count == 0)
731 		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
732 }
733 
734 /*
735  * Invalidate portions of the namecache topology given a starting entry.
736  * The passed ncp is set to an unresolved state.
737  *
738  * The passed ncp must be locked.  Flags:
739  *
740  * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
741  *			  that the physical underlying nodes have been
742  *			  destroyed... as in deleted.  For example, when
743  *			  a directory is removed.  This will cause record
744  *			  lookups on the name to no longer be able to find
745  *			  the record and tells the resolver to return failure
746  *			  rather than trying to resolve through the parent.
747  *
748  *			  The topology itself, including ncp->nc_name,
749  *			  remains intact.
750  *
751  *			  This only applies to the passed ncp; even if
752  *			  CINV_CHILDREN is specified the children are not flagged.
753  *
754  * CINV_CHILDREN	- Set all children (recursively) to an unresolved
755  *			  state as well.
756  *
757  *			  Note that this will also have the side effect of
758  *			  cleaning out any unreferenced nodes in the topology
759  *			  from the leaves up as the recursion backs out.
760  *
761  * Note that the topology for any referenced nodes remains intact.
762  *
763  * It is possible for cache_inval() to race a cache_resolve(), meaning that
764  * the namecache entry may not actually be invalidated on return if it was
765  * revalidated while recursing down into its children.  This code guarantees
766  * that the node(s) will go through an invalidation cycle, but does not
767  * guarantee that they will remain in an invalidated state.
768  *
769  * Returns non-zero if a revalidation was detected during the invalidation
770  * recursion, zero otherwise.  Note that since only the original ncp is
771  * locked, the revalidation ultimately can only indicate that the original
772  * ncp *MIGHT* have been re-resolved.
773  *
774  * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
775  * have to avoid blowing out the kernel stack.  We do this by saving the
776  * deep namecache node and aborting the recursion, then re-recursing at that
777  * node using a depth-first algorithm in order to allow multiple deep
778  * recursions to chain through each other, then we restart the invalidation
779  * from scratch.
780  */
781 
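/*
 * Tracking structure for the deep-recursion handling described above.
 */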
782 struct cinvtrack {
783 	struct namecache *resume_ncp;
784 	int depth;
785 };
786 
787 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
788 
789 static
790 int
791 _cache_inval(struct namecache *ncp, int flags)
792 {
793 	struct cinvtrack track;
794 	struct namecache *ncp2;
795 	int r;
796 
797 	track.depth = 0;
798 	track.resume_ncp = NULL;
799 
800 	for (;;) {
801 		r = _cache_inval_internal(ncp, flags, &track);
802 		if (track.resume_ncp == NULL)
803 			break;
804 		kprintf("Warning: deep namecache recursion at %s\n",
805 			ncp->nc_name);
806 		_cache_unlock(ncp);
807 		while ((ncp2 = track.resume_ncp) != NULL) {
808 			track.resume_ncp = NULL;
809 			_cache_lock(ncp2);
810 			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
811 					     &track);
812 			_cache_put(ncp2);
813 		}
814 		_cache_lock(ncp);
815 	}
816 	return(r);
817 }
818 
819 int
820 cache_inval(struct nchandle *nch, int flags)
821 {
822 	return(_cache_inval(nch->ncp, flags));
823 }
824 
825 static int
826 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
827 {
828 	struct namecache *kid;
829 	struct namecache *nextkid;
830 	int rcnt = 0;
831 
832 	KKASSERT(ncp->nc_exlocks);
833 
834 	_cache_setunresolved(ncp);
835 	if (flags & CINV_DESTROY)
836 		ncp->nc_flag |= NCF_DESTROYED;
837 
838 	if ((flags & CINV_CHILDREN) &&
839 	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
840 	) {
841 		if (++track->depth > MAX_RECURSION_DEPTH) {
842 			track->resume_ncp = ncp;
843 			_cache_hold(ncp);
844 			++rcnt;
845 		}
846 		_cache_hold(kid);
847 		_cache_unlock(ncp);
848 		while (kid) {
849 			if (track->resume_ncp) {
850 				_cache_drop(kid);
851 				break;
852 			}
853 			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
854 				_cache_hold(nextkid);
855 			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
856 			    TAILQ_FIRST(&kid->nc_list)
857 			) {
858 				_cache_lock(kid);
859 				rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
860 				_cache_unlock(kid);
861 			}
862 			_cache_drop(kid);
863 			kid = nextkid;
864 		}
865 		--track->depth;
866 		_cache_lock(ncp);
867 	}
868 
869 	/*
870 	 * Someone could have gotten in there while ncp was unlocked,
871 	 * retry if so.
872 	 */
873 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
874 		++rcnt;
875 	return (rcnt);
876 }
877 
878 /*
879  * Invalidate a vnode's namecache associations.  To avoid races against
880  * the resolver we do not invalidate a node which we previously invalidated
881  * but which was then re-resolved while we were in the invalidation loop.
882  *
883  * Returns non-zero if any namecache entries remain after the invalidation
884  * loop completed.
885  *
886  * NOTE: unlike the namecache topology which guarantees that ncp's will not
887  * be ripped out of the topology while held, the vnode's v_namecache list
888  * has no such restriction.  NCP's can be ripped out of the list at virtually
889  * any time if not locked, even if held.
890  */
891 int
892 cache_inval_vp(struct vnode *vp, int flags)
893 {
894 	struct namecache *ncp;
895 	struct namecache *next;
896 
897 restart:
898 	ncp = TAILQ_FIRST(&vp->v_namecache);
899 	if (ncp)
900 		_cache_hold(ncp);
901 	while (ncp) {
902 		/* loop entered with ncp held */
903 		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
904 			_cache_hold(next);
905 		_cache_lock(ncp);
906 		if (ncp->nc_vp != vp) {
907 			kprintf("Warning: cache_inval_vp: race-A detected on "
908 				"%s\n", ncp->nc_name);
909 			_cache_put(ncp);
910 			if (next)
911 				_cache_drop(next);
912 			goto restart;
913 		}
914 		_cache_inval(ncp, flags);
915 		_cache_put(ncp);		/* also releases reference */
916 		ncp = next;
917 		if (ncp && ncp->nc_vp != vp) {
918 			kprintf("Warning: cache_inval_vp: race-B detected on "
919 				"%s\n", ncp->nc_name);
920 			_cache_drop(ncp);
921 			goto restart;
922 		}
923 	}
924 	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
925 }
926 
927 /*
928  * The source ncp has been renamed to the target ncp.  Both fncp and tncp
929  * must be locked.  Both will be set to unresolved, any children of tncp
930  * will be disconnected (the prior contents of the target is assumed to be
931  * destroyed by the rename operation, e.g. renaming over an empty directory),
932  * and all children of fncp will be moved to tncp.
933  *
934  * XXX the disconnection could pose a problem, check code paths to make
935  * sure any code that blocks can handle the parent being changed out from
936  * under it.  Maybe we should lock the children (watch out for deadlocks) ?
937  *
938  * After we return the caller has the option of calling cache_setvp() if
939  * the vnode of the new target ncp is known.
940  *
941  * Any process CD'd into any of the children will no longer be able to ".."
942  * back out.  An rm -rf can cause this situation to occur.
943  */
944 void
945 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
946 {
947 	struct namecache *fncp = fnch->ncp;
948 	struct namecache *tncp = tnch->ncp;
949 	struct namecache *scan;
950 	int didwarn = 0;
951 
952 	_cache_setunresolved(fncp);
953 	_cache_setunresolved(tncp);
954 	while (_cache_inval(tncp, CINV_CHILDREN) != 0) {
955 		if (didwarn++ % 10 == 0) {
956 			kprintf("Warning: cache_rename: race during "
957 				"rename %s->%s\n",
958 				fncp->nc_name, tncp->nc_name);
959 		}
960 		tsleep(tncp, 0, "mvrace", hz / 10);
961 		_cache_setunresolved(tncp);
962 	}
963 	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
964 		_cache_hold(scan);
965 		cache_unlink_parent(scan);
966 		cache_link_parent(scan, tncp);
967 		if (scan->nc_flag & NCF_HASHED)
968 			_cache_rehash(scan);
969 		_cache_drop(scan);
970 	}
971 }
972 
973 /*
974  * vget the vnode associated with the namecache entry.  Resolve the namecache
975  * entry if necessary and deal with namecache/vp races.  The passed ncp must
976  * be referenced and may be locked.  The ncp's ref/locking state is not
977  * effected by this call.
978  *
979  * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
980  * (depending on the passed lk_type) will be returned in *vpp with an error
981  * of 0, or NULL will be returned in *vpp with a non-0 error code.  The
982  * most typical error is ENOENT, meaning that the ncp represents a negative
983  * cache hit and there is no vnode to retrieve, but other errors can occur
984  * too.
985  *
986  * The main race we have to deal with is the namecache zap.  The ncp itself
987  * will not disappear since it is referenced, and it turns out that the
988  * validity of the vp pointer can be checked simply by rechecking the
989  * contents of ncp->nc_vp.
990  */
991 int
992 cache_vget(struct nchandle *nch, struct ucred *cred,
993 	   int lk_type, struct vnode **vpp)
994 {
995 	struct namecache *ncp;
996 	struct vnode *vp;
997 	int error;
998 
999 	ncp = nch->ncp;
1000 again:
1001 	vp = NULL;
1002 	if (ncp->nc_flag & NCF_UNRESOLVED) {
1003 		_cache_lock(ncp);
1004 		error = cache_resolve(nch, cred);
1005 		_cache_unlock(ncp);
1006 	} else {
1007 		error = 0;
1008 	}
1009 	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1010 		/*
1011 		 * Accessing the vnode from the namecache is a bit
1012 		 * dangerous.  Because there are no refs on the vnode, it
1013 		 * could be in the middle of a reclaim.
1014 		 */
1015 		if (vp->v_flag & VRECLAIMED) {
1016 			kprintf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp, ncp->nc_name);
1017 			_cache_lock(ncp);
1018 			_cache_setunresolved(ncp);
1019 			_cache_unlock(ncp);
1020 			goto again;
1021 		}
1022 		error = vget(vp, lk_type);
1023 		if (error) {
1024 			if (vp != ncp->nc_vp)
1025 				goto again;
1026 			vp = NULL;
1027 		} else if (vp != ncp->nc_vp) {
1028 			vput(vp);
1029 			goto again;
1030 		} else if (vp->v_flag & VRECLAIMED) {
1031 			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
1032 		}
1033 	}
1034 	if (error == 0 && vp == NULL)
1035 		error = ENOENT;
1036 	*vpp = vp;
1037 	return(error);
1038 }
1039 
1040 int
1041 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
1042 {
1043 	struct namecache *ncp;
1044 	struct vnode *vp;
1045 	int error;
1046 
1047 	ncp = nch->ncp;
1048 
1049 again:
1050 	vp = NULL;
1051 	if (ncp->nc_flag & NCF_UNRESOLVED) {
1052 		_cache_lock(ncp);
1053 		error = cache_resolve(nch, cred);
1054 		_cache_unlock(ncp);
1055 	} else {
1056 		error = 0;
1057 	}
1058 	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1059 		/*
1060 		 * Since we did not obtain any locks, a cache zap
1061 		 * race can occur here if the vnode is in the middle
1062 		 * of being reclaimed and has not yet been able to
1063 		 * clean out its cache node.  If that case occurs,
1064 		 * we must lock and unresolve the cache, then loop
1065 		 * to retry.
1066 		 */
1067 		if (vp->v_flag & VRECLAIMED) {
1068 			kprintf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp, ncp->nc_name);
1069 			_cache_lock(ncp);
1070 			_cache_setunresolved(ncp);
1071 			_cache_unlock(ncp);
1072 			goto again;
1073 		}
1074 		vref_initial(vp, 1);
1075 	}
1076 	if (error == 0 && vp == NULL)
1077 		error = ENOENT;
1078 	*vpp = vp;
1079 	return(error);
1080 }
1081 
1082 /*
1083  * Recursively set the FSMID update flag for namecache nodes leading
1084  * to root.  This will cause the next getattr or reclaim to increment the
1085  * fsmid and mark the inode for lazy updating.
1086  *
1087  * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
1088  * This makes FSMIDs work in an Einsteinian fashion - where the observation
1089  * affects the result.  In this case a program monitoring a higher level
1090  * node will have detected some prior change and started its scan (clearing
1091  * NCF_FSMID in higher level nodes), but since it has not yet observed the
1092  * node where we find NCF_FSMID still set, we can safely make the related
1093  * modification without interfering with the theorized program.
1094  *
1095  * This also means that FSMIDs cannot represent time-domain quantities
1096  * in a hierarchical sense.  But the main reason for doing it this way
1097  * is to reduce the amount of recursion that occurs in the critical path
1098  * when e.g. a program is writing to a file that sits deep in a directory
1099  * hierarchy.
1100  */
1101 void
1102 cache_update_fsmid(struct nchandle *nch)
1103 {
1104 	struct namecache *ncp;
1105 	struct namecache *scan;
1106 	struct vnode *vp;
1107 
1108 	ncp = nch->ncp;
1109 
1110 	/*
1111 	 * Warning: even if we get a non-NULL vp it could still be in the
1112 	 * middle of a recyclement.  Don't do anything fancy, just set
1113 	 * NCF_FSMID.
1114 	 */
1115 	if ((vp = ncp->nc_vp) != NULL) {
1116 		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1117 			for (scan = ncp; scan; scan = scan->nc_parent) {
1118 				if (scan->nc_flag & NCF_FSMID)
1119 					break;
1120 				scan->nc_flag |= NCF_FSMID;
1121 			}
1122 		}
1123 	} else {
1124 		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
1125 			ncp->nc_flag |= NCF_FSMID;
1126 			ncp = ncp->nc_parent;
1127 		}
1128 	}
1129 }
1130 
1131 void
1132 cache_update_fsmid_vp(struct vnode *vp)
1133 {
1134 	struct namecache *ncp;
1135 	struct namecache *scan;
1136 
1137 	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1138 		for (scan = ncp; scan; scan = scan->nc_parent) {
1139 			if (scan->nc_flag & NCF_FSMID)
1140 				break;
1141 			scan->nc_flag |= NCF_FSMID;
1142 		}
1143 	}
1144 }
1145 
1146 /*
1147  * If getattr is called on a vnode (e.g. a stat call), the filesystem
1148  * may call this routine to determine if the namecache has the hierarchical
1149  * change flag set, requiring the fsmid to be updated.
1150  *
1151  * Since 0 indicates no support, make sure the filesystem fsmid is at least
1152  * 1.
1153  */
1154 int
1155 cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
1156 {
1157 	struct namecache *ncp;
1158 	int changed = 0;
1159 
1160 	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1161 		if (ncp->nc_flag & NCF_FSMID) {
1162 			ncp->nc_flag &= ~NCF_FSMID;
1163 			changed = 1;
1164 		}
1165 	}
1166 	if (*fsmid == 0)
1167 		++*fsmid;
1168 	if (changed)
1169 		++*fsmid;
1170 	return(changed);
1171 }
1172 
1173 /*
1174  * Obtain the FSMID for a vnode for filesystems which do not support
1175  * a built-in FSMID.
1176  */
1177 int64_t
1178 cache_sync_fsmid_vp(struct vnode *vp)
1179 {
1180 	struct namecache *ncp;
1181 
1182 	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
1183 		if (ncp->nc_flag & NCF_FSMID) {
1184 			ncp->nc_flag &= ~NCF_FSMID;
1185 			++ncp->nc_fsmid;
1186 		}
1187 		return(ncp->nc_fsmid);
1188 	}
1189 	return(VNOVAL);
1190 }
1191 
1192 /*
1193  * Convert a directory vnode to a namecache record without any other
1194  * knowledge of the topology.  This ONLY works with directory vnodes and
1195  * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
1196  * returned ncp (if not NULL) will be held and unlocked.
1197  *
1198  * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1199  * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1200  * for dvp.  This will fail only if the directory has been deleted out from
1201  * under the caller.
1202  *
1203  * Callers must always check for a NULL return no matter the value of 'makeit'.
1204  *
1205  * To avoid blowing out the kernel stack, each recursive call increments
1206  * the makeit variable.
1207  */
1208 
1209 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1210 				  struct vnode *dvp);
1211 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1212 				  struct vnode **saved_dvp);
1213 
1214 int
1215 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
1216 	      struct nchandle *nch)
1217 {
1218 	struct vnode *saved_dvp;
1219 	struct vnode *pvp;
1220 	int error;
1221 
1222 	nch->ncp = NULL;
1223 	nch->mount = dvp->v_mount;
1224 	saved_dvp = NULL;
1225 
1226 	/*
1227 	 * Temporary debugging code to force the directory scanning code
1228 	 * to be exercised.
1229 	 */
1230 	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
1231 		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1232 		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
1233 		goto force;
1234 	}
1235 
1236 	/*
1237 	 * Loop until resolution, inside code will break out on error.
1238 	 */
1239 	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
1240 force:
1241 		/*
1242 		 * If dvp is the root of its filesystem it should already
1243 		 * have a namecache pointer associated with it as a side
1244 		 * effect of the mount, but it may have been disassociated.
1245 		 */
1246 		if (dvp->v_flag & VROOT) {
1247 			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
1248 			error = cache_resolve_mp(nch->mount);
1249 			_cache_put(nch->ncp);
1250 			if (ncvp_debug) {
1251 				kprintf("cache_fromdvp: resolve root of mount %p error %d",
1252 					dvp->v_mount, error);
1253 			}
1254 			if (error) {
1255 				if (ncvp_debug)
1256 					kprintf(" failed\n");
1257 				nch->ncp = NULL;
1258 				break;
1259 			}
1260 			if (ncvp_debug)
1261 				kprintf(" succeeded\n");
1262 			continue;
1263 		}
1264 
1265 		/*
1266 		 * If we are recursed too deeply resort to an O(n^2)
1267 		 * algorithm to resolve the namecache topology.  The
1268 		 * resolved pvp is left referenced in saved_dvp to
1269 		 * prevent the tree from being destroyed while we loop.
1270 		 */
1271 		if (makeit > 20) {
1272 			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
1273 			if (error) {
1274 				kprintf("lookupdotdot(longpath) failed %d "
1275 				       "dvp %p\n", error, dvp);
1276 				break;
1277 			}
1278 			continue;
1279 		}
1280 
1281 		/*
1282 		 * Get the parent directory and resolve its ncp.
1283 		 */
1284 		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
1285 		if (error) {
1286 			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
1287 			break;
1288 		}
1289 		vn_unlock(pvp);
1290 
1291 		/*
1292 		 * Reuse makeit as a recursion depth counter.
1293 		 */
1294 		cache_fromdvp(pvp, cred, makeit + 1, nch);
1295 		vrele(pvp);
1296 		if (nch->ncp == NULL)
1297 			break;
1298 
1299 		/*
1300 		 * Do an inefficient scan of pvp (embodied by ncp) to look
1301 		 * for dvp.  This will create a namecache record for dvp on
1302 		 * success.  We loop up to recheck on success.
1303 		 *
1304 		 * ncp and dvp are both held but not locked.
1305 		 */
1306 		error = cache_inefficient_scan(nch, cred, dvp);
1307 		_cache_drop(nch->ncp);
1308 		if (error) {
1309 			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
1310 				pvp, nch->ncp->nc_name, dvp);
1311 			nch->ncp = NULL;
1312 			break;
1313 		}
1314 		if (ncvp_debug) {
1315 			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
1316 				pvp, nch->ncp->nc_name);
1317 		}
1318 	}
1319 
1320 	/*
1321 	 * hold it for real so the mount gets a ref
1322 	 */
1323 	if (nch->ncp)
1324 		cache_hold(nch);
1325 	if (saved_dvp)
1326 		vrele(saved_dvp);
1327 	if (nch->ncp)
1328 		return (0);
1329 	return (EINVAL);
1330 }
1331 
1332 /*
1333  * Go up the chain of parent directories until we find something
1334  * we can resolve into the namecache.  This is very inefficient.
1335  */
1336 static
1337 int
1338 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1339 		  struct vnode **saved_dvp)
1340 {
1341 	struct nchandle nch;
1342 	struct vnode *pvp;
1343 	int error;
1344 	static time_t last_fromdvp_report;
1345 
1346 	/*
1347 	 * Loop getting the parent directory vnode until we get something we
1348 	 * can resolve in the namecache.
1349 	 */
1350 	vref(dvp);
1351 	nch.mount = dvp->v_mount;
1352 
1353 	for (;;) {
1354 		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred);
1355 		if (error) {
1356 			vrele(dvp);
1357 			return (error);
1358 		}
1359 		vn_unlock(pvp);
1360 		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
1361 			_cache_hold(nch.ncp);
1362 			vrele(pvp);
1363 			break;
1364 		}
1365 		if (pvp->v_flag & VROOT) {
1366 			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
1367 			error = cache_resolve_mp(nch.mount);
1368 			_cache_unlock(nch.ncp);
1369 			vrele(pvp);
1370 			if (error) {
1371 				_cache_drop(nch.ncp);
1372 				vrele(dvp);
1373 				return (error);
1374 			}
1375 			break;
1376 		}
1377 		vrele(dvp);
1378 		dvp = pvp;
1379 	}
1380 	if (last_fromdvp_report != time_second) {
1381 		last_fromdvp_report = time_second;
1382 		kprintf("Warning: extremely inefficient path resolution on %s\n",
1383 			nch.ncp->nc_name);
1384 	}
1385 	error = cache_inefficient_scan(&nch, cred, dvp);
1386 
1387 	/*
1388 	 * Hopefully dvp now has a namecache record associated with it.
1389 	 * Leave it referenced to prevent the kernel from recycling the
1390 	 * vnode.  Otherwise extremely long directory paths could result
1391 	 * in endless recycling.
1392 	 */
1393 	if (*saved_dvp)
1394 	    vrele(*saved_dvp);
1395 	*saved_dvp = dvp;
1396 	return (error);
1397 }
1398 
1399 
1400 /*
1401  * Do an inefficient scan of the directory represented by ncp looking for
1402  * the directory vnode dvp.  ncp must be held but not locked on entry and
1403  * will be held on return.  dvp must be refd but not locked on entry and
1404  * will remain refd on return.
1405  *
1406  * Why do this at all?  Well, due to its stateless nature the NFS server
1407  * converts file handles directly to vnodes without necessarily going through
1408  * the namecache ops that would otherwise create the namecache topology
1409  * leading to the vnode.  We could either (1) Change the namecache algorithms
1410  * to allow disconnected namecache records that are re-merged opportunistically,
1411  * or (2) Make the NFS server backtrack and scan to recover a connected
1412  * namecache topology in order to then be able to issue new API lookups.
1413  *
1414  * It turns out that (1) is a huge mess.  It takes a nice clean set of
1415  * namecache algorithms and introduces a lot of complication in every subsystem
1416  * that calls into the namecache to deal with the re-merge case, especially
1417  * since we are using the namecache to placehold negative lookups and the
1418  * vnode might not be immediately assigned. (2) is certainly far less
1419  * efficient than (1), but since we are only talking about directories here
1420  * (which are likely to remain cached), the case does not actually run all
1421  * that often and has the supreme advantage of not polluting the namecache
1422  * algorithms.
1423  */
1424 static int
1425 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1426 		       struct vnode *dvp)
1427 {
1428 	struct nlcomponent nlc;
1429 	struct nchandle rncp;
1430 	struct dirent *den;
1431 	struct vnode *pvp;
1432 	struct vattr vat;
1433 	struct iovec iov;
1434 	struct uio uio;
1435 	int blksize;
1436 	int eofflag;
1437 	int bytes;
1438 	char *rbuf;
1439 	int error;
1440 
1441 	vat.va_blocksize = 0;
1442 	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
1443 		return (error);
1444 	if ((error = cache_vref(nch, cred, &pvp)) != 0)
1445 		return (error);
1446 	if (ncvp_debug)
1447 		kprintf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
1448 	if ((blksize = vat.va_blocksize) == 0)
1449 		blksize = DEV_BSIZE;
1450 	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
1451 	rncp.ncp = NULL;
1452 
1453 	eofflag = 0;
1454 	uio.uio_offset = 0;
1455 again:
1456 	iov.iov_base = rbuf;
1457 	iov.iov_len = blksize;
1458 	uio.uio_iov = &iov;
1459 	uio.uio_iovcnt = 1;
1460 	uio.uio_resid = blksize;
1461 	uio.uio_segflg = UIO_SYSSPACE;
1462 	uio.uio_rw = UIO_READ;
1463 	uio.uio_td = curthread;
1464 
1465 	if (ncvp_debug >= 2)
1466 		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
1467 	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
1468 	if (error == 0) {
1469 		den = (struct dirent *)rbuf;
1470 		bytes = blksize - uio.uio_resid;
1471 
1472 		while (bytes > 0) {
1473 			if (ncvp_debug >= 2) {
1474 				kprintf("cache_inefficient_scan: %*.*s\n",
1475 					den->d_namlen, den->d_namlen,
1476 					den->d_name);
1477 			}
1478 			if (den->d_type != DT_WHT &&
1479 			    den->d_ino == vat.va_fileid) {
1480 				if (ncvp_debug) {
1481 					kprintf("cache_inefficient_scan: "
1482 					       "MATCHED inode %ld path %s/%*.*s\n",
1483 					       vat.va_fileid, nch->ncp->nc_name,
1484 					       den->d_namlen, den->d_namlen,
1485 					       den->d_name);
1486 				}
1487 				nlc.nlc_nameptr = den->d_name;
1488 				nlc.nlc_namelen = den->d_namlen;
1489 				rncp = cache_nlookup(nch, &nlc);
1490 				KKASSERT(rncp.ncp != NULL);
1491 				break;
1492 			}
1493 			bytes -= _DIRENT_DIRSIZ(den);
1494 			den = _DIRENT_NEXT(den);
1495 		}
1496 		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
1497 			goto again;
1498 	}
1499 	vrele(pvp);
1500 	if (rncp.ncp) {
1501 		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
1502 			_cache_setvp(rncp.ncp, dvp);
1503 			if (ncvp_debug >= 2) {
1504 				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
1505 					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
1506 			}
1507 		} else {
1508 			if (ncvp_debug >= 2) {
1509 				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
1510 					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
1511 					rncp.ncp->nc_vp);
1512 			}
1513 		}
1514 		if (rncp.ncp->nc_vp == NULL)
1515 			error = rncp.ncp->nc_error;
1516 		_cache_put(rncp.ncp);
1517 	} else {
1518 		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
1519 			dvp, nch->ncp->nc_name);
1520 		error = ENOENT;
1521 	}
1522 	kfree(rbuf, M_TEMP);
1523 	return (error);
1524 }
1525 
1526 /*
1527  * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
1528  * state, which disassociates it from its vnode or ncneglist.
1529  *
1530  * Then, if there are no additional references to the ncp and no children,
1531  * the ncp is removed from the topology and destroyed.  This function will
1532  * also run through the nc_parent chain and destroy parent ncps if possible.
1533  * As a side benefit, it turns out the only conditions that allow running
1534  * up the chain are also the conditions to ensure no deadlock will occur.
1535  *
1536  * References and/or children may exist if the ncp is in the middle of the
1537  * topology, preventing the ncp from being destroyed.
1538  *
1539  * This function must be called with the ncp held and locked and will unlock
1540  * and drop it during zapping.
1541  */
1542 static void
1543 cache_zap(struct namecache *ncp)
1544 {
1545 	struct namecache *par;
1546 
1547 	/*
1548 	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
1549 	 */
1550 	_cache_setunresolved(ncp);
1551 
1552 	/*
1553 	 * Try to scrap the entry and possibly tail-recurse on its parent.
1554 	 * We only scrap unref'd (other than our ref) unresolved entries,
1555 	 * we do not scrap 'live' entries.
1556 	 */
1557 	while (ncp->nc_flag & NCF_UNRESOLVED) {
1558 		/*
1559 		 * Someone other than us has a ref, stop.
1560 		 */
1561 		if (ncp->nc_refs > 1)
1562 			goto done;
1563 
1564 		/*
1565 		 * We have children, stop.
1566 		 */
1567 		if (!TAILQ_EMPTY(&ncp->nc_list))
1568 			goto done;
1569 
1570 		/*
1571 		 * Remove ncp from the topology: hash table and parent linkage.
1572 		 */
1573 		if (ncp->nc_flag & NCF_HASHED) {
1574 			ncp->nc_flag &= ~NCF_HASHED;
1575 			LIST_REMOVE(ncp, nc_hash);
1576 		}
1577 		if ((par = ncp->nc_parent) != NULL) {
1578 			par = _cache_hold(par);
1579 			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
1580 			ncp->nc_parent = NULL;
1581 			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
1582 				vdrop(par->nc_vp);
1583 		}
1584 
1585 		/*
1586 		 * ncp should not have picked up any refs.  Physically
1587 		 * destroy the ncp.
1588 		 */
1589 		KKASSERT(ncp->nc_refs == 1);
1590 		--numunres;
1591 		/* _cache_unlock(ncp) not required */
1592 		ncp->nc_refs = -1;	/* safety */
1593 		if (ncp->nc_name)
1594 			kfree(ncp->nc_name, M_VFSCACHE);
1595 		kfree(ncp, M_VFSCACHE);
1596 
1597 		/*
1598 		 * Loop on the parent (it may be NULL).  Only bother looping
1599 		 * if the parent has a single ref (ours), which also means
1600 		 * we can lock it trivially.
1601 		 */
1602 		ncp = par;
1603 		if (ncp == NULL)
1604 			return;
1605 		if (ncp->nc_refs != 1) {
1606 			_cache_drop(ncp);
1607 			return;
1608 		}
1609 		KKASSERT(par->nc_exlocks == 0);
1610 		_cache_lock(ncp);
1611 	}
1612 done:
1613 	_cache_unlock(ncp);
1614 	atomic_subtract_int(&ncp->nc_refs, 1);
1615 }
1616 
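/*
 * Two-state hysteresis used by cache_hysteresis() to limit the number
 * of negative entries without thrashing on the critical path.
 */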
1617 static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
1618 
1619 static __inline
1620 void
1621 cache_hysteresis(void)
1622 {
1623 	/*
1624 	 * Don't cache too many negative hits.  We use hysteresis to reduce
1625 	 * the impact on the critical path.
1626 	 */
1627 	switch(cache_hysteresis_state) {
1628 	case CHI_LOW:
1629 		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
1630 			cache_cleanneg(10);
1631 			cache_hysteresis_state = CHI_HIGH;
1632 		}
1633 		break;
1634 	case CHI_HIGH:
1635 		if (numneg > MINNEG * 9 / 10 &&
1636 		    numneg * ncnegfactor * 9 / 10 > numcache
1637 		) {
1638 			cache_cleanneg(10);
1639 		} else {
1640 			cache_hysteresis_state = CHI_LOW;
1641 		}
1642 		break;
1643 	}
1644 }
1645 
1646 /*
1647  * NEW NAMECACHE LOOKUP API
1648  *
1649  * Lookup an entry in the cache.  A locked, referenced, non-NULL
1650  * entry is *always* returned, even if the supplied component is illegal.
1651  * The resulting namecache entry should be returned to the system with
1652  * cache_put() or cache_unlock() + cache_drop().
1653  *
1654  * namecache locks are recursive but care must be taken to avoid lock order
1655  * reversals.
1656  *
1657  * Nobody else will be able to manipulate the associated namespace (e.g.
1658  * create, delete, rename, rename-target) until the caller unlocks the
1659  * entry.
1660  *
1661  * The returned entry will be in one of three states:  positive hit (non-null
1662  * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
1663  * Unresolved entries must be resolved through the filesystem to associate the
1664  * vnode and/or determine whether a positive or negative hit has occurred.
1665  *
1666  * It is not necessary to lock a directory in order to lock namespace under
1667  * that directory.  In fact, it is explicitly not allowed to do that.  A
1668  * directory is typically only locked when being created, renamed, or
1669  * destroyed.
1670  *
1671  * The directory (par) may be unresolved, in which case any returned child
1672  * will likely also be marked unresolved.  Likely but not guaranteed.  Since
1673  * the filesystem lookup requires a resolved directory vnode the caller is
1674  * responsible for resolving the namecache chain top-down.  This API
1675  * specifically allows whole chains to be created in an unresolved state.
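 *
 * Illustrative calling sequence (a sketch only, not code from this file;
 * the variable names are hypothetical):
 *
 *	error = 0;
 *	nch = cache_nlookup(&par_nch, &nlc);
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	if (error == 0)
 *		error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	cache_put(&nch);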
1676  */
1677 struct nchandle
1678 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
1679 {
1680 	struct nchandle nch;
1681 	struct namecache *ncp;
1682 	struct namecache *new_ncp;
1683 	struct nchashhead *nchpp;
1684 	u_int32_t hash;
1685 	globaldata_t gd;
1686 
1687 	numcalls++;
1688 	gd = mycpu;
1689 
1690 	/*
1691 	 * Try to locate an existing entry
1692 	 */
1693 	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
1694 	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
1695 	new_ncp = NULL;
1696 restart:
1697 	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
1698 		numchecks++;
1699 
1700 		/*
1701 		 * Zap entries that have timed out.
1702 		 */
1703 		if (ncp->nc_timeout &&
1704 		    (int)(ncp->nc_timeout - ticks) < 0 &&
1705 		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
1706 		    ncp->nc_exlocks == 0
1707 		) {
1708 			cache_zap(_cache_get(ncp));
1709 			goto restart;
1710 		}
1711 
1712 		/*
1713 		 * Break out if we find a matching entry.  Note that
1714 		 * UNRESOLVED entries may match, but DESTROYED entries
1715 		 * do not.
1716 		 */
1717 		if (ncp->nc_parent == par_nch->ncp &&
1718 		    ncp->nc_nlen == nlc->nlc_namelen &&
1719 		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
1720 		    (ncp->nc_flag & NCF_DESTROYED) == 0
1721 		) {
1722 			if (_cache_get_nonblock(ncp) == 0) {
1723 				if (new_ncp)
1724 					_cache_free(new_ncp);
1725 				goto found;
1726 			}
1727 			_cache_get(ncp);
1728 			_cache_put(ncp);
1729 			goto restart;
1730 		}
1731 	}
1732 
1733 	/*
1734 	 * We failed to locate an entry; create a new entry and add it to
1735 	 * the cache.  We have to relookup after possibly blocking in
1736 	 * kmalloc.
1737 	 */
1738 	if (new_ncp == NULL) {
1739 		new_ncp = cache_alloc(nlc->nlc_namelen);
1740 		goto restart;
1741 	}
1742 
1743 	ncp = new_ncp;
1744 
1745 	/*
1746 	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
1747 	 * and link to the parent.  The mount point is usually inherited
1748 	 * from the parent unless this is a special case such as a mount
1749 	 * point where nlc_namelen is 0.  If nlc_namelen is 0, nc_name will
1750 	 * be NULL.
1751 	 */
1752 	if (nlc->nlc_namelen) {
1753 		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
1754 		ncp->nc_name[nlc->nlc_namelen] = 0;
1755 	}
1756 	nchpp = NCHHASH(hash);
1757 	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
1758 	ncp->nc_flag |= NCF_HASHED;
1759 	cache_link_parent(ncp, par_nch->ncp);
1760 found:
1761 	/*
1762 	 * stats and namecache size management
1763 	 */
1764 	if (ncp->nc_flag & NCF_UNRESOLVED)
1765 		++gd->gd_nchstats->ncs_miss;
1766 	else if (ncp->nc_vp)
1767 		++gd->gd_nchstats->ncs_goodhits;
1768 	else
1769 		++gd->gd_nchstats->ncs_neghits;
1770 	cache_hysteresis();
1771 	nch.mount = par_nch->mount;
1772 	nch.ncp = ncp;
1773 	++nch.mount->mnt_refs;
1774 	return(nch);
1775 }
1776 
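/*
 * Illustrative sketch (not part of the original file): the typical
 * consumer pattern for the lookup API above.  cache_nlookup(),
 * cache_resolve() and cache_put() are the calls documented in this
 * file; the wrapper name is hypothetical.
 */
static int
example_nlookup_one(struct nchandle *par_nch, struct nlcomponent *nlc,
		    struct ucred *cred)
{
	struct nchandle nch;
	int error;

	nch = cache_nlookup(par_nch, nlc);	/* always locked + referenced */
	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(&nch, cred);
	else
		error = nch.ncp->nc_error;
	/* ENOENT here indicates a valid negative hit, not a lookup failure */
	cache_put(&nch);			/* unlock + drop in one call */
	return (error);
}
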
1777 /*
1778  * The namecache entry is marked as being used as a mount point.
1779  * Locate the mount if it is visible to the caller.
1780  */
1781 struct findmount_info {
1782 	struct mount *result;
1783 	struct mount *nch_mount;
1784 	struct namecache *nch_ncp;
1785 };
1786 
1787 static
1788 int
1789 cache_findmount_callback(struct mount *mp, void *data)
1790 {
1791 	struct findmount_info *info = data;
1792 
1793 	/*
1794 	 * Check the mount's mounted-on point against the passed nch.
1795 	 */
1796 	if (mp->mnt_ncmounton.mount == info->nch_mount &&
1797 	    mp->mnt_ncmounton.ncp == info->nch_ncp
1798 	) {
1799 	    info->result = mp;
1800 	    return(-1);
1801 	}
1802 	return(0);
1803 }
1804 
1805 struct mount *
1806 cache_findmount(struct nchandle *nch)
1807 {
1808 	struct findmount_info info;
1809 
1810 	info.result = NULL;
1811 	info.nch_mount = nch->mount;
1812 	info.nch_ncp = nch->ncp;
1813 	mountlist_scan(cache_findmount_callback, &info,
1814 			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1815 	return(info.result);
1816 }
1817 
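/*
 * Illustrative sketch (not part of the original file): probing whether a
 * filesystem is mounted on top of the directory an nchandle represents.
 * mnt_stat.f_mntonname is the standard statfs mounted-on path.
 */
static void
example_report_cover(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = cache_findmount(nch)) != NULL) {
		kprintf("ncp %p is covered by the mount on %s\n",
			nch->ncp, mp->mnt_stat.f_mntonname);
	}
}
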
1818 /*
1819  * Resolve an unresolved namecache entry, generally by looking it up.
1820  * The passed ncp must be locked and refd.
1821  *
1822  * Theoretically since a vnode cannot be recycled while held, and since
1823  * the nc_parent chain holds its vnode as long as children exist, the
1824  * direct parent of the cache entry we are trying to resolve should
1825  * have a valid vnode.  If not, generate an error that we can
1826  * determine is related to a resolver bug.
1827  *
1828  * However, if a vnode was in the middle of being recycled when the NCP
1829  * got locked, ncp->nc_vp might point to a vnode that is about to become
1830  * invalid.  cache_resolve() handles this case by unresolving the entry
1831  * and then re-resolving it.
1832  *
1833  * Note that successful resolution does not necessarily return an error
1834  * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
1835  * will be returned.
1836  */
1837 int
1838 cache_resolve(struct nchandle *nch, struct ucred *cred)
1839 {
1840 	struct namecache *par;
1841 	struct namecache *ncp;
1842 	struct nchandle nctmp;
1843 	struct mount *mp;
1844 	int error;
1845 
1846 	ncp = nch->ncp;
1847 	mp = nch->mount;
1848 restart:
1849 	/*
1850 	 * If the ncp is already resolved we have nothing to do.  However,
1851 	 * we do want to guarantee that a usable vnode is returned when
1852 	 * a vnode is present, so make sure it hasn't been reclaimed.
1853 	 */
1854 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1855 		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1856 			_cache_setunresolved(ncp);
1857 		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1858 			return (ncp->nc_error);
1859 	}
1860 
1861 	/*
1862 	 * Mount points need special handling because the parent does not
1863 	 * belong to the same filesystem as the ncp.
1864 	 */
1865 	if (ncp == mp->mnt_ncmountpt.ncp)
1866 		return (cache_resolve_mp(mp));
1867 
1868 	/*
1869 	 * We expect an unbroken chain of ncps to at least the mount point,
1870 	 * and even all the way to root (but this code doesn't have to go
1871 	 * past the mount point).
1872 	 */
1873 	if (ncp->nc_parent == NULL) {
1874 		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
1875 			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1876 		ncp->nc_error = EXDEV;
1877 		return(ncp->nc_error);
1878 	}
1879 
1880 	/*
1881 	 * The vp's of the parent directories in the chain are held via vhold()
1882 	 * due to the existence of the child, and should not disappear.
1883 	 * However, there are cases where they can disappear:
1884 	 *
1885 	 *	- due to filesystem I/O errors.
1886 	 *	- due to NFS being stupid about tracking the namespace and
1887 	 *	  destroying the namespace for entire directories quite often.
1888 	 *	- due to forced unmounts.
1889 	 *	- due to an rmdir (parent will be marked DESTROYED)
1890 	 *
1891 	 * When this occurs we have to track the chain backwards and resolve
1892 	 * it, looping until the resolver catches up to the current node.  We
1893 	 * could recurse here but we might run ourselves out of kernel stack
1894 	 * so we do it in a more painful manner.  This situation really should
1895 	 * not occur all that often, and when it does we should not have to
1896 	 * go back very many nodes to resolve the ncp.
1897 	 */
1898 	while (ncp->nc_parent->nc_vp == NULL) {
1899 		/*
1900 		 * This case can occur if a process is CD'd into a
1901 		 * directory which is then rmdir'd.  If the parent is marked
1902 		 * destroyed there is no point trying to resolve it.
1903 		 */
1904 		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
1905 			return(ENOENT);
1906 
1907 		par = ncp->nc_parent;
1908 		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
1909 			par = par->nc_parent;
1910 		if (par->nc_parent == NULL) {
1911 			kprintf("EXDEV case 2 %*.*s\n",
1912 				par->nc_nlen, par->nc_nlen, par->nc_name);
1913 			return (EXDEV);
1914 		}
1915 		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
1916 			par->nc_nlen, par->nc_nlen, par->nc_name);
1917 		/*
1918 		 * The parent is not set in stone, ref and lock it to prevent
1919 		 * it from disappearing.  Also note that due to renames it
1920 		 * is possible for our ncp to move and for par to no longer
1921 		 * be one of its parents.  We resolve it anyway, the loop
1922 		 * will handle any moves.
1923 		 */
1924 		_cache_get(par);
1925 		if (par == nch->mount->mnt_ncmountpt.ncp) {
1926 			cache_resolve_mp(nch->mount);
1927 		} else if (par->nc_parent->nc_vp == NULL) {
1928 			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
1929 			_cache_put(par);
1930 			continue;
1931 		} else if (par->nc_flag & NCF_UNRESOLVED) {
1932 			nctmp.mount = mp;
1933 			nctmp.ncp = par;
1934 			par->nc_error = VOP_NRESOLVE(&nctmp, cred);
1935 		}
1936 		if ((error = par->nc_error) != 0) {
1937 			if (par->nc_error != EAGAIN) {
1938 				kprintf("EXDEV case 3 %*.*s error %d\n",
1939 				    par->nc_nlen, par->nc_nlen, par->nc_name,
1940 				    par->nc_error);
1941 				_cache_put(par);
1942 				return(error);
1943 			}
1944 			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
1945 				par, par->nc_nlen, par->nc_nlen, par->nc_name);
1946 		}
1947 		_cache_put(par);
1948 		/* loop */
1949 	}
1950 
1951 	/*
1952 	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
1953 	 * ncp's and reattach them.  If this occurs the original ncp is marked
1954 	 * EAGAIN to force a relookup.
1955 	 *
1956 	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
1957 	 * ncp must already be resolved.
1958 	 */
1959 	nctmp.mount = mp;
1960 	nctmp.ncp = ncp;
1961 	ncp->nc_error = VOP_NRESOLVE(&nctmp, cred);
1962 	/*vop_nresolve(*ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
1963 	if (ncp->nc_error == EAGAIN) {
1964 		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
1965 			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
1966 		goto restart;
1967 	}
1968 	return(ncp->nc_error);
1969 }
1970 
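/*
 * Illustrative sketch (not part of the original file): the contract that
 * a filesystem's nresolve VOP is expected to honor for cache_resolve()
 * above.  The a_nch field name and the directory-scan helper are
 * assumptions; cache_setvp() attaches a vnode for a positive hit or
 * NULL for a negative hit (reported to callers as ENOENT).
 */
static int
example_nresolve(struct vop_nresolve_args *ap)
{
	struct vnode *vp;
	int error;

	error = example_dirlookup(ap->a_nch->ncp, &vp);	/* hypothetical */
	if (error == 0) {
		cache_setvp(ap->a_nch, vp);	/* positive hit */
		vrele(vp);			/* the cache holds its own ref */
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);	/* cached negative hit */
	}
	return (error);
}
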
1971 /*
1972  * Resolve the ncp associated with a mount point.  Such ncp's almost always
1973  * remain resolved and this routine is rarely called.  NFS MPs tend to force
1974  * re-resolution more often due to NFS's mack-truck-smash-the-namecache
1975  * method of tracking namespace changes.
1976  *
1977  * The semantics of this call are that the passed ncp must be locked on
1978  * entry and will be locked on return.  However, if we actually have to
1979  * resolve the mount point we temporarily unlock the entry in order to
1980  * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
1981  * the unlock we have to recheck the flags after we relock.
1982  */
1983 static int
1984 cache_resolve_mp(struct mount *mp)
1985 {
1986 	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
1987 	struct vnode *vp;
1988 	int error;
1989 
1990 	KKASSERT(mp != NULL);
1991 
1992 	/*
1993 	 * If the ncp is already resolved we have nothing to do.  However,
1994 	 * we do want to guarantee that a usable vnode is returned when
1995 	 * a vnode is present, so make sure it hasn't been reclaimed.
1996 	 */
1997 	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
1998 		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
1999 			_cache_setunresolved(ncp);
2000 	}
2001 
2002 	if (ncp->nc_flag & NCF_UNRESOLVED) {
2003 		_cache_unlock(ncp);
2004 		while (vfs_busy(mp, 0))
2005 			;
2006 		error = VFS_ROOT(mp, &vp);
2007 		_cache_lock(ncp);
2008 
2009 		/*
2010 		 * recheck the ncp state after relocking.
2011 		 */
2012 		if (ncp->nc_flag & NCF_UNRESOLVED) {
2013 			ncp->nc_error = error;
2014 			if (error == 0) {
2015 				_cache_setvp(ncp, vp);
2016 				vput(vp);
2017 			} else {
2018 				kprintf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
2019 				_cache_setvp(ncp, NULL);
2020 			}
2021 		} else if (error == 0) {
2022 			vput(vp);
2023 		}
2024 		vfs_unbusy(mp);
2025 	}
2026 	return(ncp->nc_error);
2027 }
2028 
2029 void
2030 cache_cleanneg(int count)
2031 {
2032 	struct namecache *ncp;
2033 
2034 	/*
2035 	 * Automode from the vnlru proc - clean out 10% of the negative cache
2036 	 * entries.
2037 	 */
2038 	if (count == 0)
2039 		count = numneg / 10 + 1;
2040 
2041 	/*
2042 	 * Attempt to clean out the specified number of negative cache
2043 	 * entries.
2044 	 */
2045 	while (count) {
2046 		ncp = TAILQ_FIRST(&ncneglist);
2047 		if (ncp == NULL) {
2048 			KKASSERT(numneg == 0);
2049 			break;
2050 		}
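		/*
		 * Rotate the candidate to the tail of the list so that a
		 * failed non-blocking lock below does not keep the scan
		 * stuck on the same entry.
		 */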
2051 		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2052 		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
2053 		if (_cache_get_nonblock(ncp) == 0)
2054 			cache_zap(ncp);
2055 		--count;
2056 	}
2057 }
2058 
2059 /*
2060  * Rehash an ncp.  Rehashing is typically required if the name changes (which
2061  * should not generally occur) or the parent link changes.  This function will
2062  * unhash the ncp if the ncp is no longer hashable.
2063  */
2064 static void
2065 _cache_rehash(struct namecache *ncp)
2066 {
2067 	struct nchashhead *nchpp;
2068 	u_int32_t hash;
2069 
2070 	if (ncp->nc_flag & NCF_HASHED) {
2071 		ncp->nc_flag &= ~NCF_HASHED;
2072 		LIST_REMOVE(ncp, nc_hash);
2073 	}
2074 	if (ncp->nc_nlen && ncp->nc_parent) {
2075 		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
2076 		hash = fnv_32_buf(&ncp->nc_parent,
2077 					sizeof(ncp->nc_parent), hash);
2078 		nchpp = NCHHASH(hash);
2079 		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
2080 		ncp->nc_flag |= NCF_HASHED;
2081 	}
2082 }
2083 
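/*
 * Illustrative sketch (not part of the original file): the hash computed
 * here must agree exactly with the one in cache_nlookup(): FNV-1 over
 * the name, folded with the parent ncp's pointer value.  A hypothetical
 * shared helper would look like this:
 */
static __inline u_int32_t
example_nchash(const char *name, int nlen, struct namecache *par)
{
	u_int32_t hash;

	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	return (hash);
}
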
2084 /*
2085  * Name cache initialization, from vfsinit() when we are booting
2086  */
2087 void
2088 nchinit(void)
2089 {
2090 	int i;
2091 	globaldata_t gd;
2092 
2093 	/* Initialize per-cpu namecache effectiveness statistics. */
2094 	for (i = 0; i < ncpus; ++i) {
2095 		gd = globaldata_find(i);
2096 		gd->gd_nchstats = &nchstats[i];
2097 	}
2098 	TAILQ_INIT(&ncneglist);
2099 	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
2100 	nclockwarn = 1 * hz;
2101 }
2102 
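/*
 * Illustrative note (not part of the original file): hashinit() rounds
 * the bucket count up to a power of two and stores (buckets - 1) in
 * nchash, so the NCHHASH() bucket selection used throughout this file
 * reduces to a simple mask, along the lines of:
 *
 *	#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
 */
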
2103 /*
2104  * Called from start_init() to bootstrap the root filesystem.  Fills in
2105  * the passed nchandle with a referenced, unlocked namecache record.
2106  */
2107 void
2108 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
2109 {
2110 	nch->ncp = cache_alloc(0);
2111 	nch->mount = mp;
2112 	++mp->mnt_refs;
2113 	if (vp)
2114 		_cache_setvp(nch->ncp, vp);
2115 }
2116 
2117 /*
2118  * vfs_cache_setroot()
2119  *
2120  *	Create an association between the root of our namecache and
2121  *	the root vnode.  This routine may be called several times during
2122  *	booting.
2123  *
2124  *	If the caller intends to save the returned namecache pointer somewhere
2125  *	it must cache_hold() it.
2126  */
2127 void
2128 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
2129 {
2130 	struct vnode *ovp;
2131 	struct nchandle onch;
2132 
2133 	ovp = rootvnode;
2134 	onch = rootnch;
2135 	rootvnode = nvp;
2136 	if (nch)
2137 		rootnch = *nch;
2138 	else
2139 		cache_zero(&rootnch);
2140 	if (ovp)
2141 		vrele(ovp);
2142 	if (onch.ncp)
2143 		cache_drop(&onch);
2144 }
2145 
2146 /*
2147  * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
2148  * topology and is being removed as quickly as possible.  The new VOP_N*()
2149  * API calls are required to make specific adjustments using the supplied
2150  * ncp pointers rather than just bogusly purging random vnodes.
2151  *
2152  * Invalidate all namecache entries to a particular vnode as well as
2153  * any direct children of that vnode in the namecache.  This is a
2154  * 'catch all' purge used by filesystems that do not know any better.
2155  *
2156  * Note that the linkage between the vnode and its namecache entries will
2157  * be removed, but the namecache entries themselves might stay put due to
2158  * active references from elsewhere in the system or due to the existence of
2159  * the children.  The namecache topology is left intact even if we do not
2160  * know what the vnode association is.  Such entries will be marked
2161  * NCF_UNRESOLVED.
2162  */
2163 void
2164 cache_purge(struct vnode *vp)
2165 {
2166 	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
2167 }
2168 
2169 /*
2170  * Flush all entries referencing a particular filesystem.
2171  *
2172  * Since we need to check it anyway, we will flush all the invalid
2173  * entries at the same time.
2174  */
2175 #if 0
2176 
2177 void
2178 cache_purgevfs(struct mount *mp)
2179 {
2180 	struct nchashhead *nchpp;
2181 	struct namecache *ncp, *nnp;
2182 
2183 	/*
2184 	 * Scan hash tables for applicable entries.
2185 	 */
2186 	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
2187 		ncp = LIST_FIRST(nchpp);
2188 		if (ncp)
2189 			_cache_hold(ncp);
2190 		while (ncp) {
2191 			nnp = LIST_NEXT(ncp, nc_hash);
2192 			if (nnp)
2193 				_cache_hold(nnp);
2194 			if (ncp->nc_mount == mp) {
2195 				_cache_lock(ncp);
2196 				cache_zap(ncp);
2197 			} else {
2198 				_cache_drop(ncp);
2199 			}
2200 			ncp = nnp;
2201 		}
2202 	}
2203 }
2204 
2205 #endif
2206 
2207 /*
2208  * Create a new (theoretically) unique fsmid
2209  */
2210 int64_t
2211 cache_getnewfsmid(void)
2212 {
2213 	static int fsmid_roller;
2214 	int64_t fsmid;
2215 
2216 	++fsmid_roller;
2217 	fsmid = ((int64_t)time_second << 32) |
2218 			(fsmid_roller & 0x7FFFFFFF);
2219 	return (fsmid);
2220 }
2221 
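/*
 * Illustrative note (not part of the original file): the fsmid packs the
 * current wall-clock second into the high 32 bits and a rolling 31-bit
 * counter into the low bits, so a duplicate requires more than 2^31
 * calls within a single second (or the clock stepping backwards).
 */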
2222 
2223 static int disablecwd;
2224 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
2225 
2226 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
2227 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
2228 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
2229 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
2230 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
2231 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
2232 
2233 int
2234 sys___getcwd(struct __getcwd_args *uap)
2235 {
2236 	int buflen;
2237 	int error;
2238 	char *buf;
2239 	char *bp;
2240 
2241 	if (disablecwd)
2242 		return (ENODEV);
2243 
2244 	buflen = uap->buflen;
2245 	if (buflen < 2)
2246 		return (EINVAL);
2247 	if (buflen > MAXPATHLEN)
2248 		buflen = MAXPATHLEN;
2249 
2250 	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
2251 	bp = kern_getcwd(buf, buflen, &error);
2252 	if (error == 0)
2253 		error = copyout(bp, uap->buf, strlen(bp) + 1);
2254 	kfree(buf, M_TEMP);
2255 	return (error);
2256 }
2257 
2258 char *
2259 kern_getcwd(char *buf, size_t buflen, int *error)
2260 {
2261 	struct proc *p = curproc;
2262 	char *bp;
2263 	int i, slash_prefixed;
2264 	struct filedesc *fdp;
2265 	struct nchandle nch;
2266 
2267 	numcwdcalls++;
2268 	bp = buf;
2269 	bp += buflen - 1;
2270 	*bp = '\0';
2271 	fdp = p->p_fd;
2272 	slash_prefixed = 0;
2273 
2274 	nch = fdp->fd_ncdir;
2275 	while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
2276 	       nch.mount != fdp->fd_nrdir.mount)
2277 	) {
2278 		/*
2279 		 * While traversing upwards, if we encounter the root
2280 		 * of the current mount we have to skip to the mount point
2281 		 * in the underlying filesystem.
2282 		 */
2283 		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
2284 			nch = nch.mount->mnt_ncmounton;
2285 			continue;
2286 		}
2287 
2288 		/*
2289 		 * Prepend the path segment
2290 		 */
2291 		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
2292 			if (bp == buf) {
2293 				numcwdfail4++;
2294 				*error = ENOMEM;
2295 				return(NULL);
2296 			}
2297 			*--bp = nch.ncp->nc_name[i];
2298 		}
2299 		if (bp == buf) {
2300 			numcwdfail4++;
2301 			*error = ENOMEM;
2302 			return(NULL);
2303 		}
2304 		*--bp = '/';
2305 		slash_prefixed = 1;
2306 
2307 		/*
2308 		 * Go up a directory.  This isn't a mount point so we don't
2309 		 * have to check again.
2310 		 */
2311 		nch.ncp = nch.ncp->nc_parent;
2312 	}
2313 	if (nch.ncp == NULL) {
2314 		numcwdfail2++;
2315 		*error = ENOENT;
2316 		return(NULL);
2317 	}
2318 	if (!slash_prefixed) {
2319 		if (bp == buf) {
2320 			numcwdfail4++;
2321 			*error = ENOMEM;
2322 			return(NULL);
2323 		}
2324 		*--bp = '/';
2325 	}
2326 	numcwdfound++;
2327 	*error = 0;
2328 	return (bp);
2329 }
2330 
2331 /*
2332  * Thus begins the fullpath magic.
2333  */
2334 
2335 #undef STATNODE
2336 #define STATNODE(name)							\
2337 	static u_int name;						\
2338 	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
2339 
2340 static int disablefullpath;
2341 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
2342     &disablefullpath, 0, "");
2343 
2344 STATNODE(numfullpathcalls);
2345 STATNODE(numfullpathfail1);
2346 STATNODE(numfullpathfail2);
2347 STATNODE(numfullpathfail3);
2348 STATNODE(numfullpathfail4);
2349 STATNODE(numfullpathfound);
2350 
2351 int
2352 cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
2353 {
2354 	char *bp, *buf;
2355 	int i, slash_prefixed;
2356 	struct nchandle fd_nrdir;
2357 	struct nchandle nch;
2358 
2359 	numfullpathcalls--;
2360 
2361 	*retbuf = NULL;
2362 	*freebuf = NULL;
2363 
2364 	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
2365 	bp = buf + MAXPATHLEN - 1;
2366 	*bp = '\0';
2367 	if (p != NULL)
2368 		fd_nrdir = p->p_fd->fd_nrdir;
2369 	else
2370 		fd_nrdir = rootnch;
2371 	slash_prefixed = 0;
2372 	nch = *nchp;
2373 
2374 	while (nch.ncp &&
2375 	       (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
2376 	) {
2377 		/*
2378 		 * While traversing upwards, if we encounter the root
2379 		 * of the current mount we have to skip to the mount point.
2380 		 */
2381 		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
2382 			nch = nch.mount->mnt_ncmounton;
2383 			continue;
2384 		}
2385 
2386 		/*
2387 		 * Prepend the path segment
2388 		 */
2389 		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
2390 			if (bp == buf) {
2391 				numfullpathfail4++;
2392 				kfree(buf, M_TEMP);
2393 				return(ENOMEM);
2394 			}
2395 			*--bp = nch.ncp->nc_name[i];
2396 		}
2397 		if (bp == buf) {
2398 			numfullpathfail4++;
2399 			kfree(buf, M_TEMP);
2400 			return(ENOMEM);
2401 		}
2402 		*--bp = '/';
2403 		slash_prefixed = 1;
2404 
2405 		/*
2406 		 * Go up a directory.  This isn't a mount point so we don't
2407 		 * have to check again.
2408 		 */
2409 		nch.ncp = nch.ncp->nc_parent;
2410 	}
2411 	if (nch.ncp == NULL) {
2412 		numfullpathfail2++;
2413 		kfree(buf, M_TEMP);
2414 		return(ENOENT);
2415 	}
2416 
2417 	if (!slash_prefixed) {
2418 		if (bp == buf) {
2419 			numfullpathfail4++;
2420 			kfree(buf, M_TEMP);
2421 			return(ENOMEM);
2422 		}
2423 		*--bp = '/';
2424 	}
2425 	numfullpathfound++;
2426 	*retbuf = bp;
2427 	*freebuf = buf;
2428 
2429 	return(0);
2430 }
2431 
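/*
 * Illustrative sketch (not part of the original file): the caller-side
 * contract of cache_fullpath().  *retbuf points into *freebuf, so only
 * freebuf is passed to kfree(); the wrapper name is hypothetical.
 */
static void
example_print_path(struct proc *p, struct nchandle *nch)
{
	char *path, *fbuf;

	if (cache_fullpath(p, nch, &path, &fbuf) == 0) {
		kprintf("resolved path: %s\n", path);
		kfree(fbuf, M_TEMP);
	}
}
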
2432 int
2433 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
2434 {
2435 	struct namecache *ncp;
2436 	struct nchandle nch;
2437 
2438 	numfullpathcalls++;
2439 	if (disablefullpath)
2440 		return (ENODEV);
2441 
2442 	if (p == NULL)
2443 		return (EINVAL);
2444 
2445 	/* vn is NULL, client wants us to use p->p_textvp */
2446 	if (vn == NULL) {
2447 		if ((vn = p->p_textvp) == NULL)
2448 			return (EINVAL);
2449 	}
2450 	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
2451 		if (ncp->nc_nlen)
2452 			break;
2453 	}
2454 	if (ncp == NULL)
2455 		return (EINVAL);
2456 
2457 	numfullpathcalls--;
2458 	nch.ncp = ncp;
2459 	nch.mount = vn->v_mount;
2460 	return(cache_fullpath(p, &nch, retbuf, freebuf));
2461 }
2462