/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
#define MINPOS			1024
#define NCMOUNT_NUMCACHE	1009	/* prime number */

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list list;
	struct spinlock	spin;
};

struct ncmount_cache {
	struct spinlock	spin;
	struct namecache *ncp;
	struct mount *mp;
	int isneg;		/* if != 0 mp is originator and not target */
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;
static struct ncmount_cache	ncmount_cache[NCMOUNT_NUMCACHE];

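/*
 * Illustrative sketch (not part of the original code): a name is hashed
 * together with its parent ncp pointer to select a chain, mirroring the
 * fnv_32_buf() usage in cache_rename() below.  Assuming a parent 'par'
 * and a component name 'name' of length 'nlen':
 *
 *	u_int32_t hash;
 *	struct nchash_head *nchpp;
 *
 *	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);	(chain plus its per-chain spinlock)
 */
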
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");

static int	ncnegflush = 10;	/* burst for negative flush */
SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
    "Batch flush negative entries");

static int	ncposflush = 10;	/* burst for positive flush */
SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
    "Batch flush positive entries");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of namecache negative entries");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");

static int	numdefered;		/* number of deferred zap operations */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred zap operations");

static int	ncposlimit;		/* limit on positive entries */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Limit on the number of positive namecache entries");

static int	ncp_shared_lock_disable = 0;
SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
    &ncp_shared_lock_disable, 0, "Disable shared namecache locks");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");

static int	ncmount_cache_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
    &ncmount_cache_enable, 0, "mount point cache");
static long	ncmount_cache_hit;
SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_hit, CTLFLAG_RW,
    &ncmount_cache_hit, 0, "mpcache hits");
static long	ncmount_cache_miss;
SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_miss, CTLFLAG_RW,
    &ncmount_cache_miss, 0, "mpcache misses");
static long	ncmount_cache_overwrite;
SYSCTL_LONG(_debug, OID_AUTO, ncmount_cache_overwrite, CTLFLAG_RW,
    &ncmount_cache_overwrite, 0, "mpcache entry overwrites");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleanpos(int count);
static void _cache_cleandefered(void);
static void _cache_unlink(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
static int numneg;
SYSCTL_INT(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative namecache entries");
static int numcache;
SYSCTL_INT(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long numcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD, &numcalls, 0,
    "Number of namecache lookups");
static u_long numchecks;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numchecks, CTLFLAG_RD, &numchecks, 0,
    "Number of checked entries in namecache lookups");

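/*
 * Usage note (illustrative, not from the original source): these counters
 * are visible from userland via sysctl(8), e.g.:
 *
 *	$ sysctl vfs.cache.numneg vfs.cache.numcache
 *	$ sysctl debug.ncnegfactor
 */
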
namecache lookups"); 237 238 struct nchstats nchstats[SMP_MAXCPU]; 239 /* 240 * Export VFS cache effectiveness statistics to user-land. 241 * 242 * The statistics are left for aggregation to user-land so 243 * neat things can be achieved, like observing per-CPU cache 244 * distribution. 245 */ 246 static int 247 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 248 { 249 struct globaldata *gd; 250 int i, error; 251 252 error = 0; 253 for (i = 0; i < ncpus; ++i) { 254 gd = globaldata_find(i); 255 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats), 256 sizeof(struct nchstats)))) 257 break; 258 } 259 260 return (error); 261 } 262 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD, 263 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics"); 264 265 static struct namecache *cache_zap(struct namecache *ncp, int nonblock); 266 267 /* 268 * Namespace locking. The caller must already hold a reference to the 269 * namecache structure in order to lock/unlock it. This function prevents 270 * the namespace from being created or destroyed by accessors other then 271 * the lock holder. 272 * 273 * Note that holding a locked namecache structure prevents other threads 274 * from making namespace changes (e.g. deleting or creating), prevents 275 * vnode association state changes by other threads, and prevents the 276 * namecache entry from being resolved or unresolved by other threads. 277 * 278 * An exclusive lock owner has full authority to associate/disassociate 279 * vnodes and resolve/unresolve the locked ncp. 280 * 281 * A shared lock owner only has authority to acquire the underlying vnode, 282 * if any. 283 * 284 * The primary lock field is nc_lockstatus. nc_locktd is set after the 285 * fact (when locking) or cleared prior to unlocking. 286 * 287 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed 288 * or recycled, but it does NOT help you if the vnode had already 289 * initiated a recyclement. If this is important, use cache_get() 290 * rather then cache_lock() (and deal with the differences in the 291 * way the refs counter is handled). Or, alternatively, make an 292 * unconditional call to cache_validate() or cache_resolve() 293 * after cache_lock() returns. 294 */ 295 static 296 void 297 _cache_lock(struct namecache *ncp) 298 { 299 thread_t td; 300 int didwarn; 301 int begticks; 302 int error; 303 u_int count; 304 305 KKASSERT(ncp->nc_refs != 0); 306 didwarn = 0; 307 begticks = 0; 308 td = curthread; 309 310 for (;;) { 311 count = ncp->nc_lockstatus; 312 cpu_ccfence(); 313 314 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) { 315 if (atomic_cmpset_int(&ncp->nc_lockstatus, 316 count, count + 1)) { 317 /* 318 * The vp associated with a locked ncp must 319 * be held to prevent it from being recycled. 320 * 321 * WARNING! If VRECLAIMED is set the vnode 322 * could already be in the middle of a recycle. 323 * Callers must use cache_vref() or 324 * cache_vget() on the locked ncp to 325 * validate the vp or set the cache entry 326 * to unresolved. 327 * 328 * NOTE! vhold() is allowed if we hold a 329 * lock on the ncp (which we do). 
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * An exclusive lock owner has full authority to associate/disassociate
 * vnodes and resolve/unresolve the locked ncp.
 *
 * A shared lock owner only has authority to acquire the underlying vnode,
 * if any.
 *
 * The primary lock field is nc_lockstatus.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;
	int begticks;
	int error;
	u_int count;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	begticks = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_lockstatus;
		cpu_ccfence();

		if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			KKASSERT((count & NC_SHLOCK_FLAG) == 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		tsleep_interlock(&ncp->nc_locktd, 0);
		if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
				      count | NC_EXLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		if (begticks == 0)
			begticks = ticks;
		error = tsleep(&ncp->nc_locktd, PINTERLOCKED,
			       "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: "
					"blocked on %p %08x",
					ncp, count);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
		/* loop */
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks + (hz / 2) - begticks) / hz);
	}
}

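/*
 * Usage sketch (illustrative, not from the original source): per MPSAFE
 * rule (1) above, an ncp is always referenced before it is locked:
 *
 *	_cache_hold(ncp);	(reference first)
 *	_cache_lock(ncp);	(then lock, may block)
 *	... modify ncp, e.g. _cache_setvp() or _cache_setunresolved() ...
 *	_cache_unlock(ncp);
 *	_cache_drop(ncp);
 */
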
/*
 * The shared lock works similarly to the exclusive lock except
 * nc_locktd is left NULL and we need an interlock (VHOLD) to
 * prevent vhold() races, since the moment our cmpset_int succeeds
 * another cpu can come in and get its own shared lock.
 *
 * A critical section is needed to prevent interruption during the
 * VHOLD interlock.
 */
static
void
_cache_lock_shared(struct namecache *ncp)
{
	int didwarn;
	int error;
	u_int count;
	u_int optreq = NC_EXLOCK_REQ;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;

	for (;;) {
		count = ncp->nc_lockstatus;
		cpu_ccfence();

		if ((count & ~NC_SHLOCK_REQ) == 0) {
			crit_enter();
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
				      count,
				      (count + 1) | NC_SHLOCK_FLAG |
						    NC_SHLOCK_VHOLD)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				atomic_clear_int(&ncp->nc_lockstatus,
						 NC_SHLOCK_VHOLD);
				crit_exit();
				break;
			}
			/* cmpset failed */
			crit_exit();
			continue;
		}

		/*
		 * If already held shared we can just bump the count, but
		 * only allow this if nobody is trying to get the lock
		 * exclusively.  If we are blocking too long ignore excl
		 * requests (which can race/deadlock us).
		 *
		 * VHOLD is a bit of a hack.  Even though we successfully
		 * added another shared ref, the cpu that got the first
		 * shared ref might not yet have held the vnode.
		 */
		if ((count & (optreq|NC_SHLOCK_FLAG)) == NC_SHLOCK_FLAG) {
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
					cpu_pause();
				break;
			}
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
				      count | NC_SHLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clocksh", nclockwarn);
		if (error == EWOULDBLOCK) {
			optreq = 0;
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock_shared: "
					"blocked on %p %08x",
					ncp, count);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
		/* loop */
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock_shared: "
			"unblocked %*.*s after %d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}

/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_lockstatus;

		if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

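/*
 * Usage sketch (illustrative, not from the original source): the
 * nonblocking form returns EWOULDBLOCK instead of sleeping, so callers
 * already holding other locks can back off rather than deadlock:
 *
 *	if (_cache_lock_nonblock(ncp) == 0) {
 *		... ncp is exclusively locked ...
 *		_cache_unlock(ncp);
 *	} else {
 *		... EWOULDBLOCK: unwind and retry later ...
 *	}
 */
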
/*
 * The shared lock works similarly to the exclusive lock except
 * nc_locktd is left NULL and we need an interlock (VHOLD) to
 * prevent vhold() races, since the moment our cmpset_int succeeds
 * another cpu can come in and get its own shared lock.
 *
 * A critical section is needed to prevent interruption during the
 * VHOLD interlock.
 */
static
int
_cache_lock_shared_nonblock(struct namecache *ncp)
{
	u_int count;

	for (;;) {
		count = ncp->nc_lockstatus;

		if ((count & ~NC_SHLOCK_REQ) == 0) {
			crit_enter();
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
				      count,
				      (count + 1) | NC_SHLOCK_FLAG |
						    NC_SHLOCK_VHOLD)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				atomic_clear_int(&ncp->nc_lockstatus,
						 NC_SHLOCK_VHOLD);
				crit_exit();
				break;
			}
			/* cmpset failed */
			crit_exit();
			continue;
		}

		/*
		 * If already held shared we can just bump the count, but
		 * only allow this if nobody is trying to get the lock
		 * exclusively.
		 *
		 * VHOLD is a bit of a hack.  Even though we successfully
		 * added another shared ref, the cpu that got the first
		 * shared ref might not yet have held the vnode.
		 */
		if ((count & (NC_EXLOCK_REQ|NC_SHLOCK_FLAG)) ==
		    NC_SHLOCK_FLAG) {
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
					cpu_pause();
				break;
			}
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

/*
 * Helper function
 *
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 * nc_locktd must be NULLed out prior to nc_lockstatus getting cleared.
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;
	u_int ncount;
	struct vnode *dropvp;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT((ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) > 0);
	KKASSERT((ncp->nc_lockstatus & NC_SHLOCK_FLAG) || ncp->nc_locktd == td);

	count = ncp->nc_lockstatus;
	cpu_ccfence();

	/*
	 * Clear nc_locktd prior to the atomic op (excl lock only)
	 */
	if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1)
		ncp->nc_locktd = NULL;
	dropvp = NULL;

	for (;;) {
		if ((count &
		     ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ|NC_SHLOCK_FLAG)) == 1) {
			dropvp = ncp->nc_vp;
			if (count & NC_EXLOCK_REQ)
				ncount = count & NC_SHLOCK_REQ;	/* cnt->0 */
			else
				ncount = 0;

			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, ncount)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(&ncp->nc_locktd);
				else if (count & NC_SHLOCK_REQ)
					wakeup(ncp);
				break;
			}
			dropvp = NULL;
		} else {
			KKASSERT((count & NC_SHLOCK_VHOLD) == 0);
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 1);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count - 1)) {
				break;
			}
		}
		count = ncp->nc_lockstatus;
		cpu_ccfence();
	}

	/*
	 * Don't actually drop the vp until we successfully clean out
	 * the lock, otherwise we may race another shared lock.
	 */
	if (dropvp)
		vdrop(dropvp);
}

static
int
_cache_lockstatus(struct namecache *ncp)
{
	if (ncp->nc_locktd == curthread)
		return(LK_EXCLUSIVE);
	if (ncp->nc_lockstatus & NC_SHLOCK_FLAG)
		return(LK_SHARED);
	return(-1);
}

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}

/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is held during this call, we can't do
 *	 anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

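/*
 * Usage sketch (illustrative, mirrors cache_rename() below): both ncps
 * are referenced and locked, and the hash chain spinlock brackets the
 * call:
 *
 *	spin_lock(&nchpp->spin);
 *	_cache_link_parent(ncp, par, nchpp);
 *	spin_unlock(&nchpp->spin);
 */
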
/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_lockstatus == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

/*
 * [re]initialize a nchandle.
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 *
 * MPSAFE if nch is
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 *
 * MPSAFE if nch is
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	if (target->ncp)
		_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE if nch is
 */
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

int
cache_lockstatus(struct nchandle *nch)
{
	return(_cache_lockstatus(nch->ncp));
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

void
cache_lock_maybe_shared(struct nchandle *nch, int excl)
{
	struct namecache *ncp = nch->ncp;

	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED)) {
		_cache_lock(ncp);
	} else {
		_cache_lock_shared(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
				_cache_unlock(ncp);
				_cache_lock(ncp);
			}
		} else {
			_cache_unlock(ncp);
			_cache_lock(ncp);
		}
	}
}

/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

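/*
 * Usage sketch (illustrative, not from the original source): at the
 * nchandle level the ref-and-lock / unlock-and-deref pairing looks like:
 *
 *	struct nchandle nch2;
 *
 *	cache_get(&nch, &nch2);		(nch2 is refd + locked)
 *	... use nch2, e.g. cache_vget(&nch2, cred, LK_SHARED, &vp) ...
 *	cache_put(&nch2);		(unlock + drop in one call)
 */
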
/*
 * Attempt to obtain a shared lock on the ncp.  A shared lock will only
 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
 * valid.  Otherwise an exclusive lock will be acquired instead.
 */
static
struct namecache *
_cache_get_maybe_shared(struct namecache *ncp, int excl)
{
	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED)) {
		return(_cache_get(ncp));
	}
	_cache_hold(ncp);
	_cache_lock_shared(ncp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
			_cache_unlock(ncp);
			ncp = _cache_get(ncp);
			_cache_drop(ncp);
		}
	} else {
		_cache_unlock(ncp);
		ncp = _cache_get(ncp);
		_cache_drop(ncp);
	}
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_lockstatus &
		     ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * This function tries to get a shared lock but will back-off to an exclusive
 * lock if:
 *
 * (1) Some other thread is trying to obtain an exclusive lock
 *     (to prevent the exclusive requester from getting livelocked out
 *     by many shared locks).
 *
 * (2) The current thread already owns an exclusive lock (to avoid
 *     deadlocking).
 *
 * WARNING! On machines with lots of cores we really want to try hard to
 *	    get a shared lock or concurrent path lookups can chain-react
 *	    into a very high-latency exclusive lock.
 */
static int
_cache_lock_shared_special(struct namecache *ncp)
{
	if (_cache_lock_shared_nonblock(ncp) == 0) {
		if ((ncp->nc_lockstatus &
		     ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == (NC_SHLOCK_FLAG | 1)) {
			if (ncp->nc_vp == NULL ||
			    (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
				return(0);
			}
		}
		_cache_unlock(ncp);
		return(EWOULDBLOCK);
	}
	if (ncp->nc_locktd == curthread) {
		_cache_lock(ncp);
		return(0);
	}
	_cache_lock_shared(ncp);
	return(0);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

void
cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spin);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock(&vp->v_spin);
		if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
		/*
		 * XXX: this is a hack to work-around the lack of a real
		 * pfs vfs implementation
		 */
		if (mp != NULL)
			if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
				vp->v_pfsmp = mp;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			VFS_NCPGEN_SET(mp, ncp);
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

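/*
 * Usage sketch (illustrative, not from the original source): a network
 * filesystem that wants resolved entries revalidated after roughly three
 * seconds could set, with the nchandle locked:
 *
 *	cache_settimeout(nch, 3 * hz);	(nc_timeout in ticks, 0 becomes 1)
 */
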
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock(&vp->v_spin);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock(&vp->v_spin);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
				vdrop(vp);
		} else {
			spin_lock(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline int
_cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		return 1;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
		return 1;
	}

	/*
	 * Otherwise we are good
	 */
	return 0;
}

static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (_cache_auto_unresolve_test(mp, ncp))
			_cache_setunresolved(ncp);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state.
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

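/*
 * Usage sketch (illustrative, not from the original source): a caller
 * removing a directory would typically destroy the entry and unresolve
 * its subtree in one call, with nch referenced and locked:
 *
 *	cache_inval(nch, CINV_DESTROY | CINV_CHILDREN);
 */
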
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spin);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spin);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;
	char *nname;

	if (tncp->nc_nlen) {
		nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK);
		bcopy(tncp->nc_name, nname, tncp->nc_nlen);
		nname[tncp->nc_nlen] = 0;
	} else {
		nname = NULL;
	}

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = nname;
	fncp->nc_nlen = tncp->nc_nlen;
	if (oname)
		kfree(oname, M_VFSCACHE);

	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_unlink(tncp);
}

/*
 * Perform actions consistent with unlinking a file.  The passed-in ncp
 * must be locked.
 *
 * The ncp is marked DESTROYED so it no longer shows up in searches,
 * and will be physically deleted when the vnode goes away.
 *
 * If the related vnode has no refs then we cycle it through vget()/vput()
 * to (possibly if we don't have a ref race) trigger a deactivation,
 * allowing the VFS to trivially detect and recycle the deleted vnode
 * via VOP_INACTIVE().
 *
 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
 *	 target ncp.
 */
void
cache_unlink(struct nchandle *nch)
{
	_cache_unlink(nch->ncp);
}

static void
_cache_unlink(struct namecache *ncp)
{
	struct vnode *vp;

	/*
	 * Causes lookups to fail and allows another ncp with the same
	 * name to be created under ncp->nc_parent.
	 */
	ncp->nc_flag |= NCF_DESTROYED;

	/*
	 * Attempt to trigger a deactivation.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
	    (vp = ncp->nc_vp) != NULL &&
	    !sysref_isactive(&vp->v_sysref)) {
		if (vget(vp, LK_SHARED) == 0)
			vput(vp);
	}
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.
 * The most typical error is ENOENT, meaning that the ncp represents a
 * negative cache hit and there is no vnode to retrieve, but other errors
 * can occur too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Similar to cache_vget() but only acquires a ref on the vnode.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
 */
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

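/*
 * Usage sketch (illustrative, not from the original source): with nch
 * referenced and locked (exclusively if it may be unresolved):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... vp is refd and locked LK_SHARED ...
 *		vput(vp);
 *	} else if (error == ENOENT) {
 *		... negative cache hit, no vnode to retrieve ...
 *	}
 */
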
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
 *	 lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}
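/*
 * Illustrative sketch (not compiled into the kernel): the hold/lock
 * ordering used by cache_dvpref() above, reduced to its skeleton.  The
 * vnode is pinned with vhold() while the parent ncp lock is held, and the
 * potentially blocking vget() is only issued after the ncp lock has been
 * released.
 */
#if 0
	_cache_hold(par);		/* ref the parent ncp */
	_cache_lock(par);		/* nc_vp is only stable while locked */
	if ((dvp = par->nc_vp) != NULL)
		vhold(dvp);		/* pin the vnode; no ref required */
	_cache_unlock(par);		/* drop the ncp lock before blocking */
	if (dvp && vget(dvp, LK_SHARED) == 0) {
		vn_unlock(dvp);		/* keep the ref, drop the vp lock */
		vdrop(dvp);		/* the ref now pins the vnode */
	}
#endif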
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is
 * returned.  If 'makeit' is 1 we attempt to track down and create the
 * namecache topology for dvp.  This will fail only if the directory has
 * been deleted out from under the caller.
 *
 * Callers must always check for a NULL return no matter the value of
 * 'makeit'.
 *
 * To avoid blowing out the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock(&dvp->v_spin);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock(&dvp->v_spin);
			break;
		}
		spin_unlock(&dvp->v_spin);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}
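/*
 * Illustrative sketch (not compiled into the kernel): roughly how an
 * NFS-style file-handle-to-vnode path might reconstruct a namecache entry
 * for a directory vnode it obtained without a lookup.  "my_fh_to_nch" is
 * an invented name for this example.
 */
#if 0
static int
my_fh_to_nch(struct vnode *dvp, struct ucred *cred, struct nchandle *nch)
{
	int error;

	/* dvp must be referenced but unlocked */
	error = cache_fromdvp(dvp, cred, 1, nch);
	if (error == 0) {
		/* nch->ncp is held and unlocked; drop it when done */
		cache_drop(nch);
	}
	return (error);
}
#endif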
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something
	 * we can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock(&pvp->v_spin);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock(&pvp->v_spin);
			vrele(pvp);
			break;
		}
		spin_unlock(&pvp->v_spin);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_uptime) {
			last_fromdvp_report = time_uptime;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to hold placeholders for
 * negative lookups and the vnode might not be immediately assigned.  (2) is
 * certainly far less efficient than (1), but since we are only talking
 * about directories here (which are likely to remain cached), the case does
 * not actually run all that often and has the supreme advantage of not
 * polluting the namecache algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
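/*
 * Illustrative sketch (not compiled into the kernel): the directory scan
 * above walks packed struct dirent records in a buffer filled by
 * VOP_READDIR().  The iteration idiom in isolation; "wanted_ino" is an
 * invented variable standing in for vat.va_fileid.
 */
#if 0
	struct dirent *den = (struct dirent *)rbuf;
	int bytes = blksize - uio.uio_resid;	/* valid data in the buffer */

	while (bytes > 0) {
		if (den->d_type != DT_WHT && den->d_ino == wanted_ino)
			break;			/* found the entry */
		bytes -= _DIRENT_DIRSIZ(den);	/* size of this record */
		den = _DIRENT_NEXT(den);	/* advance to the next one */
	}
#endif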
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop without blowing
 * out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
	struct namecache *par;
	struct vnode *dropvp;
	int refs;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * Note that once the spinlocks are acquired if nc_refs == 1 no
	 * other references are possible.  If it isn't, however, we have
	 * to decrement but also be sure to avoid a 1->0 transition.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.
	 */
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			for (;;) {
				if (_cache_lock_nonblock(par) == 0)
					break;
				refs = ncp->nc_refs;
				ncp->nc_flag |= NCF_DEFEREDZAP;
				++numdefered;	/* MP race ok */
				if (atomic_cmpset_int(&ncp->nc_refs,
						      refs, refs - 1)) {
					_cache_unlock(ncp);
					return(NULL);
				}
				cpu_pause();
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		spin_lock(&ncp->nc_head->spin);
	}

	/*
	 * If someone other than us has a ref or we have children
	 * we cannot zap the entry.  The 1->0 transition and any
	 * further list operation is protected by the spinlocks
	 * we have acquired but other transitions are not.
	 */
	for (;;) {
		refs = ncp->nc_refs;
		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
			break;
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
			if (par) {
				spin_unlock(&ncp->nc_head->spin);
				_cache_put(par);
			}
			_cache_unlock(ncp);
			return(NULL);
		}
		cpu_pause();
	}

	/*
	 * We are the only ref and with the spinlocks held no further
	 * refs can be acquired by others.
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
	 */
	dropvp = NULL;
	if (par) {
		struct nchash_head *nchpp = ncp->nc_head;

		KKASSERT(nchpp != NULL);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		ncp->nc_head = NULL;
		ncp->nc_parent = NULL;
		spin_unlock(&nchpp->spin);
		_cache_unlock(par);
	} else {
		KKASSERT(ncp->nc_head == NULL);
	}

	/*
	 * ncp should not have picked up any refs.  Physically
	 * destroy the ncp.
	 */
	KKASSERT(ncp->nc_refs == 1);
	/* _cache_unlock(ncp) not required */
	ncp->nc_refs = -1;	/* safety */
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);

	/*
	 * Delayed drop (we had to release our spinlocks)
	 *
	 * The refed parent (if not NULL) must be dropped.  The
	 * caller is responsible for looping.
	 */
	if (dropvp)
		vdrop(dropvp);
	return(par);
}
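/*
 * Illustrative sketch (not compiled into the kernel): the careful ref drop
 * used by cache_zap() above.  The CAS loop only decrements nc_refs when
 * that cannot produce an unintended 1->0 transition; "can_destroy" is an
 * invented predicate standing in for the real tests.
 */
#if 0
	for (;;) {
		refs = ncp->nc_refs;
		if (refs == 1 && can_destroy(ncp))
			break;		/* we own the last ref; destroy it */
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
			return;		/* someone else still has a ref */
		cpu_pause();		/* lost the CAS race; retry */
	}
#endif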
/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * namecache.
 *
 * This routine is called in the critical path and also called from
 * vnlru().  When called from vnlru we use a lower limit to try to
 * deal with the negative cache before the critical path has to start
 * dealing with it.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };

void
cache_hysteresis(int critpath)
{
	int poslimit;
	int neglimit = desiredvnodes / ncnegfactor;
	int xnumcache = numcache;

	if (critpath == 0)
		neglimit = neglimit * 8 / 10;

	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(neg_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg > neglimit) {
			if (critpath)
				_cache_cleanneg(ncnegflush);
			else
				_cache_cleanneg(ncnegflush +
						numneg - neglimit);
			neg_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * 9 / 10 > neglimit
		) {
			if (critpath)
				_cache_cleanneg(ncnegflush);
			else
				_cache_cleanneg(ncnegflush +
						numneg * 9 / 10 - neglimit);
		} else {
			neg_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}

	/*
	 * Don't cache too many positive hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 *
	 * Excessive positive hits can accumulate due to large numbers of
	 * hardlinks (the vnode cache will not prevent hardlinked ncps from
	 * growing without bound).
	 */
	if ((poslimit = ncposlimit) == 0)
		poslimit = desiredvnodes * 2;
	if (critpath == 0)
		poslimit = poslimit * 8 / 10;

	switch(pos_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (xnumcache > poslimit && xnumcache > MINPOS) {
			if (critpath)
				_cache_cleanpos(ncposflush);
			else
				_cache_cleanpos(ncposflush +
						xnumcache - poslimit);
			pos_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) {
			if (critpath)
				_cache_cleanpos(ncposflush);
			else
				_cache_cleanpos(ncposflush +
						xnumcache - poslimit * 5 / 6);
		} else {
			pos_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}

	/*
	 * Clean out dangling deferred-zap ncps (which could not be cleanly
	 * dropped) if too many have built up.  Note that numdefered is not
	 * an exact number as such ncps can be reused and the counter is not
	 * handled in a MP safe manner by design.
	 */
	if (numdefered > neglimit) {
		_cache_cleandefered();
	}
}
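/*
 * Illustrative sketch (not compiled into the kernel): the two-state
 * hysteresis used by cache_hysteresis() above, reduced to a generic form.
 * Cleaning starts when the count crosses the limit and keeps going until
 * it drops well below it, avoiding a clean/re-add flip-flop right at the
 * threshold.  "count", "limit" and "clean_some" are invented names.
 */
#if 0
	switch(state) {
	case CHI_LOW:
		if (count > limit) {
			clean_some();
			state = CHI_HIGH;	/* begin draining */
		}
		break;
	case CHI_HIGH:
		if (count > limit * 9 / 10)
			clean_some();		/* keep draining */
		else
			state = CHI_LOW;	/* hysteresis satisfied */
		break;
	}
#endif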
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locks are
 * ordered for upward (child -> parent) traversals, not for downward ones.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
	 */
	cache_hysteresis(1);

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			_cache_drop(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, so create a new entry and add it
	 * to the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		_cache_lock(par_nch->ncp);
		par_locked = 1;
		goto restart;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
}
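/*
 * Illustrative sketch (not compiled into the kernel): a single path
 * component lookup under a held, unlocked parent nchandle.  "resolve_one"
 * is an invented name for this example; the caller must cache_put() the
 * result when done with it.
 */
#if 0
static int
resolve_one(struct nchandle *par_nch, char *name, struct ucred *cred,
	    struct nchandle *res)
{
	struct nlcomponent nlc;

	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = strlen(name);

	/* cache_nlookup() always returns a referenced, locked entry */
	*res = cache_nlookup(par_nch, &nlc);
	if (res->ncp->nc_flag & NCF_UNRESOLVED)
		return (cache_resolve(res, cred));	/* dive the VFS */
	return (res->ncp->nc_error);	/* ENOENT for a negative hit */
}
#endif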
/*
 * Attempt to look up a namecache entry and return it with a shared
 * namecache lock.
 */
int
cache_nlookup_maybe_shared(struct nchandle *par_nch, struct nlcomponent *nlc,
			   int excl, struct nchandle *res_nch)
{
	struct namecache *ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;

	/*
	 * If exclusive requested or shared namecache locks are disabled,
	 * return failure.
	 */
	if (ncp_shared_lock_disable || excl)
		return(EWOULDBLOCK);

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
	 */
	cache_hysteresis(1);

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);

	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_shared_special(ncp) == 0) {
				if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
				    (ncp->nc_flag & NCF_DESTROYED) == 0 &&
				    _cache_auto_unresolve_test(mp, ncp) == 0) {
					goto found;
				}
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			break;
		}
	}

	/*
	 * Failure
	 */
	spin_unlock(&nchpp->spin);
	return(EWOULDBLOCK);

	/*
	 * Success
	 *
	 * Note that nc_error might be non-zero (e.g. ENOENT).
	 */
found:
	res_nch->mount = mp;
	res_nch->ncp = ncp;
	++gd->gd_nchstats->ncs_goodhits;
	atomic_add_int(&res_nch->mount->mnt_refs, 1);

	KKASSERT(ncp->nc_error != EWOULDBLOCK);
	return(ncp->nc_error);
}
/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
struct nchandle
cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;
				}
				goto found;
			}
			_cache_drop(ncp);
			goto failed;
		}
	}

	/*
	 * We failed to locate an entry, so create a new entry and add it
	 * to the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
			par_locked = 1;
			goto restart;
		}
		goto failed;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
failed:
	if (new_ncp) {
		_cache_free(new_ncp);
		new_ncp = NULL;
	}
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
}
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.  The DragonFly
 * mount system allows arbitrary loops in the topology and disentangles
 * those loops by matching against (mp, ncp) rather than just (ncp).
 * This means any given ncp can dive any number of mounts, depending
 * on the relative mount (e.g. nullfs) the caller is at in the topology.
 *
 * We use a very simple frontend cache to reduce SMP conflicts,
 * which we have to do because the mountlist scan needs an exclusive
 * lock around its ripout info list.  Not to mention that there might
 * be a lot of mounts.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
struct ncmount_cache *
ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
{
	int hash;

	hash = ((int)(intptr_t)mp / sizeof(*mp)) ^
	       ((int)(intptr_t)ncp / sizeof(*ncp));
	hash = (hash & 0x7FFFFFFF) % NCMOUNT_NUMCACHE;
	return (&ncmount_cache[hash]);
}

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		atomic_add_int(&mp->mnt_refs, 1);
		return(-1);
	}
	return(0);
}
struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;
	struct ncmount_cache *ncc;
	struct mount *mp;

	/*
	 * Fast
	 */
	if (ncmount_cache_enable == 0) {
		ncc = NULL;
		goto skip;
	}
	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->ncp == nch->ncp) {
		spin_lock_shared(&ncc->spin);
		if (ncc->isneg == 0 &&
		    ncc->ncp == nch->ncp && (mp = ncc->mp) != NULL) {
			if (mp->mnt_ncmounton.mount == nch->mount &&
			    mp->mnt_ncmounton.ncp == nch->ncp) {
				/*
				 * Cache hit (positive)
				 */
				atomic_add_int(&mp->mnt_refs, 1);
				spin_unlock_shared(&ncc->spin);
				++ncmount_cache_hit;
				return(mp);
			}
			/* else cache miss */
		}
		if (ncc->isneg &&
		    ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
			/*
			 * Cache hit (negative)
			 */
			spin_unlock_shared(&ncc->spin);
			++ncmount_cache_hit;
			return(NULL);
		}
		spin_unlock_shared(&ncc->spin);
	}
skip:

	/*
	 * Slow
	 */
	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);

	/*
	 * Cache the result.
	 *
	 * Negative lookups: We cache the originating {ncp,mp}.  (mp) is
	 *		     only used for pointer comparisons and is not
	 *		     referenced (otherwise there would be dangling
	 *		     refs).
	 *
	 * Positive lookups: We cache the originating {ncp} and the target
	 *		     (mp).  (mp) is referenced.
	 *
	 * Indeterminate:    If the match is undergoing an unmount we do
	 *		     not cache it to avoid racing cache_unmounting(),
	 *		     but still return the match.
	 */
	if (ncc) {
		spin_lock(&ncc->spin);
		if (info.result == NULL) {
			if (ncc->isneg == 0 && ncc->mp)
				atomic_add_int(&ncc->mp->mnt_refs, -1);
			ncc->ncp = nch->ncp;
			ncc->mp = nch->mount;
			ncc->isneg = 1;
			spin_unlock(&ncc->spin);
			++ncmount_cache_overwrite;
		} else if ((info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0) {
			if (ncc->isneg == 0 && ncc->mp)
				atomic_add_int(&ncc->mp->mnt_refs, -1);
			atomic_add_int(&info.result->mnt_refs, 1);
			ncc->ncp = nch->ncp;
			ncc->mp = info.result;
			ncc->isneg = 0;
			spin_unlock(&ncc->spin);
			++ncmount_cache_overwrite;
		} else {
			spin_unlock(&ncc->spin);
		}
		++ncmount_cache_miss;
	}
	return(info.result);
}
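/*
 * Illustrative sketch (not compiled into the kernel): crossing a mount
 * point during a downward traversal.  A positive result from
 * cache_findmount() carries a mount ref that must be returned with
 * cache_dropmount().  "descend_into" is an invented helper name.
 */
#if 0
	struct mount *mp;

	if ((mp = cache_findmount(&nch)) != NULL) {
		/* something is mounted here; continue at mp's root ncp */
		descend_into(&mp->mnt_ncmountpt);
		cache_dropmount(mp);	/* release the findmount ref */
	}
#endif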
void
cache_dropmount(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
}

void
cache_ismounting(struct mount *mp)
{
	struct nchandle *nch = &mp->mnt_ncmounton;
	struct ncmount_cache *ncc;

	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->isneg &&
	    ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
		spin_lock(&ncc->spin);
		if (ncc->isneg &&
		    ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
			ncc->ncp = NULL;
			ncc->mp = NULL;
		}
		spin_unlock(&ncc->spin);
	}
}

void
cache_unmounting(struct mount *mp)
{
	struct nchandle *nch = &mp->mnt_ncmounton;
	struct ncmount_cache *ncc;

	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->isneg == 0 &&
	    ncc->ncp == nch->ncp && ncc->mp == mp) {
		spin_lock(&ncc->spin);
		if (ncc->isneg == 0 &&
		    ncc->ncp == nch->ncp && ncc->mp == mp) {
			atomic_add_int(&mp->mnt_refs, -1);
			ncc->ncp = NULL;
			ncc->mp = NULL;
		}
		spin_unlock(&ncc->spin);
	}
}

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * If the ncp was destroyed it will never resolve again.  This
	 * can basically only happen when someone is chdir'd into an
	 * empty directory which is then rmdir'd.  We want to catch this
	 * here and not dive the VFS because the VFS might actually
	 * have a way to re-resolve the disconnected ncp, which will
	 * result in inconsistencies in the cdir/nch for proc->p_fd.
	 */
	if (ncp->nc_flag & NCF_DESTROYED) {
		kprintf("Warning: cache_resolve: ncp '%s' was unlinked\n",
			ncp->nc_name);
		return(EINVAL);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via
	 * vhold() due to the existence of the child, and should not
	 * disappear.  However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and when it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone; ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 *	 ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * Recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

/*
 * Clean out negative cache entries when too many have accumulated.
 */
static void
_cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		spin_lock(&ncspin);
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			spin_unlock(&ncspin);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock(&ncspin);

		/*
		 * This can race, so we must re-check that the ncp
		 * is on the ncneglist after successfully locking it.
		 */
		if (_cache_lock_special(ncp) == 0) {
			if (ncp->nc_vp == NULL &&
			    (ncp->nc_flag & NCF_UNRESOLVED) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				kprintf("cache_cleanneg: race avoided\n");
				_cache_unlock(ncp);
			}
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}

/*
 * Clean out positive cache entries when too many have accumulated.
 */
static void
_cache_cleanpos(int count)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	int rover_copy;

	/*
	 * Attempt to clean out the specified number of positive cache
	 * entries.
	 */
	while (count) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		cpu_ccfence();
		nchpp = NCHHASH(rover_copy);

		spin_lock(&nchpp->spin);
		ncp = LIST_FIRST(&nchpp->list);
		while (ncp && (ncp->nc_flag & NCF_DESTROYED))
			ncp = LIST_NEXT(ncp, nc_hash);
		if (ncp)
			_cache_hold(ncp);
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;
	dummy.nc_refs = 1;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			LIST_REMOVE(&dummy, nc_hash);
			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		LIST_REMOVE(&dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}
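/*
 * Illustrative sketch (not compiled into the kernel): the placeholder
 * trick used by _cache_cleandefered() above.  A dummy entry marked
 * NCF_DESTROYED keeps the scan position valid across spinlock release;
 * real lookups skip DESTROYED entries, so the dummy is invisible to them.
 */
#if 0
	LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
	ncp = &dummy;
	while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
		/* park the dummy just after the entry being processed */
		LIST_REMOVE(&dummy, nc_hash);
		LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
		/* the chain spinlock may now be dropped and reacquired */
		ncp = &dummy;	/* resume the scan from the placeholder */
	}
	LIST_REMOVE(&dummy, nc_hash);
#endif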
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	spin_init(&ncspin);
	nchashtbl = hashinit_ext(desiredvnodes / 2,
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		LIST_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin);
	}
	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
		spin_init(&ncmount_cache[i].spin);
	nclockwarn = 5 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence
 * of the children.  The namecache topology is left intact even if we do
 * not know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
	struct nchash_head *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		spin_lock_wr(&nchpp->spin); XXX
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				ncp = cache_zap(ncp, 0);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
		spin_unlock_wr(&nchpp->spin); XXX
	}
}

#endif

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

static u_long numcwdcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
    "Number of current directory resolution calls");
static u_long numcwdfailnf;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
    "Number of current directory failures due to lack of file");
static u_long numcwdfailsz;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
    "Number of current directory failures due to large result");
static u_long numcwdfound;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
    "Number of current directory resolution successes");

/*
 * MPALMOSTSAFE
 */
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards, if we encounter the root of
		 * the current mount, we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfailsz++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			if (ncp_shared_lock_disable)
				_cache_lock(ncp);
			else
				_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfailnf++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
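/*
 * Illustrative sketch (not compiled into the kernel): kern_getcwd()
 * assembles the path backwards, prepending each component in front of the
 * previous one so no second pass or string reversal is needed.  The core
 * idiom, with the bp == buf overflow checks omitted:
 */
#if 0
	bp = buf + buflen - 1;
	*bp = '\0';			/* terminate first, build leftwards */
	for (i = ncp->nc_nlen - 1; i >= 0; i--)
		*--bp = ncp->nc_name[i];	/* copy the name in reverse */
	*--bp = '/';			/* then its leading slash */
	/* on success bp points at the start of the finished string */
#endif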
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0,
    "Disable fullpath lookups");

static u_int numfullpathcalls;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
    &numfullpathcalls, 0,
    "Number of full path resolutions in progress");
static u_int numfullpathfailnf;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailnf, CTLFLAG_RD,
    &numfullpathfailnf, 0,
    "Number of full path resolution failures due to lack of file");
static u_int numfullpathfailsz;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailsz, CTLFLAG_RD,
    &numfullpathfailsz, 0,
    "Number of full path resolution failures due to insufficient memory");
static u_int numfullpathfound;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfound, CTLFLAG_RD,
    &numfullpathfound, 0,
    "Number of full path resolution successes");

int
cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp, *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	atomic_add_int(&numfullpathcalls, -1);

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (nchbase)
		fd_nrdir = *nchbase;
	else if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so
		 * whenever we encounter an ncp marked as a mountpoint.  We
		 * try to find the actual mount by locating the mount whose
		 * mountpoint is this ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfailsz++;
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfailsz++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numfullpathfailnf++;
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfailsz++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf,
	    int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	*freebuf = NULL;
	atomic_add_int(&numfullpathcalls, 1);
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock(&vn->v_spin);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock(&vn->v_spin);

	atomic_add_int(&numfullpathcalls, -1);
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}
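/*
 * Illustrative sketch (not compiled into the kernel): retrieving the full
 * path of a process's text vnode via vn_fullpath() above.  retbuf points
 * into freebuf's allocation, so only freebuf is passed to kfree().
 */
#if 0
	char *retbuf;
	char *freebuf;

	if (vn_fullpath(p, NULL, &retbuf, &freebuf, 0) == 0) {
		kprintf("executable path: %s\n", retbuf);
		kfree(freebuf, M_TEMP);	/* frees retbuf's storage too */
	}
#endif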