1 /* 2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * Copyright (c) 1989, 1993, 1995 35 * The Regents of the University of California. All rights reserved. 36 * 37 * This code is derived from software contributed to Berkeley by 38 * Poul-Henning Kamp of the FreeBSD Project. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. Neither the name of the University nor the names of its contributors 49 * may be used to endorse or promote products derived from this software 50 * without specific prior written permission. 51 * 52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 55 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 62 * SUCH DAMAGE. 
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/sysctl.h>
69 #include <sys/mount.h>
70 #include <sys/vnode.h>
71 #include <sys/malloc.h>
72 #include <sys/sysproto.h>
73 #include <sys/spinlock.h>
74 #include <sys/proc.h>
75 #include <sys/namei.h>
76 #include <sys/nlookup.h>
77 #include <sys/filedesc.h>
78 #include <sys/fnv_hash.h>
79 #include <sys/globaldata.h>
80 #include <sys/kern_syscall.h>
81 #include <sys/dirent.h>
82 #include <ddb/ddb.h>
83
84 #include <sys/spinlock2.h>
85
86 #define MAX_RECURSION_DEPTH 64
87
88 /*
89 * Random lookups in the cache are accomplished with a hash table using
90 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock.
91 *
92 * Negative entries may exist and correspond to resolved namecache
93 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
94 * will be set if the entry corresponds to a whited-out directory entry
95 * (versus simply not finding the entry at all). pcpu_ncache[n].neg_list
96 * is locked via pcpu_ncache[n].neg_spin.
97 *
98 * MPSAFE RULES:
99 *
100 * (1) A ncp must be referenced before it can be locked.
101 *
102 * (2) A ncp must be locked in order to modify it.
103 *
104 * (3) ncp locks are always ordered child -> parent. That may seem
105 * backwards but forward scans use the hash table and thus can hold
106 * the parent unlocked when traversing downward.
107 *
108 * This allows insert/rename/delete/dot-dot and other operations
109 * to use ncp->nc_parent links.
110 *
111 * This also prevents a locked up e.g. NFS node from creating a
112 * chain reaction all the way back to the root vnode / namecache.
113 *
114 * (4) parent linkages require both the parent and child to be locked.
115 */
116
117 /*
118 * Structures associated with name caching.
119 */
120 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
121 #define MINNEG 1024
122 #define MINPOS 1024
123 #define NCMOUNT_NUMCACHE 16301 /* prime number */
124
125 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
126
127 TAILQ_HEAD(nchash_list, namecache);
128
129 /*
130 * Don't cachealign, but at least pad to 32 bytes so entries
131 * don't cross a cache line.
132 */
133 struct nchash_head {
134 struct nchash_list list; /* 16 bytes */
135 struct spinlock spin; /* 8 bytes */
136 long pad01; /* 8 bytes */
137 };
138
139 struct ncmount_cache {
140 struct spinlock spin;
141 struct namecache *ncp;
142 struct mount *mp;
143 int isneg; /* if != 0 mp is originator and not target */
144 } __cachealign;
145
146 struct pcpu_ncache {
147 struct spinlock neg_spin; /* for neg_list and neg_count */
148 struct namecache_list neg_list;
149 long neg_count;
150 long vfscache_negs;
151 long vfscache_count;
152 long vfscache_leafs;
153 long numdefered;
154 } __cachealign;
155
156 __read_mostly static struct nchash_head *nchashtbl;
157 __read_mostly static struct pcpu_ncache *pcpu_ncache;
158 static struct ncmount_cache ncmount_cache[NCMOUNT_NUMCACHE];
159
160 /*
161 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
162 * to create the namecache infrastructure leading to a dangling vnode.
163 *
164 * 0 Only errors are reported
165 * 1 Successes are reported
166 * 2 Successes + the whole directory scan is reported
167 * 3 Force the directory scan code to run as if the parent vnode did not
168 * have a namecache record, even if it does have one.
169 */
170 __read_mostly static int ncvp_debug;
171 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
172 "Namecache debug level (0-3)");
173
174 __read_mostly static u_long nchash; /* size of hash table */
175 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
176 "Size of namecache hash table");
177
178 __read_mostly static int ncnegflush = 10; /* burst for negative flush */
179 SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
180 "Batch flush negative entries");
181
182 __read_mostly static int ncposflush = 10; /* burst for positive flush */
183 SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
184 "Batch flush positive entries");
185
186 __read_mostly static int ncnegfactor = 16; /* ratio of negative entries */
187 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
188 "Ratio of namecache negative entries");
189
190 __read_mostly static int nclockwarn; /* warn on locked entries in ticks */
191 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
192 "Warn on locked namecache entries in ticks");
193
194 __read_mostly static int ncposlimit; /* max number of positive entries */
195 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
196 "Limit on the number of positive namecache entries");
197
198 __read_mostly static int ncp_shared_lock_disable = 0;
199 SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
200 &ncp_shared_lock_disable, 0, "Disable shared namecache locks");
201
202 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
203 "sizeof(struct vnode)");
204 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
205 "sizeof(struct namecache)");
206
207 __read_mostly static int ncmount_cache_enable = 1;
208 SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
209 &ncmount_cache_enable, 0, "Enable mount point caching");
210
211 static __inline void _cache_drop(struct namecache *ncp);
212 static int cache_resolve_mp(struct mount *mp);
213 static struct vnode *cache_dvpref(struct namecache *ncp);
214 static void _cache_lock(struct namecache *ncp);
215 static void _cache_setunresolved(struct namecache *ncp);
216 static void _cache_cleanneg(long count);
217 static void _cache_cleanpos(long count);
218 static void _cache_cleandefered(void);
219 static void _cache_unlink(struct namecache *ncp);
220 #if 0
221 static void vfscache_rollup_all(void);
222 #endif
223
224 /*
225 * The new name cache statistics (these are rolled up globals and not
226 * modified in the critical path, see struct pcpu_ncache).
227 */
228 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
229 static long vfscache_negs;
230 SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
231 "Number of negative namecache entries");
232 static long vfscache_count;
233 SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
234 "Number of namecache entries");
235 static long vfscache_leafs;
236 SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
237 "Number of namecache leaf entries");
238 static long numdefered;
239 SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
240 "Number of deferred namecache zaps");
241
242
243 struct nchstats nchstats[SMP_MAXCPU];
244 /*
245 * Export VFS cache effectiveness statistics to user-land.
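 *
 * Purely for illustration (not part of the original source): a userland
 * consumer could read the per-cpu array exported below via sysctlbyname().
 * The element count is the returned length divided by
 * sizeof(struct nchstats):
 *
 *	size_t len = 0;
 *	struct nchstats *stats;
 *
 *	if (sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0) == 0 &&
 *	    (stats = malloc(len)) != NULL &&
 *	    sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0) {
 *		int ncpu = len / sizeof(*stats);
 *		... aggregate or display the ncpu per-cpu entries ...
 *	}
 *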
246 * 247 * The statistics are left for aggregation to user-land so 248 * neat things can be achieved, like observing per-CPU cache 249 * distribution. 250 */ 251 static int 252 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 253 { 254 struct globaldata *gd; 255 int i, error; 256 257 error = 0; 258 for (i = 0; i < ncpus; ++i) { 259 gd = globaldata_find(i); 260 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats), 261 sizeof(struct nchstats)))) 262 break; 263 } 264 265 return (error); 266 } 267 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD, 268 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics"); 269 270 static struct namecache *cache_zap(struct namecache *ncp, int nonblock); 271 272 /* 273 * Cache mount points and namecache records in order to avoid unnecessary 274 * atomic ops on mnt_refs and ncp->refs. This improves concurrent SMP 275 * performance and is particularly important on multi-socket systems to 276 * reduce cache-line ping-ponging. 277 * 278 * Try to keep the pcpu structure within one cache line (~64 bytes). 279 */ 280 #define MNTCACHE_COUNT 5 281 282 struct mntcache { 283 struct mount *mntary[MNTCACHE_COUNT]; 284 struct namecache *ncp1; 285 struct namecache *ncp2; 286 struct nchandle ncdir; 287 int iter; 288 int unused01; 289 } __cachealign; 290 291 static struct mntcache pcpu_mntcache[MAXCPU]; 292 293 static 294 void 295 _cache_mntref(struct mount *mp) 296 { 297 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 298 int i; 299 300 for (i = 0; i < MNTCACHE_COUNT; ++i) { 301 if (cache->mntary[i] != mp) 302 continue; 303 if (atomic_cmpset_ptr((void *)&cache->mntary[i], mp, NULL)) 304 return; 305 } 306 atomic_add_int(&mp->mnt_refs, 1); 307 } 308 309 static 310 void 311 _cache_mntrel(struct mount *mp) 312 { 313 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 314 int i; 315 316 for (i = 0; i < MNTCACHE_COUNT; ++i) { 317 if (cache->mntary[i] == NULL) { 318 mp = atomic_swap_ptr((void *)&cache->mntary[i], mp); 319 if (mp == NULL) 320 return; 321 } 322 } 323 i = (int)((uint32_t)++cache->iter % (uint32_t)MNTCACHE_COUNT); 324 mp = atomic_swap_ptr((void *)&cache->mntary[i], mp); 325 if (mp) 326 atomic_add_int(&mp->mnt_refs, -1); 327 } 328 329 /* 330 * Clears all cached mount points on all cpus. This routine should only 331 * be called when we are waiting for a mount to clear, e.g. so we can 332 * unmount. 333 */ 334 void 335 cache_clearmntcache(void) 336 { 337 int n; 338 339 for (n = 0; n < ncpus; ++n) { 340 struct mntcache *cache = &pcpu_mntcache[n]; 341 struct namecache *ncp; 342 struct mount *mp; 343 int i; 344 345 for (i = 0; i < MNTCACHE_COUNT; ++i) { 346 if (cache->mntary[i]) { 347 mp = atomic_swap_ptr( 348 (void *)&cache->mntary[i], NULL); 349 if (mp) 350 atomic_add_int(&mp->mnt_refs, -1); 351 } 352 } 353 if (cache->ncp1) { 354 ncp = atomic_swap_ptr((void *)&cache->ncp1, NULL); 355 if (ncp) 356 _cache_drop(ncp); 357 } 358 if (cache->ncp2) { 359 ncp = atomic_swap_ptr((void *)&cache->ncp2, NULL); 360 if (ncp) 361 _cache_drop(ncp); 362 } 363 if (cache->ncdir.ncp) { 364 ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, NULL); 365 if (ncp) 366 _cache_drop(ncp); 367 } 368 if (cache->ncdir.mount) { 369 mp = atomic_swap_ptr((void *)&cache->ncdir.mount, NULL); 370 if (mp) 371 atomic_add_int(&mp->mnt_refs, -1); 372 } 373 } 374 } 375 376 377 /* 378 * Namespace locking. The caller must already hold a reference to the 379 * namecache structure in order to lock/unlock it. 
This function prevents
380 * the namespace from being created or destroyed by accessors other than
381 * the lock holder.
382 *
383 * Note that holding a locked namecache structure prevents other threads
384 * from making namespace changes (e.g. deleting or creating), prevents
385 * vnode association state changes by other threads, and prevents the
386 * namecache entry from being resolved or unresolved by other threads.
387 *
388 * An exclusive lock owner has full authority to associate/disassociate
389 * vnodes and resolve/unresolve the locked ncp.
390 *
391 * A shared lock owner only has authority to acquire the underlying vnode,
392 * if any.
393 *
394 * The primary lock field is nc_lockstatus. nc_locktd is set after the
395 * fact (when locking) or cleared prior to unlocking.
396 *
397 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
398 * or recycled, but it does NOT help you if the vnode had already
399 * initiated a recyclement. If this is important, use cache_get()
400 * rather than cache_lock() (and deal with the differences in the
401 * way the refs counter is handled). Or, alternatively, make an
402 * unconditional call to cache_validate() or cache_resolve()
403 * after cache_lock() returns.
404 */
405 static
406 void
407 _cache_lock(struct namecache *ncp)
408 {
409 thread_t td;
410 int didwarn;
411 int begticks;
412 int error;
413 u_int count;
414
415 KKASSERT(ncp->nc_refs != 0);
416 didwarn = 0;
417 begticks = 0;
418 td = curthread;
419
420 for (;;) {
421 count = ncp->nc_lockstatus;
422 cpu_ccfence();
423
424 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
425 if (atomic_cmpset_int(&ncp->nc_lockstatus,
426 count, count + 1)) {
427 /*
428 * The vp associated with a locked ncp must
429 * be held to prevent it from being recycled.
430 *
431 * WARNING! If VRECLAIMED is set the vnode
432 * could already be in the middle of a recycle.
433 * Callers must use cache_vref() or
434 * cache_vget() on the locked ncp to
435 * validate the vp or set the cache entry
436 * to unresolved.
437 *
438 * NOTE! vhold() is allowed if we hold a
439 * lock on the ncp (which we do).
440 */ 441 ncp->nc_locktd = td; 442 if (ncp->nc_vp) 443 vhold(ncp->nc_vp); 444 break; 445 } 446 /* cmpset failed */ 447 continue; 448 } 449 if (ncp->nc_locktd == td) { 450 KKASSERT((count & NC_SHLOCK_FLAG) == 0); 451 if (atomic_cmpset_int(&ncp->nc_lockstatus, 452 count, count + 1)) { 453 break; 454 } 455 /* cmpset failed */ 456 continue; 457 } 458 tsleep_interlock(&ncp->nc_locktd, 0); 459 if (atomic_cmpset_int(&ncp->nc_lockstatus, count, 460 count | NC_EXLOCK_REQ) == 0) { 461 /* cmpset failed */ 462 continue; 463 } 464 if (begticks == 0) 465 begticks = ticks; 466 error = tsleep(&ncp->nc_locktd, PINTERLOCKED, 467 "clock", nclockwarn); 468 if (error == EWOULDBLOCK) { 469 if (didwarn == 0) { 470 didwarn = ticks; 471 kprintf("[diagnostic] cache_lock: " 472 "%s blocked on %p %08x", 473 td->td_comm, ncp, count); 474 kprintf(" \"%*.*s\"\n", 475 ncp->nc_nlen, ncp->nc_nlen, 476 ncp->nc_name); 477 } 478 } 479 /* loop */ 480 } 481 if (didwarn) { 482 kprintf("[diagnostic] cache_lock: %s unblocked %*.*s after " 483 "%d secs\n", 484 td->td_comm, 485 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name, 486 (int)(ticks + (hz / 2) - begticks) / hz); 487 } 488 } 489 490 /* 491 * The shared lock works similarly to the exclusive lock except 492 * nc_locktd is left NULL and we need an interlock (VHOLD) to 493 * prevent vhold() races, since the moment our cmpset_int succeeds 494 * another cpu can come in and get its own shared lock. 495 * 496 * A critical section is needed to prevent interruption during the 497 * VHOLD interlock. 498 */ 499 static 500 void 501 _cache_lock_shared(struct namecache *ncp) 502 { 503 int didwarn; 504 int error; 505 u_int count; 506 u_int optreq = NC_EXLOCK_REQ; 507 508 KKASSERT(ncp->nc_refs != 0); 509 didwarn = 0; 510 511 for (;;) { 512 count = ncp->nc_lockstatus; 513 cpu_ccfence(); 514 515 if ((count & ~NC_SHLOCK_REQ) == 0) { 516 crit_enter(); 517 if (atomic_cmpset_int(&ncp->nc_lockstatus, 518 count, 519 (count + 1) | NC_SHLOCK_FLAG | 520 NC_SHLOCK_VHOLD)) { 521 /* 522 * The vp associated with a locked ncp must 523 * be held to prevent it from being recycled. 524 * 525 * WARNING! If VRECLAIMED is set the vnode 526 * could already be in the middle of a recycle. 527 * Callers must use cache_vref() or 528 * cache_vget() on the locked ncp to 529 * validate the vp or set the cache entry 530 * to unresolved. 531 * 532 * NOTE! vhold() is allowed if we hold a 533 * lock on the ncp (which we do). 534 */ 535 if (ncp->nc_vp) 536 vhold(ncp->nc_vp); 537 atomic_clear_int(&ncp->nc_lockstatus, 538 NC_SHLOCK_VHOLD); 539 crit_exit(); 540 break; 541 } 542 /* cmpset failed */ 543 crit_exit(); 544 continue; 545 } 546 547 /* 548 * If already held shared we can just bump the count, but 549 * only allow this if nobody is trying to get the lock 550 * exclusively. If we are blocking too long ignore excl 551 * requests (which can race/deadlock us). 552 * 553 * VHOLD is a bit of a hack. Even though we successfully 554 * added another shared ref, the cpu that got the first 555 * shared ref might not yet have held the vnode. 
556 */ 557 if ((count & (optreq|NC_SHLOCK_FLAG)) == NC_SHLOCK_FLAG) { 558 KKASSERT((count & ~(NC_EXLOCK_REQ | 559 NC_SHLOCK_REQ | 560 NC_SHLOCK_FLAG)) > 0); 561 if (atomic_cmpset_int(&ncp->nc_lockstatus, 562 count, count + 1)) { 563 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD) 564 cpu_pause(); 565 break; 566 } 567 continue; 568 } 569 tsleep_interlock(ncp, 0); 570 if (atomic_cmpset_int(&ncp->nc_lockstatus, count, 571 count | NC_SHLOCK_REQ) == 0) { 572 /* cmpset failed */ 573 continue; 574 } 575 error = tsleep(ncp, PINTERLOCKED, "clocksh", nclockwarn); 576 if (error == EWOULDBLOCK) { 577 optreq = 0; 578 if (didwarn == 0) { 579 didwarn = ticks - nclockwarn; 580 kprintf("[diagnostic] cache_lock_shared: " 581 "%s blocked on %p %08x " 582 "\"%*.*s\"\n", 583 curthread->td_comm, ncp, count, 584 ncp->nc_nlen, ncp->nc_nlen, 585 ncp->nc_name); 586 } 587 } 588 /* loop */ 589 } 590 if (didwarn) { 591 kprintf("[diagnostic] cache_lock_shared: " 592 "%s unblocked %*.*s after %d secs\n", 593 curthread->td_comm, 594 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name, 595 (int)(ticks - didwarn) / hz); 596 } 597 } 598 599 /* 600 * Lock ncp exclusively, return 0 on success. 601 * 602 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance, 603 * such as the case where one of its children is locked. 604 */ 605 static 606 int 607 _cache_lock_nonblock(struct namecache *ncp) 608 { 609 thread_t td; 610 u_int count; 611 612 td = curthread; 613 614 for (;;) { 615 count = ncp->nc_lockstatus; 616 617 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) { 618 if (atomic_cmpset_int(&ncp->nc_lockstatus, 619 count, count + 1)) { 620 /* 621 * The vp associated with a locked ncp must 622 * be held to prevent it from being recycled. 623 * 624 * WARNING! If VRECLAIMED is set the vnode 625 * could already be in the middle of a recycle. 626 * Callers must use cache_vref() or 627 * cache_vget() on the locked ncp to 628 * validate the vp or set the cache entry 629 * to unresolved. 630 * 631 * NOTE! vhold() is allowed if we hold a 632 * lock on the ncp (which we do). 633 */ 634 ncp->nc_locktd = td; 635 if (ncp->nc_vp) 636 vhold(ncp->nc_vp); 637 break; 638 } 639 /* cmpset failed */ 640 continue; 641 } 642 if (ncp->nc_locktd == td) { 643 if (atomic_cmpset_int(&ncp->nc_lockstatus, 644 count, count + 1)) { 645 break; 646 } 647 /* cmpset failed */ 648 continue; 649 } 650 return(EWOULDBLOCK); 651 } 652 return(0); 653 } 654 655 /* 656 * The shared lock works similarly to the exclusive lock except 657 * nc_locktd is left NULL and we need an interlock (VHOLD) to 658 * prevent vhold() races, since the moment our cmpset_int succeeds 659 * another cpu can come in and get its own shared lock. 660 * 661 * A critical section is needed to prevent interruption during the 662 * VHOLD interlock. 663 */ 664 static 665 int 666 _cache_lock_shared_nonblock(struct namecache *ncp) 667 { 668 u_int count; 669 670 for (;;) { 671 count = ncp->nc_lockstatus; 672 673 if ((count & ~NC_SHLOCK_REQ) == 0) { 674 crit_enter(); 675 if (atomic_cmpset_int(&ncp->nc_lockstatus, 676 count, 677 (count + 1) | NC_SHLOCK_FLAG | 678 NC_SHLOCK_VHOLD)) { 679 /* 680 * The vp associated with a locked ncp must 681 * be held to prevent it from being recycled. 682 * 683 * WARNING! If VRECLAIMED is set the vnode 684 * could already be in the middle of a recycle. 685 * Callers must use cache_vref() or 686 * cache_vget() on the locked ncp to 687 * validate the vp or set the cache entry 688 * to unresolved. 689 * 690 * NOTE! 
vhold() is allowed if we hold a 691 * lock on the ncp (which we do). 692 */ 693 if (ncp->nc_vp) 694 vhold(ncp->nc_vp); 695 atomic_clear_int(&ncp->nc_lockstatus, 696 NC_SHLOCK_VHOLD); 697 crit_exit(); 698 break; 699 } 700 /* cmpset failed */ 701 crit_exit(); 702 continue; 703 } 704 705 /* 706 * If already held shared we can just bump the count, but 707 * only allow this if nobody is trying to get the lock 708 * exclusively. 709 * 710 * VHOLD is a bit of a hack. Even though we successfully 711 * added another shared ref, the cpu that got the first 712 * shared ref might not yet have held the vnode. 713 */ 714 if ((count & (NC_EXLOCK_REQ|NC_SHLOCK_FLAG)) == 715 NC_SHLOCK_FLAG) { 716 KKASSERT((count & ~(NC_EXLOCK_REQ | 717 NC_SHLOCK_REQ | 718 NC_SHLOCK_FLAG)) > 0); 719 if (atomic_cmpset_int(&ncp->nc_lockstatus, 720 count, count + 1)) { 721 while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD) 722 cpu_pause(); 723 break; 724 } 725 continue; 726 } 727 return(EWOULDBLOCK); 728 } 729 return(0); 730 } 731 732 /* 733 * Helper function 734 * 735 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop). 736 * 737 * nc_locktd must be NULLed out prior to nc_lockstatus getting cleared. 738 */ 739 static 740 void 741 _cache_unlock(struct namecache *ncp) 742 { 743 thread_t td __debugvar = curthread; 744 u_int count; 745 u_int ncount; 746 struct vnode *dropvp; 747 748 KKASSERT(ncp->nc_refs >= 0); 749 KKASSERT((ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) > 0); 750 KKASSERT((ncp->nc_lockstatus & NC_SHLOCK_FLAG) || ncp->nc_locktd == td); 751 752 count = ncp->nc_lockstatus; 753 cpu_ccfence(); 754 755 /* 756 * Clear nc_locktd prior to the atomic op (excl lock only) 757 */ 758 if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) 759 ncp->nc_locktd = NULL; 760 dropvp = NULL; 761 762 for (;;) { 763 if ((count & 764 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ|NC_SHLOCK_FLAG)) == 1) { 765 dropvp = ncp->nc_vp; 766 if (count & NC_EXLOCK_REQ) 767 ncount = count & NC_SHLOCK_REQ; /* cnt->0 */ 768 else 769 ncount = 0; 770 771 if (atomic_cmpset_int(&ncp->nc_lockstatus, 772 count, ncount)) { 773 if (count & NC_EXLOCK_REQ) 774 wakeup(&ncp->nc_locktd); 775 else if (count & NC_SHLOCK_REQ) 776 wakeup(ncp); 777 break; 778 } 779 dropvp = NULL; 780 } else { 781 KKASSERT((count & NC_SHLOCK_VHOLD) == 0); 782 KKASSERT((count & ~(NC_EXLOCK_REQ | 783 NC_SHLOCK_REQ | 784 NC_SHLOCK_FLAG)) > 1); 785 if (atomic_cmpset_int(&ncp->nc_lockstatus, 786 count, count - 1)) { 787 break; 788 } 789 } 790 count = ncp->nc_lockstatus; 791 cpu_ccfence(); 792 } 793 794 /* 795 * Don't actually drop the vp until we successfully clean out 796 * the lock, otherwise we may race another shared lock. 797 */ 798 if (dropvp) 799 vdrop(dropvp); 800 } 801 802 static 803 int 804 _cache_lockstatus(struct namecache *ncp) 805 { 806 if (ncp->nc_locktd == curthread) 807 return(LK_EXCLUSIVE); 808 if (ncp->nc_lockstatus & NC_SHLOCK_FLAG) 809 return(LK_SHARED); 810 return(-1); 811 } 812 813 /* 814 * cache_hold() and cache_drop() prevent the premature deletion of a 815 * namecache entry but do not prevent operations (such as zapping) on 816 * that namecache entry. 817 * 818 * This routine may only be called from outside this source module if 819 * nc_refs is already at least 1. 820 * 821 * This is a rare case where callers are allowed to hold a spinlock, 822 * so we can't ourselves. 
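 *
 * As a reminder of MPSAFE rule (1) near the top of this file, the canonical
 * pattern (an illustrative sketch, not a new requirement) is to gain a
 * reference before locking and to release in the reverse order:
 *
 *	_cache_hold(ncp);
 *	_cache_lock(ncp);
 *	... modify, resolve, or unresolve the entry ...
 *	_cache_unlock(ncp);
 *	_cache_drop(ncp);
 *
 * The cache_get()/cache_put() pair further down bundles the ref+lock and
 * unlock+drop steps (and also sets the entry unresolved if its vnode was
 * reclaimed).
 *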
823 */
824 static __inline
825 struct namecache *
826 _cache_hold(struct namecache *ncp)
827 {
828 atomic_add_int(&ncp->nc_refs, 1);
829 return(ncp);
830 }
831
832 /*
833 * Drop a cache entry, taking care to deal with races.
834 *
835 * For potential 1->0 transitions we must hold the ncp lock to safely
836 * test its flags. An unresolved entry with no children must be zapped
837 * to avoid leaks.
838 *
839 * The call to cache_zap() itself will handle all remaining races and
840 * will decrement the ncp's refs regardless. If we are resolved or
841 * have children nc_refs can safely be dropped to 0 without having to
842 * zap the entry.
843 *
844 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
845 *
846 * NOTE: cache_zap() may return a non-NULL referenced parent which must
847 * be dropped in a loop.
848 */
849 static __inline
850 void
851 _cache_drop(struct namecache *ncp)
852 {
853 int refs;
854
855 while (ncp) {
856 KKASSERT(ncp->nc_refs > 0);
857 refs = ncp->nc_refs;
858
859 if (refs == 1) {
860 if (_cache_lock_nonblock(ncp) == 0) {
861 ncp->nc_flag &= ~NCF_DEFEREDZAP;
862 if ((ncp->nc_flag & NCF_UNRESOLVED) &&
863 TAILQ_EMPTY(&ncp->nc_list)) {
864 ncp = cache_zap(ncp, 1);
865 continue;
866 }
867 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
868 _cache_unlock(ncp);
869 break;
870 }
871 _cache_unlock(ncp);
872 }
873 } else {
874 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
875 break;
876 }
877 cpu_pause();
878 }
879 }
880
881 /*
882 * Link a new namecache entry to its parent and to the hash table. Be
883 * careful to avoid races if vhold() blocks in the future.
884 *
885 * Both ncp and par must be referenced and locked.
886 *
887 * NOTE: The hash table spinlock is held during this call; we can't do
888 * anything fancy.
889 */
890 static void
891 _cache_link_parent(struct namecache *ncp, struct namecache *par,
892 struct nchash_head *nchpp)
893 {
894 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
895
896 KKASSERT(ncp->nc_parent == NULL);
897 ncp->nc_parent = par;
898 ncp->nc_head = nchpp;
899
900 /*
901 * Set inheritance flags. Note that the parent flags may be
902 * stale due to getattr potentially not having been run yet
903 * (it gets run during nlookup()'s).
904 */
905 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
906 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
907 ncp->nc_flag |= NCF_SF_PNOCACHE;
908 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
909 ncp->nc_flag |= NCF_UF_PCACHE;
910
911 /*
912 * Add to hash table and parent, adjust accounting
913 */
914 TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
915 atomic_add_long(&pn->vfscache_count, 1);
916 if (TAILQ_EMPTY(&ncp->nc_list))
917 atomic_add_long(&pn->vfscache_leafs, 1);
918
919 if (TAILQ_EMPTY(&par->nc_list)) {
920 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
921 atomic_add_long(&pn->vfscache_leafs, -1);
922 /*
923 * Any vp associated with an ncp which has children must
924 * be held to prevent it from being recycled.
925 */
926 if (par->nc_vp)
927 vhold(par->nc_vp);
928 } else {
929 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
930 }
931 }
932
933 /*
934 * Remove the parent and hash associations from a namecache structure.
935 * If this is the last child of the parent the cache_drop(par) will
936 * attempt to recursively zap the parent.
937 *
938 * ncp must be locked. This routine will acquire a temporary lock on
939 * the parent as well as the appropriate hash chain.
940 */ 941 static void 942 _cache_unlink_parent(struct namecache *ncp) 943 { 944 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 945 struct namecache *par; 946 struct vnode *dropvp; 947 948 if ((par = ncp->nc_parent) != NULL) { 949 KKASSERT(ncp->nc_parent == par); 950 _cache_hold(par); 951 _cache_lock(par); 952 spin_lock(&ncp->nc_head->spin); 953 954 /* 955 * Remove from hash table and parent, adjust accounting 956 */ 957 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash); 958 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 959 atomic_add_long(&pn->vfscache_count, -1); 960 if (TAILQ_EMPTY(&ncp->nc_list)) 961 atomic_add_long(&pn->vfscache_leafs, -1); 962 963 dropvp = NULL; 964 if (TAILQ_EMPTY(&par->nc_list)) { 965 atomic_add_long(&pn->vfscache_leafs, 1); 966 if (par->nc_vp) 967 dropvp = par->nc_vp; 968 } 969 spin_unlock(&ncp->nc_head->spin); 970 ncp->nc_parent = NULL; 971 ncp->nc_head = NULL; 972 _cache_unlock(par); 973 _cache_drop(par); 974 975 /* 976 * We can only safely vdrop with no spinlocks held. 977 */ 978 if (dropvp) 979 vdrop(dropvp); 980 } 981 } 982 983 /* 984 * Allocate a new namecache structure. Most of the code does not require 985 * zero-termination of the string but it makes vop_compat_ncreate() easier. 986 */ 987 static struct namecache * 988 cache_alloc(int nlen) 989 { 990 struct namecache *ncp; 991 992 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO); 993 if (nlen) 994 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK); 995 ncp->nc_nlen = nlen; 996 ncp->nc_flag = NCF_UNRESOLVED; 997 ncp->nc_error = ENOTCONN; /* needs to be resolved */ 998 ncp->nc_refs = 1; 999 1000 TAILQ_INIT(&ncp->nc_list); 1001 _cache_lock(ncp); 1002 return(ncp); 1003 } 1004 1005 /* 1006 * Can only be called for the case where the ncp has never been 1007 * associated with anything (so no spinlocks are needed). 1008 */ 1009 static void 1010 _cache_free(struct namecache *ncp) 1011 { 1012 KKASSERT(ncp->nc_refs == 1 && ncp->nc_lockstatus == 1); 1013 if (ncp->nc_name) 1014 kfree(ncp->nc_name, M_VFSCACHE); 1015 kfree(ncp, M_VFSCACHE); 1016 } 1017 1018 /* 1019 * [re]initialize a nchandle. 1020 */ 1021 void 1022 cache_zero(struct nchandle *nch) 1023 { 1024 nch->ncp = NULL; 1025 nch->mount = NULL; 1026 } 1027 1028 /* 1029 * Ref and deref a namecache structure. 1030 * 1031 * The caller must specify a stable ncp pointer, typically meaning the 1032 * ncp is already referenced but this can also occur indirectly through 1033 * e.g. holding a lock on a direct child. 1034 * 1035 * WARNING: Caller may hold an unrelated read spinlock, which means we can't 1036 * use read spinlocks here. 1037 */ 1038 struct nchandle * 1039 cache_hold(struct nchandle *nch) 1040 { 1041 _cache_hold(nch->ncp); 1042 _cache_mntref(nch->mount); 1043 return(nch); 1044 } 1045 1046 /* 1047 * Create a copy of a namecache handle for an already-referenced 1048 * entry. 
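 *
 * Illustrative usage sketch (not part of the original source): the copy
 * gains its own reference on both the ncp and the mount, so it can be
 * dropped independently of the original handle:
 *
 *	struct nchandle copy;
 *
 *	cache_copy(&nch, &copy);
 *	... use copy ...
 *	cache_drop(&copy);
 *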
1049 */ 1050 void 1051 cache_copy(struct nchandle *nch, struct nchandle *target) 1052 { 1053 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 1054 struct namecache *ncp; 1055 1056 *target = *nch; 1057 _cache_mntref(target->mount); 1058 ncp = target->ncp; 1059 if (ncp) { 1060 if (ncp == cache->ncp1) { 1061 if (atomic_cmpset_ptr((void *)&cache->ncp1, ncp, NULL)) 1062 return; 1063 } 1064 if (ncp == cache->ncp2) { 1065 if (atomic_cmpset_ptr((void *)&cache->ncp2, ncp, NULL)) 1066 return; 1067 } 1068 _cache_hold(ncp); 1069 } 1070 } 1071 1072 /* 1073 * Caller wants to copy the current directory, copy it out from our 1074 * pcpu cache if possible (the entire critical path is just two localized 1075 * cmpset ops). If the pcpu cache has a snapshot at all it will be a 1076 * valid one, so we don't have to lock p->p_fd even though we are loading 1077 * two fields. 1078 * 1079 * This has a limited effect since nlookup must still ref and shlock the 1080 * vnode to check perms. We do avoid the per-proc spin-lock though, which 1081 * can aid threaded programs. 1082 */ 1083 void 1084 cache_copy_ncdir(struct proc *p, struct nchandle *target) 1085 { 1086 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 1087 1088 *target = p->p_fd->fd_ncdir; 1089 if (target->ncp == cache->ncdir.ncp && 1090 target->mount == cache->ncdir.mount) { 1091 if (atomic_cmpset_ptr((void *)&cache->ncdir.ncp, 1092 target->ncp, NULL)) { 1093 if (atomic_cmpset_ptr((void *)&cache->ncdir.mount, 1094 target->mount, NULL)) { 1095 /* CRITICAL PATH */ 1096 return; 1097 } 1098 _cache_drop(target->ncp); 1099 } 1100 } 1101 spin_lock_shared(&p->p_fd->fd_spin); 1102 cache_copy(&p->p_fd->fd_ncdir, target); 1103 spin_unlock_shared(&p->p_fd->fd_spin); 1104 } 1105 1106 void 1107 cache_changemount(struct nchandle *nch, struct mount *mp) 1108 { 1109 _cache_mntref(mp); 1110 _cache_mntrel(nch->mount); 1111 nch->mount = mp; 1112 } 1113 1114 void 1115 cache_drop(struct nchandle *nch) 1116 { 1117 _cache_mntrel(nch->mount); 1118 _cache_drop(nch->ncp); 1119 nch->ncp = NULL; 1120 nch->mount = NULL; 1121 } 1122 1123 /* 1124 * Drop the nchandle, but try to cache the ref to avoid global atomic 1125 * ops. This is typically done on the system root and jail root nchandles. 1126 */ 1127 void 1128 cache_drop_and_cache(struct nchandle *nch) 1129 { 1130 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 1131 struct namecache *ncp; 1132 1133 _cache_mntrel(nch->mount); 1134 ncp = nch->ncp; 1135 if (cache->ncp1 == NULL) { 1136 ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp); 1137 if (ncp == NULL) 1138 goto done; 1139 } 1140 if (cache->ncp2 == NULL) { 1141 ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp); 1142 if (ncp == NULL) 1143 goto done; 1144 } 1145 if (++cache->iter & 1) 1146 ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp); 1147 else 1148 ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp); 1149 if (ncp) 1150 _cache_drop(ncp); 1151 done: 1152 nch->ncp = NULL; 1153 nch->mount = NULL; 1154 } 1155 1156 /* 1157 * We are dropping what the caller believes is the current directory, 1158 * unconditionally store it in our pcpu cache. Anything already in 1159 * the cache will be discarded. 
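 *
 * Illustrative pairing (a sketch, not from the original source): a caller
 * that wants the current directory snapshots it with cache_copy_ncdir()
 * and returns it through here when done, keeping the refs in the pcpu
 * cache where possible:
 *
 *	struct nchandle nd;
 *
 *	cache_copy_ncdir(p, &nd);
 *	... perform a lookup relative to nd ...
 *	cache_drop_ncdir(&nd);
 *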
1160 */ 1161 void 1162 cache_drop_ncdir(struct nchandle *nch) 1163 { 1164 struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid]; 1165 1166 nch->ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, nch->ncp); 1167 nch->mount = atomic_swap_ptr((void *)&cache->ncdir.mount, nch->mount); 1168 if (nch->ncp) 1169 _cache_drop(nch->ncp); 1170 if (nch->mount) 1171 _cache_mntrel(nch->mount); 1172 nch->ncp = NULL; 1173 nch->mount = NULL; 1174 } 1175 1176 int 1177 cache_lockstatus(struct nchandle *nch) 1178 { 1179 return(_cache_lockstatus(nch->ncp)); 1180 } 1181 1182 void 1183 cache_lock(struct nchandle *nch) 1184 { 1185 _cache_lock(nch->ncp); 1186 } 1187 1188 void 1189 cache_lock_maybe_shared(struct nchandle *nch, int excl) 1190 { 1191 struct namecache *ncp = nch->ncp; 1192 1193 if (ncp_shared_lock_disable || excl || 1194 (ncp->nc_flag & NCF_UNRESOLVED)) { 1195 _cache_lock(ncp); 1196 } else { 1197 _cache_lock_shared(ncp); 1198 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1199 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) { 1200 _cache_unlock(ncp); 1201 _cache_lock(ncp); 1202 } 1203 } else { 1204 _cache_unlock(ncp); 1205 _cache_lock(ncp); 1206 } 1207 } 1208 } 1209 1210 /* 1211 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller 1212 * is responsible for checking both for validity on return as they 1213 * may have become invalid. 1214 * 1215 * We have to deal with potential deadlocks here, just ping pong 1216 * the lock until we get it (we will always block somewhere when 1217 * looping so this is not cpu-intensive). 1218 * 1219 * which = 0 nch1 not locked, nch2 is locked 1220 * which = 1 nch1 is locked, nch2 is not locked 1221 */ 1222 void 1223 cache_relock(struct nchandle *nch1, struct ucred *cred1, 1224 struct nchandle *nch2, struct ucred *cred2) 1225 { 1226 int which; 1227 1228 which = 0; 1229 1230 for (;;) { 1231 if (which == 0) { 1232 if (cache_lock_nonblock(nch1) == 0) { 1233 cache_resolve(nch1, cred1); 1234 break; 1235 } 1236 cache_unlock(nch2); 1237 cache_lock(nch1); 1238 cache_resolve(nch1, cred1); 1239 which = 1; 1240 } else { 1241 if (cache_lock_nonblock(nch2) == 0) { 1242 cache_resolve(nch2, cred2); 1243 break; 1244 } 1245 cache_unlock(nch1); 1246 cache_lock(nch2); 1247 cache_resolve(nch2, cred2); 1248 which = 0; 1249 } 1250 } 1251 } 1252 1253 int 1254 cache_lock_nonblock(struct nchandle *nch) 1255 { 1256 return(_cache_lock_nonblock(nch->ncp)); 1257 } 1258 1259 void 1260 cache_unlock(struct nchandle *nch) 1261 { 1262 _cache_unlock(nch->ncp); 1263 } 1264 1265 /* 1266 * ref-and-lock, unlock-and-deref functions. 1267 * 1268 * This function is primarily used by nlookup. Even though cache_lock 1269 * holds the vnode, it is possible that the vnode may have already 1270 * initiated a recyclement. 1271 * 1272 * We want cache_get() to return a definitively usable vnode or a 1273 * definitively unresolved ncp. 1274 */ 1275 static 1276 struct namecache * 1277 _cache_get(struct namecache *ncp) 1278 { 1279 _cache_hold(ncp); 1280 _cache_lock(ncp); 1281 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 1282 _cache_setunresolved(ncp); 1283 return(ncp); 1284 } 1285 1286 /* 1287 * Attempt to obtain a shared lock on the ncp. A shared lock will only 1288 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is 1289 * valid. Otherwise an exclusive lock will be acquired instead. 
1290 */ 1291 static 1292 struct namecache * 1293 _cache_get_maybe_shared(struct namecache *ncp, int excl) 1294 { 1295 if (ncp_shared_lock_disable || excl || 1296 (ncp->nc_flag & NCF_UNRESOLVED)) { 1297 return(_cache_get(ncp)); 1298 } 1299 _cache_hold(ncp); 1300 _cache_lock_shared(ncp); 1301 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1302 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) { 1303 _cache_unlock(ncp); 1304 ncp = _cache_get(ncp); 1305 _cache_drop(ncp); 1306 } 1307 } else { 1308 _cache_unlock(ncp); 1309 ncp = _cache_get(ncp); 1310 _cache_drop(ncp); 1311 } 1312 return(ncp); 1313 } 1314 1315 /* 1316 * This is a special form of _cache_lock() which only succeeds if 1317 * it can get a pristine, non-recursive lock. The caller must have 1318 * already ref'd the ncp. 1319 * 1320 * On success the ncp will be locked, on failure it will not. The 1321 * ref count does not change either way. 1322 * 1323 * We want _cache_lock_special() (on success) to return a definitively 1324 * usable vnode or a definitively unresolved ncp. 1325 */ 1326 static int 1327 _cache_lock_special(struct namecache *ncp) 1328 { 1329 if (_cache_lock_nonblock(ncp) == 0) { 1330 if ((ncp->nc_lockstatus & 1331 ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) { 1332 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 1333 _cache_setunresolved(ncp); 1334 return(0); 1335 } 1336 _cache_unlock(ncp); 1337 } 1338 return(EWOULDBLOCK); 1339 } 1340 1341 /* 1342 * This function tries to get a shared lock but will back-off to an exclusive 1343 * lock if: 1344 * 1345 * (1) Some other thread is trying to obtain an exclusive lock 1346 * (to prevent the exclusive requester from getting livelocked out 1347 * by many shared locks). 1348 * 1349 * (2) The current thread already owns an exclusive lock (to avoid 1350 * deadlocking). 1351 * 1352 * WARNING! On machines with lots of cores we really want to try hard to 1353 * get a shared lock or concurrent path lookups can chain-react 1354 * into a very high-latency exclusive lock. 1355 */ 1356 static int 1357 _cache_lock_shared_special(struct namecache *ncp) 1358 { 1359 /* 1360 * Only honor a successful shared lock (returning 0) if there is 1361 * no exclusive request pending and the vnode, if present, is not 1362 * in a reclaimed state. 1363 */ 1364 if (_cache_lock_shared_nonblock(ncp) == 0) { 1365 if ((ncp->nc_lockstatus & NC_EXLOCK_REQ) == 0) { 1366 if (ncp->nc_vp == NULL || 1367 (ncp->nc_vp->v_flag & VRECLAIMED) == 0) { 1368 return(0); 1369 } 1370 } 1371 _cache_unlock(ncp); 1372 return(EWOULDBLOCK); 1373 } 1374 1375 /* 1376 * Non-blocking shared lock failed. If we already own the exclusive 1377 * lock just acquire another exclusive lock (instead of deadlocking). 1378 * Otherwise acquire a shared lock. 1379 */ 1380 if (ncp->nc_locktd == curthread) { 1381 _cache_lock(ncp); 1382 return(0); 1383 } 1384 _cache_lock_shared(ncp); 1385 return(0); 1386 } 1387 1388 1389 /* 1390 * NOTE: The same nchandle can be passed for both arguments. 
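 *
 * Typical calling pattern (an illustrative sketch, not from the original
 * source): take a referenced and locked snapshot, operate on it, then
 * release it with cache_put():
 *
 *	struct nchandle target;
 *
 *	cache_get(&nch, &target);
 *	... target.ncp is referenced and locked here ...
 *	cache_put(&target);
 *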
1391 */ 1392 void 1393 cache_get(struct nchandle *nch, struct nchandle *target) 1394 { 1395 KKASSERT(nch->ncp->nc_refs > 0); 1396 target->mount = nch->mount; 1397 target->ncp = _cache_get(nch->ncp); 1398 _cache_mntref(target->mount); 1399 } 1400 1401 void 1402 cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl) 1403 { 1404 KKASSERT(nch->ncp->nc_refs > 0); 1405 target->mount = nch->mount; 1406 target->ncp = _cache_get_maybe_shared(nch->ncp, excl); 1407 _cache_mntref(target->mount); 1408 } 1409 1410 /* 1411 * 1412 */ 1413 static __inline 1414 void 1415 _cache_put(struct namecache *ncp) 1416 { 1417 _cache_unlock(ncp); 1418 _cache_drop(ncp); 1419 } 1420 1421 /* 1422 * 1423 */ 1424 void 1425 cache_put(struct nchandle *nch) 1426 { 1427 _cache_mntrel(nch->mount); 1428 _cache_put(nch->ncp); 1429 nch->ncp = NULL; 1430 nch->mount = NULL; 1431 } 1432 1433 /* 1434 * Resolve an unresolved ncp by associating a vnode with it. If the 1435 * vnode is NULL, a negative cache entry is created. 1436 * 1437 * The ncp should be locked on entry and will remain locked on return. 1438 */ 1439 static 1440 void 1441 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp) 1442 { 1443 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 1444 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE); 1445 1446 if (vp != NULL) { 1447 /* 1448 * Any vp associated with an ncp which has children must 1449 * be held. Any vp associated with a locked ncp must be held. 1450 */ 1451 if (!TAILQ_EMPTY(&ncp->nc_list)) 1452 vhold(vp); 1453 spin_lock(&vp->v_spin); 1454 ncp->nc_vp = vp; 1455 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode); 1456 spin_unlock(&vp->v_spin); 1457 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) 1458 vhold(vp); 1459 1460 /* 1461 * Set auxiliary flags 1462 */ 1463 switch(vp->v_type) { 1464 case VDIR: 1465 ncp->nc_flag |= NCF_ISDIR; 1466 break; 1467 case VLNK: 1468 ncp->nc_flag |= NCF_ISSYMLINK; 1469 /* XXX cache the contents of the symlink */ 1470 break; 1471 default: 1472 break; 1473 } 1474 ncp->nc_error = 0; 1475 /* XXX: this is a hack to work-around the lack of a real pfs vfs 1476 * implementation*/ 1477 if (mp != NULL) 1478 if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0) 1479 vp->v_pfsmp = mp; 1480 } else { 1481 /* 1482 * When creating a negative cache hit we set the 1483 * namecache_gen. A later resolve will clean out the 1484 * negative cache hit if the mount point's namecache_gen 1485 * has changed. Used by devfs, could also be used by 1486 * other remote FSs. 1487 */ 1488 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 1489 1490 ncp->nc_vp = NULL; 1491 ncp->nc_negcpu = mycpu->gd_cpuid; 1492 spin_lock(&pn->neg_spin); 1493 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode); 1494 ++pn->neg_count; 1495 spin_unlock(&pn->neg_spin); 1496 atomic_add_long(&pn->vfscache_negs, 1); 1497 1498 ncp->nc_error = ENOENT; 1499 if (mp) 1500 VFS_NCPGEN_SET(mp, ncp); 1501 } 1502 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP); 1503 } 1504 1505 /* 1506 * 1507 */ 1508 void 1509 cache_setvp(struct nchandle *nch, struct vnode *vp) 1510 { 1511 _cache_setvp(nch->mount, nch->ncp, vp); 1512 } 1513 1514 /* 1515 * 1516 */ 1517 void 1518 cache_settimeout(struct nchandle *nch, int nticks) 1519 { 1520 struct namecache *ncp = nch->ncp; 1521 1522 if ((ncp->nc_timeout = ticks + nticks) == 0) 1523 ncp->nc_timeout = 1; 1524 } 1525 1526 /* 1527 * Disassociate the vnode or negative-cache association and mark a 1528 * namecache entry as unresolved again. 
Note that the ncp is still 1529 * left in the hash table and still linked to its parent. 1530 * 1531 * The ncp should be locked and refd on entry and will remain locked and refd 1532 * on return. 1533 * 1534 * This routine is normally never called on a directory containing children. 1535 * However, NFS often does just that in its rename() code as a cop-out to 1536 * avoid complex namespace operations. This disconnects a directory vnode 1537 * from its namecache and can cause the OLDAPI and NEWAPI to get out of 1538 * sync. 1539 * 1540 */ 1541 static 1542 void 1543 _cache_setunresolved(struct namecache *ncp) 1544 { 1545 struct vnode *vp; 1546 1547 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1548 ncp->nc_flag |= NCF_UNRESOLVED; 1549 ncp->nc_timeout = 0; 1550 ncp->nc_error = ENOTCONN; 1551 if ((vp = ncp->nc_vp) != NULL) { 1552 spin_lock(&vp->v_spin); 1553 ncp->nc_vp = NULL; 1554 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode); 1555 spin_unlock(&vp->v_spin); 1556 1557 /* 1558 * Any vp associated with an ncp with children is 1559 * held by that ncp. Any vp associated with a locked 1560 * ncp is held by that ncp. These conditions must be 1561 * undone when the vp is cleared out from the ncp. 1562 */ 1563 if (!TAILQ_EMPTY(&ncp->nc_list)) 1564 vdrop(vp); 1565 if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) 1566 vdrop(vp); 1567 } else { 1568 struct pcpu_ncache *pn; 1569 1570 pn = &pcpu_ncache[ncp->nc_negcpu]; 1571 1572 atomic_add_long(&pn->vfscache_negs, -1); 1573 spin_lock(&pn->neg_spin); 1574 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode); 1575 --pn->neg_count; 1576 spin_unlock(&pn->neg_spin); 1577 } 1578 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK); 1579 } 1580 } 1581 1582 /* 1583 * The cache_nresolve() code calls this function to automatically 1584 * set a resolved cache element to unresolved if it has timed out 1585 * or if it is a negative cache hit and the mount point namecache_gen 1586 * has changed. 1587 */ 1588 static __inline int 1589 _cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp) 1590 { 1591 /* 1592 * Try to zap entries that have timed out. We have 1593 * to be careful here because locked leafs may depend 1594 * on the vnode remaining intact in a parent, so only 1595 * do this under very specific conditions. 1596 */ 1597 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 && 1598 TAILQ_EMPTY(&ncp->nc_list)) { 1599 return 1; 1600 } 1601 1602 /* 1603 * If a resolved negative cache hit is invalid due to 1604 * the mount's namecache generation being bumped, zap it. 1605 */ 1606 if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) { 1607 return 1; 1608 } 1609 1610 /* 1611 * Otherwise we are good 1612 */ 1613 return 0; 1614 } 1615 1616 static __inline void 1617 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp) 1618 { 1619 /* 1620 * Already in an unresolved state, nothing to do. 1621 */ 1622 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 1623 if (_cache_auto_unresolve_test(mp, ncp)) 1624 _cache_setunresolved(ncp); 1625 } 1626 } 1627 1628 /* 1629 * 1630 */ 1631 void 1632 cache_setunresolved(struct nchandle *nch) 1633 { 1634 _cache_setunresolved(nch->ncp); 1635 } 1636 1637 /* 1638 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist 1639 * looking for matches. This flag tells the lookup code when it must 1640 * check for a mount linkage and also prevents the directories in question 1641 * from being deleted or renamed. 
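 *
 * Illustrative sketch (not from the original source): after an unmount has
 * removed the covering linkage, the caller can ask for the flag to be
 * cleared on the covered directory's handle:
 *
 *	cache_clrmountpt(&nch);
 *
 * The flag is left intact if any remaining mount still references the ncp.
 *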
1642 */
1643 static
1644 int
1645 cache_clrmountpt_callback(struct mount *mp, void *data)
1646 {
1647 struct nchandle *nch = data;
1648
1649 if (mp->mnt_ncmounton.ncp == nch->ncp)
1650 return(1);
1651 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1652 return(1);
1653 return(0);
1654 }
1655
1656 /*
1657 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
1658 * with a mount point.
1659 */
1660 void
1661 cache_clrmountpt(struct nchandle *nch)
1662 {
1663 int count;
1664
1665 count = mountlist_scan(cache_clrmountpt_callback, nch,
1666 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1667 if (count == 0)
1668 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1669 }
1670
1671 /*
1672 * Invalidate portions of the namecache topology given a starting entry.
1673 * The passed ncp is set to an unresolved state and:
1674 *
1675 * The passed ncp must be referenced and locked. The routine may unlock
1676 * and relock ncp several times, and will recheck the children and loop
1677 * to catch races. When done the passed ncp will be returned with the
1678 * reference and lock intact.
1679 *
1680 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1681 * that the physical underlying nodes have been
1682 * destroyed... as in deleted. For example, when
1683 * a directory is removed. This will cause record
1684 * lookups on the name to no longer be able to find
1685 * the record and tells the resolver to return failure
1686 * rather than trying to resolve through the parent.
1687 *
1688 * The topology itself, including ncp->nc_name,
1689 * remains intact.
1690 *
1691 * This only applies to the passed ncp; even if CINV_CHILDREN
1692 * is specified the children are not flagged.
1693 *
1694 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1695 * state as well.
1696 *
1697 * Note that this will also have the side effect of
1698 * cleaning out any unreferenced nodes in the topology
1699 * from the leaves up as the recursion backs out.
1700 *
1701 * Note that the topology for any referenced nodes remains intact, but
1702 * the nodes will be marked as having been destroyed and will be set
1703 * to an unresolved state.
1704 *
1705 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1706 * the namecache entry may not actually be invalidated on return if it was
1707 * revalidated while recursing down into its children. This code guarantees
1708 * that the node(s) will go through an invalidation cycle, but does not
1709 * guarantee that they will remain in an invalidated state.
1710 *
1711 * Returns non-zero if a revalidation was detected during the invalidation
1712 * recursion, zero otherwise. Note that since only the original ncp is
1713 * locked the revalidation ultimately can only indicate that the original ncp
1714 * *MIGHT NOT* have been re-resolved.
1715 *
1716 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1717 * have to avoid blowing out the kernel stack. We do this by saving the
1718 * deep namecache node and aborting the recursion, then re-recursing at that
1719 * node using a depth-first algorithm in order to allow multiple deep
1720 * recursions to chain through each other, then we restart the invalidation
1721 * from scratch.
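 *
 * Illustrative sketch (not from the original source): a caller removing a
 * directory would typically invalidate the locked entry and its children:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 *
 * CINV_DESTROY flags only the passed entry as destroyed, while
 * CINV_CHILDREN recursively unresolves the children as described above.
 *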
1722 */ 1723 1724 struct cinvtrack { 1725 struct namecache *resume_ncp; 1726 int depth; 1727 }; 1728 1729 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *); 1730 1731 static 1732 int 1733 _cache_inval(struct namecache *ncp, int flags) 1734 { 1735 struct cinvtrack track; 1736 struct namecache *ncp2; 1737 int r; 1738 1739 track.depth = 0; 1740 track.resume_ncp = NULL; 1741 1742 for (;;) { 1743 r = _cache_inval_internal(ncp, flags, &track); 1744 if (track.resume_ncp == NULL) 1745 break; 1746 _cache_unlock(ncp); 1747 while ((ncp2 = track.resume_ncp) != NULL) { 1748 track.resume_ncp = NULL; 1749 _cache_lock(ncp2); 1750 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY, 1751 &track); 1752 _cache_put(ncp2); 1753 } 1754 _cache_lock(ncp); 1755 } 1756 return(r); 1757 } 1758 1759 int 1760 cache_inval(struct nchandle *nch, int flags) 1761 { 1762 return(_cache_inval(nch->ncp, flags)); 1763 } 1764 1765 /* 1766 * Helper for _cache_inval(). The passed ncp is refd and locked and 1767 * remains that way on return, but may be unlocked/relocked multiple 1768 * times by the routine. 1769 */ 1770 static int 1771 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track) 1772 { 1773 struct namecache *nextkid; 1774 int rcnt = 0; 1775 1776 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE); 1777 1778 _cache_setunresolved(ncp); 1779 if (flags & CINV_DESTROY) { 1780 ncp->nc_flag |= NCF_DESTROYED; 1781 ++ncp->nc_generation; 1782 } 1783 while ((flags & CINV_CHILDREN) && 1784 (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL 1785 ) { 1786 struct namecache *kid; 1787 int restart; 1788 1789 restart = 0; 1790 _cache_hold(nextkid); 1791 if (++track->depth > MAX_RECURSION_DEPTH) { 1792 track->resume_ncp = ncp; 1793 _cache_hold(ncp); 1794 ++rcnt; 1795 } 1796 while ((kid = nextkid) != NULL) { 1797 /* 1798 * Parent (ncp) must be locked for the iteration. 1799 */ 1800 nextkid = NULL; 1801 if (kid->nc_parent != ncp) { 1802 _cache_drop(kid); 1803 kprintf("cache_inval_internal restartA %s\n", 1804 ncp->nc_name); 1805 restart = 1; 1806 break; 1807 } 1808 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL) 1809 _cache_hold(nextkid); 1810 1811 /* 1812 * Parent unlocked for this section to avoid 1813 * deadlocks. 1814 */ 1815 _cache_unlock(ncp); 1816 if (track->resume_ncp) { 1817 _cache_drop(kid); 1818 _cache_lock(ncp); 1819 break; 1820 } 1821 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 || 1822 TAILQ_FIRST(&kid->nc_list) 1823 ) { 1824 _cache_lock(kid); 1825 if (kid->nc_parent != ncp) { 1826 kprintf("cache_inval_internal " 1827 "restartB %s\n", 1828 ncp->nc_name); 1829 restart = 1; 1830 _cache_unlock(kid); 1831 _cache_drop(kid); 1832 _cache_lock(ncp); 1833 break; 1834 } 1835 1836 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track); 1837 _cache_unlock(kid); 1838 } 1839 _cache_drop(kid); 1840 _cache_lock(ncp); 1841 } 1842 if (nextkid) 1843 _cache_drop(nextkid); 1844 --track->depth; 1845 if (restart == 0) 1846 break; 1847 } 1848 1849 /* 1850 * Someone could have gotten in there while ncp was unlocked, 1851 * retry if so. 1852 */ 1853 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 1854 ++rcnt; 1855 return (rcnt); 1856 } 1857 1858 /* 1859 * Invalidate a vnode's namecache associations. To avoid races against 1860 * the resolver we do not invalidate a node which we previously invalidated 1861 * but which was then re-resolved while we were in the invalidation loop. 1862 * 1863 * Returns non-zero if any namecache entries remain after the invalidation 1864 * loop completed. 
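 *
 * Illustrative sketch (not from the original source): a caller tearing down
 * a vnode's associations can check whether anything survived:
 *
 *	if (cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN))
 *		... some namecache entries still point at vp ...
 *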
1865 *
1866 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1867 * be ripped out of the topology while held, the vnode's v_namecache
1868 * list has no such restriction. NCP's can be ripped out of the list
1869 * at virtually any time if not locked, even if held.
1870 *
1871 * In addition, the v_namecache list itself must be locked via
1872 * the vnode's spinlock.
1873 */
1874 int
1875 cache_inval_vp(struct vnode *vp, int flags)
1876 {
1877 struct namecache *ncp;
1878 struct namecache *next;
1879
1880 restart:
1881 spin_lock(&vp->v_spin);
1882 ncp = TAILQ_FIRST(&vp->v_namecache);
1883 if (ncp)
1884 _cache_hold(ncp);
1885 while (ncp) {
1886 /* loop entered with ncp held and vp spin-locked */
1887 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1888 _cache_hold(next);
1889 spin_unlock(&vp->v_spin);
1890 _cache_lock(ncp);
1891 if (ncp->nc_vp != vp) {
1892 kprintf("Warning: cache_inval_vp: race-A detected on "
1893 "%s\n", ncp->nc_name);
1894 _cache_put(ncp);
1895 if (next)
1896 _cache_drop(next);
1897 goto restart;
1898 }
1899 _cache_inval(ncp, flags);
1900 _cache_put(ncp); /* also releases reference */
1901 ncp = next;
1902 spin_lock(&vp->v_spin);
1903 if (ncp && ncp->nc_vp != vp) {
1904 spin_unlock(&vp->v_spin);
1905 kprintf("Warning: cache_inval_vp: race-B detected on "
1906 "%s\n", ncp->nc_name);
1907 _cache_drop(ncp);
1908 goto restart;
1909 }
1910 }
1911 spin_unlock(&vp->v_spin);
1912 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1913 }
1914
1915 /*
1916 * This routine is used instead of the normal cache_inval_vp() when we
1917 * are trying to recycle otherwise good vnodes.
1918 *
1919 * Return 0 on success, non-zero if not all namecache records could be
1920 * disassociated from the vnode (for various reasons).
1921 */
1922 int
1923 cache_inval_vp_nonblock(struct vnode *vp)
1924 {
1925 struct namecache *ncp;
1926 struct namecache *next;
1927
1928 spin_lock(&vp->v_spin);
1929 ncp = TAILQ_FIRST(&vp->v_namecache);
1930 if (ncp)
1931 _cache_hold(ncp);
1932 while (ncp) {
1933 /* loop entered with ncp held */
1934 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1935 _cache_hold(next);
1936 spin_unlock(&vp->v_spin);
1937 if (_cache_lock_nonblock(ncp)) {
1938 _cache_drop(ncp);
1939 if (next)
1940 _cache_drop(next);
1941 goto done;
1942 }
1943 if (ncp->nc_vp != vp) {
1944 kprintf("Warning: cache_inval_vp: race-A detected on "
1945 "%s\n", ncp->nc_name);
1946 _cache_put(ncp);
1947 if (next)
1948 _cache_drop(next);
1949 goto done;
1950 }
1951 _cache_inval(ncp, 0);
1952 _cache_put(ncp); /* also releases reference */
1953 ncp = next;
1954 spin_lock(&vp->v_spin);
1955 if (ncp && ncp->nc_vp != vp) {
1956 spin_unlock(&vp->v_spin);
1957 kprintf("Warning: cache_inval_vp: race-B detected on "
1958 "%s\n", ncp->nc_name);
1959 _cache_drop(ncp);
1960 goto done;
1961 }
1962 }
1963 spin_unlock(&vp->v_spin);
1964 done:
1965 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1966 }
1967
1968 /*
1969 * Clears the universal directory search 'ok' flag. This flag allows
1970 * nlookup() to bypass normal vnode checks. This flag is a cached flag
1971 * so clearing it simply forces revalidation.
1972 */
1973 void
1974 cache_inval_wxok(struct vnode *vp)
1975 {
1976 struct namecache *ncp;
1977
1978 spin_lock(&vp->v_spin);
1979 TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
1980 if (ncp->nc_flag & NCF_WXOK)
1981 atomic_clear_short(&ncp->nc_flag, NCF_WXOK);
1982 }
1983 spin_unlock(&vp->v_spin);
1984 }
1985
1986 /*
1987 * The source ncp has been renamed to the target ncp.
Both fncp and tncp 1988 * must be locked. The target ncp is destroyed (as a normal rename-over 1989 * would destroy the target file or directory). 1990 * 1991 * Because there may be references to the source ncp we cannot copy its 1992 * contents to the target. Instead the source ncp is relinked as the target 1993 * and the target ncp is removed from the namecache topology. 1994 */ 1995 void 1996 cache_rename(struct nchandle *fnch, struct nchandle *tnch) 1997 { 1998 struct namecache *fncp = fnch->ncp; 1999 struct namecache *tncp = tnch->ncp; 2000 struct namecache *tncp_par; 2001 struct nchash_head *nchpp; 2002 u_int32_t hash; 2003 char *oname; 2004 char *nname; 2005 2006 ++fncp->nc_generation; 2007 ++tncp->nc_generation; 2008 if (tncp->nc_nlen) { 2009 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK); 2010 bcopy(tncp->nc_name, nname, tncp->nc_nlen); 2011 nname[tncp->nc_nlen] = 0; 2012 } else { 2013 nname = NULL; 2014 } 2015 2016 /* 2017 * Rename fncp (unlink) 2018 */ 2019 _cache_unlink_parent(fncp); 2020 oname = fncp->nc_name; 2021 fncp->nc_name = nname; 2022 fncp->nc_nlen = tncp->nc_nlen; 2023 if (oname) 2024 kfree(oname, M_VFSCACHE); 2025 2026 tncp_par = tncp->nc_parent; 2027 _cache_hold(tncp_par); 2028 _cache_lock(tncp_par); 2029 2030 /* 2031 * Rename fncp (relink) 2032 */ 2033 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT); 2034 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash); 2035 nchpp = NCHHASH(hash); 2036 2037 spin_lock(&nchpp->spin); 2038 _cache_link_parent(fncp, tncp_par, nchpp); 2039 spin_unlock(&nchpp->spin); 2040 2041 _cache_put(tncp_par); 2042 2043 /* 2044 * Get rid of the overwritten tncp (unlink) 2045 */ 2046 _cache_unlink(tncp); 2047 } 2048 2049 /* 2050 * Perform actions consistent with unlinking a file. The passed-in ncp 2051 * must be locked. 2052 * 2053 * The ncp is marked DESTROYED so it no longer shows up in searches, 2054 * and will be physically deleted when the vnode goes away. 2055 * 2056 * If the related vnode has no refs then we cycle it through vget()/vput() 2057 * to (possibly if we don't have a ref race) trigger a deactivation, 2058 * allowing the VFS to trivially detect and recycle the deleted vnode 2059 * via VOP_INACTIVE(). 2060 * 2061 * NOTE: _cache_rename() will automatically call _cache_unlink() on the 2062 * target ncp. 2063 */ 2064 void 2065 cache_unlink(struct nchandle *nch) 2066 { 2067 _cache_unlink(nch->ncp); 2068 } 2069 2070 static void 2071 _cache_unlink(struct namecache *ncp) 2072 { 2073 struct vnode *vp; 2074 2075 /* 2076 * Causes lookups to fail and allows another ncp with the same 2077 * name to be created under ncp->nc_parent. 2078 */ 2079 ncp->nc_flag |= NCF_DESTROYED; 2080 ++ncp->nc_generation; 2081 2082 /* 2083 * Attempt to trigger a deactivation. Set VREF_FINALIZE to 2084 * force action on the 1->0 transition. 2085 */ 2086 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 && 2087 (vp = ncp->nc_vp) != NULL) { 2088 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 2089 if (VREFCNT(vp) <= 0) { 2090 if (vget(vp, LK_SHARED) == 0) 2091 vput(vp); 2092 } 2093 } 2094 } 2095 2096 /* 2097 * Return non-zero if the nch might be associated with an open and/or mmap()'d 2098 * file. The easy solution is to just return non-zero if the vnode has refs. 2099 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to 2100 * force the reclaim). 
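 *
 * Illustrative interlock sketch (assumed caller, not from this file):
 *
 *	if (cache_isopen(&nch))
 *		error = EAGAIN;		(defer the reclaim)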
2101 */ 2102 int 2103 cache_isopen(struct nchandle *nch) 2104 { 2105 struct vnode *vp; 2106 struct namecache *ncp = nch->ncp; 2107 2108 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 && 2109 (vp = ncp->nc_vp) != NULL && 2110 VREFCNT(vp)) { 2111 return 1; 2112 } 2113 return 0; 2114 } 2115 2116 2117 /* 2118 * vget the vnode associated with the namecache entry. Resolve the namecache 2119 * entry if necessary. The passed ncp must be referenced and locked. If 2120 * the ncp is resolved it might be locked shared. 2121 * 2122 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked 2123 * (depending on the passed lk_type) will be returned in *vpp with an error 2124 * of 0, or NULL will be returned in *vpp with a non-0 error code. The 2125 * most typical error is ENOENT, meaning that the ncp represents a negative 2126 * cache hit and there is no vnode to retrieve, but other errors can occur 2127 * too. 2128 * 2129 * The vget() can race a reclaim. If this occurs we re-resolve the 2130 * namecache entry. 2131 * 2132 * There are numerous places in the kernel where vget() is called on a 2133 * vnode while one or more of its namecache entries is locked. Releasing 2134 * a vnode never deadlocks against locked namecache entries (the vnode 2135 * will not get recycled while referenced ncp's exist). This means we 2136 * can safely acquire the vnode. In fact, we MUST NOT release the ncp 2137 * lock when acquiring the vp lock or we might cause a deadlock. 2138 * 2139 * NOTE: The passed-in ncp must be locked exclusively if it is initially 2140 * unresolved. If a reclaim race occurs the passed-in ncp will be 2141 * relocked exclusively before being re-resolved. 2142 */ 2143 int 2144 cache_vget(struct nchandle *nch, struct ucred *cred, 2145 int lk_type, struct vnode **vpp) 2146 { 2147 struct namecache *ncp; 2148 struct vnode *vp; 2149 int error; 2150 2151 ncp = nch->ncp; 2152 again: 2153 vp = NULL; 2154 if (ncp->nc_flag & NCF_UNRESOLVED) 2155 error = cache_resolve(nch, cred); 2156 else 2157 error = 0; 2158 2159 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 2160 error = vget(vp, lk_type); 2161 if (error) { 2162 /* 2163 * VRECLAIM race 2164 * 2165 * The ncp may have been locked shared, we must relock 2166 * it exclusively before we can set it to unresolved. 2167 */ 2168 if (error == ENOENT) { 2169 kprintf("Warning: vnode reclaim race detected " 2170 "in cache_vget on %p (%s)\n", 2171 vp, ncp->nc_name); 2172 _cache_unlock(ncp); 2173 _cache_lock(ncp); 2174 _cache_setunresolved(ncp); 2175 goto again; 2176 } 2177 2178 /* 2179 * Not a reclaim race, some other error. 2180 */ 2181 KKASSERT(ncp->nc_vp == vp); 2182 vp = NULL; 2183 } else { 2184 KKASSERT(ncp->nc_vp == vp); 2185 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 2186 } 2187 } 2188 if (error == 0 && vp == NULL) 2189 error = ENOENT; 2190 *vpp = vp; 2191 return(error); 2192 } 2193 2194 /* 2195 * Similar to cache_vget() but only acquires a ref on the vnode. 2196 * 2197 * NOTE: The passed-in ncp must be locked exclusively if it is initially 2198 * unresolved. If a reclaim race occurs the passed-in ncp will be 2199 * relocked exclusively before being re-resolved. 
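 *
 * Illustrative caller sketch (assumed names, error handling elided); nch
 * must already be referenced and locked as described above:
 *
 *	error = cache_vref(&nch, cred, &vp);
 *	if (error == 0) {
 *		(use the referenced, unlocked vp)
 *		vrele(vp);
 *	}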
2200 */ 2201 int 2202 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp) 2203 { 2204 struct namecache *ncp; 2205 struct vnode *vp; 2206 int error; 2207 2208 ncp = nch->ncp; 2209 again: 2210 vp = NULL; 2211 if (ncp->nc_flag & NCF_UNRESOLVED) 2212 error = cache_resolve(nch, cred); 2213 else 2214 error = 0; 2215 2216 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 2217 error = vget(vp, LK_SHARED); 2218 if (error) { 2219 /* 2220 * VRECLAIM race 2221 */ 2222 if (error == ENOENT) { 2223 kprintf("Warning: vnode reclaim race detected " 2224 "in cache_vget on %p (%s)\n", 2225 vp, ncp->nc_name); 2226 _cache_unlock(ncp); 2227 _cache_lock(ncp); 2228 _cache_setunresolved(ncp); 2229 goto again; 2230 } 2231 2232 /* 2233 * Not a reclaim race, some other error. 2234 */ 2235 KKASSERT(ncp->nc_vp == vp); 2236 vp = NULL; 2237 } else { 2238 KKASSERT(ncp->nc_vp == vp); 2239 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 2240 /* caller does not want a lock */ 2241 vn_unlock(vp); 2242 } 2243 } 2244 if (error == 0 && vp == NULL) 2245 error = ENOENT; 2246 *vpp = vp; 2247 return(error); 2248 } 2249 2250 /* 2251 * Return a referenced vnode representing the parent directory of 2252 * ncp. 2253 * 2254 * Because the caller has locked the ncp it should not be possible for 2255 * the parent ncp to go away. However, the parent can unresolve its 2256 * dvp at any time so we must be able to acquire a lock on the parent 2257 * to safely access nc_vp. 2258 * 2259 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock, 2260 * so use vhold()/vdrop() while holding the lock to prevent dvp from 2261 * getting destroyed. 2262 * 2263 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a 2264 * lock on the ncp in question.. 2265 */ 2266 static struct vnode * 2267 cache_dvpref(struct namecache *ncp) 2268 { 2269 struct namecache *par; 2270 struct vnode *dvp; 2271 2272 dvp = NULL; 2273 if ((par = ncp->nc_parent) != NULL) { 2274 _cache_hold(par); 2275 _cache_lock(par); 2276 if ((par->nc_flag & NCF_UNRESOLVED) == 0) { 2277 if ((dvp = par->nc_vp) != NULL) 2278 vhold(dvp); 2279 } 2280 _cache_unlock(par); 2281 if (dvp) { 2282 if (vget(dvp, LK_SHARED) == 0) { 2283 vn_unlock(dvp); 2284 vdrop(dvp); 2285 /* return refd, unlocked dvp */ 2286 } else { 2287 vdrop(dvp); 2288 dvp = NULL; 2289 } 2290 } 2291 _cache_drop(par); 2292 } 2293 return(dvp); 2294 } 2295 2296 /* 2297 * Convert a directory vnode to a namecache record without any other 2298 * knowledge of the topology. This ONLY works with directory vnodes and 2299 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the 2300 * returned ncp (if not NULL) will be held and unlocked. 2301 * 2302 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned. 2303 * If 'makeit' is 1 we attempt to track-down and create the namecache topology 2304 * for dvp. This will fail only if the directory has been deleted out from 2305 * under the caller. 2306 * 2307 * Callers must always check for a NULL return no matter the value of 'makeit'. 2308 * 2309 * To avoid underflowing the kernel stack each recursive call increments 2310 * the makeit variable. 
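 *
 * Illustrative sketch of the intended NFS-server-style usage (assumed
 * caller, error handling elided):
 *
 *	struct nchandle nch;
 *
 *	if (cache_fromdvp(dvp, cred, 1, &nch) == 0) {
 *		(nch.ncp is held and unlocked here)
 *		cache_drop(&nch);
 *	}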
2311 */ 2312 2313 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 2314 struct vnode *dvp, char *fakename); 2315 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2316 struct vnode **saved_dvp); 2317 2318 int 2319 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit, 2320 struct nchandle *nch) 2321 { 2322 struct vnode *saved_dvp; 2323 struct vnode *pvp; 2324 char *fakename; 2325 int error; 2326 2327 nch->ncp = NULL; 2328 nch->mount = dvp->v_mount; 2329 saved_dvp = NULL; 2330 fakename = NULL; 2331 2332 /* 2333 * Handle the makeit == 0 degenerate case 2334 */ 2335 if (makeit == 0) { 2336 spin_lock_shared(&dvp->v_spin); 2337 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2338 if (nch->ncp) 2339 cache_hold(nch); 2340 spin_unlock_shared(&dvp->v_spin); 2341 } 2342 2343 /* 2344 * Loop until resolution, inside code will break out on error. 2345 */ 2346 while (makeit) { 2347 /* 2348 * Break out if we successfully acquire a working ncp. 2349 */ 2350 spin_lock_shared(&dvp->v_spin); 2351 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2352 if (nch->ncp) { 2353 cache_hold(nch); 2354 spin_unlock_shared(&dvp->v_spin); 2355 break; 2356 } 2357 spin_unlock_shared(&dvp->v_spin); 2358 2359 /* 2360 * If dvp is the root of its filesystem it should already 2361 * have a namecache pointer associated with it as a side 2362 * effect of the mount, but it may have been disassociated. 2363 */ 2364 if (dvp->v_flag & VROOT) { 2365 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp); 2366 error = cache_resolve_mp(nch->mount); 2367 _cache_put(nch->ncp); 2368 if (ncvp_debug) { 2369 kprintf("cache_fromdvp: resolve root of mount %p error %d", 2370 dvp->v_mount, error); 2371 } 2372 if (error) { 2373 if (ncvp_debug) 2374 kprintf(" failed\n"); 2375 nch->ncp = NULL; 2376 break; 2377 } 2378 if (ncvp_debug) 2379 kprintf(" succeeded\n"); 2380 continue; 2381 } 2382 2383 /* 2384 * If we are recursed too deeply resort to an O(n^2) 2385 * algorithm to resolve the namecache topology. The 2386 * resolved pvp is left referenced in saved_dvp to 2387 * prevent the tree from being destroyed while we loop. 2388 */ 2389 if (makeit > 20) { 2390 error = cache_fromdvp_try(dvp, cred, &saved_dvp); 2391 if (error) { 2392 kprintf("lookupdotdot(longpath) failed %d " 2393 "dvp %p\n", error, dvp); 2394 nch->ncp = NULL; 2395 break; 2396 } 2397 continue; 2398 } 2399 2400 /* 2401 * Get the parent directory and resolve its ncp. 2402 */ 2403 if (fakename) { 2404 kfree(fakename, M_TEMP); 2405 fakename = NULL; 2406 } 2407 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2408 &fakename); 2409 if (error) { 2410 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp); 2411 break; 2412 } 2413 vn_unlock(pvp); 2414 2415 /* 2416 * Reuse makeit as a recursion depth counter. On success 2417 * nch will be fully referenced. 2418 */ 2419 cache_fromdvp(pvp, cred, makeit + 1, nch); 2420 vrele(pvp); 2421 if (nch->ncp == NULL) 2422 break; 2423 2424 /* 2425 * Do an inefficient scan of pvp (embodied by ncp) to look 2426 * for dvp. This will create a namecache record for dvp on 2427 * success. We loop up to recheck on success. 2428 * 2429 * ncp and dvp are both held but not locked. 
2430 */ 2431 error = cache_inefficient_scan(nch, cred, dvp, fakename); 2432 if (error) { 2433 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 2434 pvp, nch->ncp->nc_name, dvp); 2435 cache_drop(nch); 2436 /* nch was NULLed out, reload mount */ 2437 nch->mount = dvp->v_mount; 2438 break; 2439 } 2440 if (ncvp_debug) { 2441 kprintf("cache_fromdvp: scan %p (%s) succeeded\n", 2442 pvp, nch->ncp->nc_name); 2443 } 2444 cache_drop(nch); 2445 /* nch was NULLed out, reload mount */ 2446 nch->mount = dvp->v_mount; 2447 } 2448 2449 /* 2450 * If nch->ncp is non-NULL it will have been held already. 2451 */ 2452 if (fakename) 2453 kfree(fakename, M_TEMP); 2454 if (saved_dvp) 2455 vrele(saved_dvp); 2456 if (nch->ncp) 2457 return (0); 2458 return (EINVAL); 2459 } 2460 2461 /* 2462 * Go up the chain of parent directories until we find something 2463 * we can resolve into the namecache. This is very inefficient. 2464 */ 2465 static 2466 int 2467 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2468 struct vnode **saved_dvp) 2469 { 2470 struct nchandle nch; 2471 struct vnode *pvp; 2472 int error; 2473 static time_t last_fromdvp_report; 2474 char *fakename; 2475 2476 /* 2477 * Loop getting the parent directory vnode until we get something we 2478 * can resolve in the namecache. 2479 */ 2480 vref(dvp); 2481 nch.mount = dvp->v_mount; 2482 nch.ncp = NULL; 2483 fakename = NULL; 2484 2485 for (;;) { 2486 if (fakename) { 2487 kfree(fakename, M_TEMP); 2488 fakename = NULL; 2489 } 2490 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2491 &fakename); 2492 if (error) { 2493 vrele(dvp); 2494 break; 2495 } 2496 vn_unlock(pvp); 2497 spin_lock_shared(&pvp->v_spin); 2498 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 2499 _cache_hold(nch.ncp); 2500 spin_unlock_shared(&pvp->v_spin); 2501 vrele(pvp); 2502 break; 2503 } 2504 spin_unlock_shared(&pvp->v_spin); 2505 if (pvp->v_flag & VROOT) { 2506 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp); 2507 error = cache_resolve_mp(nch.mount); 2508 _cache_unlock(nch.ncp); 2509 vrele(pvp); 2510 if (error) { 2511 _cache_drop(nch.ncp); 2512 nch.ncp = NULL; 2513 vrele(dvp); 2514 } 2515 break; 2516 } 2517 vrele(dvp); 2518 dvp = pvp; 2519 } 2520 if (error == 0) { 2521 if (last_fromdvp_report != time_uptime) { 2522 last_fromdvp_report = time_uptime; 2523 kprintf("Warning: extremely inefficient path " 2524 "resolution on %s\n", 2525 nch.ncp->nc_name); 2526 } 2527 error = cache_inefficient_scan(&nch, cred, dvp, fakename); 2528 2529 /* 2530 * Hopefully dvp now has a namecache record associated with 2531 * it. Leave it referenced to prevent the kernel from 2532 * recycling the vnode. Otherwise extremely long directory 2533 * paths could result in endless recycling. 2534 */ 2535 if (*saved_dvp) 2536 vrele(*saved_dvp); 2537 *saved_dvp = dvp; 2538 _cache_drop(nch.ncp); 2539 } 2540 if (fakename) 2541 kfree(fakename, M_TEMP); 2542 return (error); 2543 } 2544 2545 /* 2546 * Do an inefficient scan of the directory represented by ncp looking for 2547 * the directory vnode dvp. ncp must be held but not locked on entry and 2548 * will be held on return. dvp must be refd but not locked on entry and 2549 * will remain refd on return. 2550 * 2551 * Why do this at all? Well, due to its stateless nature the NFS server 2552 * converts file handles directly to vnodes without necessarily going through 2553 * the namecache ops that would otherwise create the namecache topology 2554 * leading to the vnode. 
We could either (1) Change the namecache algorithms 2555 * to allow disconnected namecache records that are re-merged opportunistically, 2556 * or (2) Make the NFS server backtrack and scan to recover a connected 2557 * namecache topology in order to then be able to issue new API lookups. 2558 * 2559 * It turns out that (1) is a huge mess. It takes a nice clean set of 2560 * namecache algorithms and introduces a lot of complication in every subsystem 2561 * that calls into the namecache to deal with the re-merge case, especially 2562 * since we are using the namecache to placehold negative lookups and the 2563 * vnode might not be immediately assigned. (2) is certainly far less 2564 * efficient than (1), but since we are only talking about directories here 2565 * (which are likely to remain cached), the case does not actually run all 2566 * that often and has the supreme advantage of not polluting the namecache 2567 * algorithms. 2568 * 2569 * If a fakename is supplied just construct a namecache entry using the 2570 * fake name. 2571 */ 2572 static int 2573 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 2574 struct vnode *dvp, char *fakename) 2575 { 2576 struct nlcomponent nlc; 2577 struct nchandle rncp; 2578 struct dirent *den; 2579 struct vnode *pvp; 2580 struct vattr vat; 2581 struct iovec iov; 2582 struct uio uio; 2583 int blksize; 2584 int eofflag; 2585 int bytes; 2586 char *rbuf; 2587 int error; 2588 2589 vat.va_blocksize = 0; 2590 if ((error = VOP_GETATTR(dvp, &vat)) != 0) 2591 return (error); 2592 cache_lock(nch); 2593 error = cache_vref(nch, cred, &pvp); 2594 cache_unlock(nch); 2595 if (error) 2596 return (error); 2597 if (ncvp_debug) { 2598 kprintf("inefficient_scan of (%p,%s): directory iosize %ld " 2599 "vattr fileid = %lld\n", 2600 nch->ncp, nch->ncp->nc_name, 2601 vat.va_blocksize, 2602 (long long)vat.va_fileid); 2603 } 2604 2605 /* 2606 * Use the supplied fakename if not NULL. Fake names are typically 2607 * not in the actual filesystem hierarchy. This is used by HAMMER 2608 * to glue @@timestamp recursions together.
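 *
 * (A purely illustrative example of such a fake component would be a
 * synthesized snapshot name along the lines of "@@0x00000001061a8ba0",
 * which never appears as a real directory entry.)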
2609 */ 2610 if (fakename) { 2611 nlc.nlc_nameptr = fakename; 2612 nlc.nlc_namelen = strlen(fakename); 2613 rncp = cache_nlookup(nch, &nlc); 2614 goto done; 2615 } 2616 2617 if ((blksize = vat.va_blocksize) == 0) 2618 blksize = DEV_BSIZE; 2619 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK); 2620 rncp.ncp = NULL; 2621 2622 eofflag = 0; 2623 uio.uio_offset = 0; 2624 again: 2625 iov.iov_base = rbuf; 2626 iov.iov_len = blksize; 2627 uio.uio_iov = &iov; 2628 uio.uio_iovcnt = 1; 2629 uio.uio_resid = blksize; 2630 uio.uio_segflg = UIO_SYSSPACE; 2631 uio.uio_rw = UIO_READ; 2632 uio.uio_td = curthread; 2633 2634 if (ncvp_debug >= 2) 2635 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset); 2636 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL); 2637 if (error == 0) { 2638 den = (struct dirent *)rbuf; 2639 bytes = blksize - uio.uio_resid; 2640 2641 while (bytes > 0) { 2642 if (ncvp_debug >= 2) { 2643 kprintf("cache_inefficient_scan: %*.*s\n", 2644 den->d_namlen, den->d_namlen, 2645 den->d_name); 2646 } 2647 if (den->d_type != DT_WHT && 2648 den->d_ino == vat.va_fileid) { 2649 if (ncvp_debug) { 2650 kprintf("cache_inefficient_scan: " 2651 "MATCHED inode %lld path %s/%*.*s\n", 2652 (long long)vat.va_fileid, 2653 nch->ncp->nc_name, 2654 den->d_namlen, den->d_namlen, 2655 den->d_name); 2656 } 2657 nlc.nlc_nameptr = den->d_name; 2658 nlc.nlc_namelen = den->d_namlen; 2659 rncp = cache_nlookup(nch, &nlc); 2660 KKASSERT(rncp.ncp != NULL); 2661 break; 2662 } 2663 bytes -= _DIRENT_DIRSIZ(den); 2664 den = _DIRENT_NEXT(den); 2665 } 2666 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize) 2667 goto again; 2668 } 2669 kfree(rbuf, M_TEMP); 2670 done: 2671 vrele(pvp); 2672 if (rncp.ncp) { 2673 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) { 2674 _cache_setvp(rncp.mount, rncp.ncp, dvp); 2675 if (ncvp_debug >= 2) { 2676 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n", 2677 nch->ncp->nc_name, rncp.ncp->nc_name, dvp); 2678 } 2679 } else { 2680 if (ncvp_debug >= 2) { 2681 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n", 2682 nch->ncp->nc_name, rncp.ncp->nc_name, dvp, 2683 rncp.ncp->nc_vp); 2684 } 2685 } 2686 if (rncp.ncp->nc_vp == NULL) 2687 error = rncp.ncp->nc_error; 2688 /* 2689 * Release rncp after a successful nlookup. rncp was fully 2690 * referenced. 2691 */ 2692 cache_put(&rncp); 2693 } else { 2694 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n", 2695 dvp, nch->ncp->nc_name); 2696 error = ENOENT; 2697 } 2698 return (error); 2699 } 2700 2701 /* 2702 * Zap a namecache entry. The ncp is unconditionally set to an unresolved 2703 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list. 2704 * 2705 * Then, if there are no additional references to the ncp and no children, 2706 * the ncp is removed from the topology and destroyed. 2707 * 2708 * References and/or children may exist if the ncp is in the middle of the 2709 * topology, preventing the ncp from being destroyed. 2710 * 2711 * This function must be called with the ncp held and locked and will unlock 2712 * and drop it during zapping. 2713 * 2714 * If nonblock is non-zero and the parent ncp cannot be locked we give up. 2715 * This case can occur in the cache_drop() path. 2716 * 2717 * This function may returned a held (but NOT locked) parent node which the 2718 * caller must drop. We do this so _cache_drop() can loop, to avoid 2719 * blowing out the kernel stack. 2720 * 2721 * WARNING! 
For MPSAFE operation this routine must acquire up to three 2722 * spin locks to be able to safely test nc_refs. Lock order is 2723 * very important. 2724 * 2725 * hash spinlock if on hash list 2726 * parent spinlock if child of parent 2727 * (the ncp is unresolved so there is no vnode association) 2728 */ 2729 static struct namecache * 2730 cache_zap(struct namecache *ncp, int nonblock) 2731 { 2732 struct namecache *par; 2733 struct vnode *dropvp; 2734 struct nchash_head *nchpp; 2735 int refs; 2736 2737 /* 2738 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED. 2739 */ 2740 _cache_setunresolved(ncp); 2741 2742 /* 2743 * Try to scrap the entry and possibly tail-recurse on its parent. 2744 * We only scrap unref'd (other then our ref) unresolved entries, 2745 * we do not scrap 'live' entries. 2746 * 2747 * Note that once the spinlocks are acquired if nc_refs == 1 no 2748 * other references are possible. If it isn't, however, we have 2749 * to decrement but also be sure to avoid a 1->0 transition. 2750 */ 2751 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 2752 KKASSERT(ncp->nc_refs > 0); 2753 2754 /* 2755 * Acquire locks. Note that the parent can't go away while we hold 2756 * a child locked. 2757 */ 2758 nchpp = NULL; 2759 if ((par = ncp->nc_parent) != NULL) { 2760 if (nonblock) { 2761 for (;;) { 2762 if (_cache_lock_nonblock(par) == 0) 2763 break; 2764 refs = ncp->nc_refs; 2765 ncp->nc_flag |= NCF_DEFEREDZAP; 2766 atomic_add_long( 2767 &pcpu_ncache[mycpu->gd_cpuid].numdefered, 2768 1); 2769 if (atomic_cmpset_int(&ncp->nc_refs, 2770 refs, refs - 1)) { 2771 _cache_unlock(ncp); 2772 return(NULL); 2773 } 2774 cpu_pause(); 2775 } 2776 _cache_hold(par); 2777 } else { 2778 _cache_hold(par); 2779 _cache_lock(par); 2780 } 2781 nchpp = ncp->nc_head; 2782 spin_lock(&nchpp->spin); 2783 } 2784 2785 /* 2786 * At this point if we find refs == 1 it should not be possible for 2787 * anyone else to have access to the ncp. We are holding the only 2788 * possible access point left (nchpp) spin-locked. 2789 * 2790 * If someone other then us has a ref or we have children 2791 * we cannot zap the entry. The 1->0 transition and any 2792 * further list operation is protected by the spinlocks 2793 * we have acquired but other transitions are not. 2794 */ 2795 for (;;) { 2796 refs = ncp->nc_refs; 2797 cpu_ccfence(); 2798 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list)) 2799 break; 2800 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) { 2801 if (par) { 2802 spin_unlock(&nchpp->spin); 2803 _cache_put(par); 2804 } 2805 _cache_unlock(ncp); 2806 return(NULL); 2807 } 2808 cpu_pause(); 2809 } 2810 2811 /* 2812 * We are the only ref and with the spinlocks held no further 2813 * refs can be acquired by others. 2814 * 2815 * Remove us from the hash list and parent list. We have to 2816 * drop a ref on the parent's vp if the parent's list becomes 2817 * empty. 
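 *
 * (The ref in question is the vhold() placed on the parent's vnode on
 * behalf of its children; once the child list empties it is no longer
 * needed and is released below via dropvp, after the spinlocks have
 * been dropped.)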
2818 */ 2819 dropvp = NULL; 2820 if (par) { 2821 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 2822 2823 KKASSERT(nchpp == ncp->nc_head); 2824 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash); 2825 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 2826 atomic_add_long(&pn->vfscache_count, -1); 2827 if (TAILQ_EMPTY(&ncp->nc_list)) 2828 atomic_add_long(&pn->vfscache_leafs, -1); 2829 2830 if (TAILQ_EMPTY(&par->nc_list)) { 2831 atomic_add_long(&pn->vfscache_leafs, 1); 2832 if (par->nc_vp) 2833 dropvp = par->nc_vp; 2834 } 2835 ncp->nc_head = NULL; 2836 ncp->nc_parent = NULL; 2837 spin_unlock(&nchpp->spin); 2838 _cache_unlock(par); 2839 } else { 2840 KKASSERT(ncp->nc_head == NULL); 2841 } 2842 2843 /* 2844 * ncp should not have picked up any refs. Physically 2845 * destroy the ncp. 2846 */ 2847 if (ncp->nc_refs != 1) { 2848 int save_refs = ncp->nc_refs; 2849 cpu_ccfence(); 2850 panic("cache_zap: %p bad refs %d (%d)\n", 2851 ncp, save_refs, atomic_fetchadd_int(&ncp->nc_refs, 0)); 2852 } 2853 KKASSERT(ncp->nc_refs == 1); 2854 /* _cache_unlock(ncp) not required */ 2855 ncp->nc_refs = -1; /* safety */ 2856 if (ncp->nc_name) 2857 kfree(ncp->nc_name, M_VFSCACHE); 2858 kfree(ncp, M_VFSCACHE); 2859 2860 /* 2861 * Delayed drop (we had to release our spinlocks) 2862 * 2863 * The refed parent (if not NULL) must be dropped. The 2864 * caller is responsible for looping. 2865 */ 2866 if (dropvp) 2867 vdrop(dropvp); 2868 return(par); 2869 } 2870 2871 /* 2872 * Clean up dangling negative cache and defered-drop entries in the 2873 * namecache. 2874 * 2875 * This routine is called in the critical path and also called from 2876 * vnlru(). When called from vnlru we use a lower limit to try to 2877 * deal with the negative cache before the critical path has to start 2878 * dealing with it. 2879 */ 2880 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t; 2881 2882 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2883 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2884 2885 void 2886 cache_hysteresis(int critpath) 2887 { 2888 long poslimit; 2889 long neglimit = maxvnodes / ncnegfactor; 2890 long xnumcache = vfscache_leafs; 2891 2892 if (critpath == 0) 2893 neglimit = neglimit * 8 / 10; 2894 2895 /* 2896 * Don't cache too many negative hits. We use hysteresis to reduce 2897 * the impact on the critical path. 2898 */ 2899 switch(neg_cache_hysteresis_state[critpath]) { 2900 case CHI_LOW: 2901 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) { 2902 if (critpath) 2903 _cache_cleanneg(ncnegflush); 2904 else 2905 _cache_cleanneg(ncnegflush + 2906 vfscache_negs - neglimit); 2907 neg_cache_hysteresis_state[critpath] = CHI_HIGH; 2908 } 2909 break; 2910 case CHI_HIGH: 2911 if (vfscache_negs > MINNEG * 9 / 10 && 2912 vfscache_negs * 9 / 10 > neglimit 2913 ) { 2914 if (critpath) 2915 _cache_cleanneg(ncnegflush); 2916 else 2917 _cache_cleanneg(ncnegflush + 2918 vfscache_negs * 9 / 10 - 2919 neglimit); 2920 } else { 2921 neg_cache_hysteresis_state[critpath] = CHI_LOW; 2922 } 2923 break; 2924 } 2925 2926 /* 2927 * Don't cache too many positive hits. We use hysteresis to reduce 2928 * the impact on the critical path. 2929 * 2930 * Excessive positive hits can accumulate due to large numbers of 2931 * hardlinks (the vnode cache will not prevent hl ncps from growing 2932 * into infinity). 
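 *
 * For example (illustrative numbers only): with ncposlimit unset and
 * maxvnodes at 100000, poslimit starts at 200000; a non-critical-path
 * call (critpath == 0) lowers that to 160000, and once the leaf count
 * exceeds it we keep flushing down toward the 5/6 point (~133333)
 * before the hysteresis state falls back to CHI_LOW.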
2933 */ 2934 if ((poslimit = ncposlimit) == 0) 2935 poslimit = maxvnodes * 2; 2936 if (critpath == 0) 2937 poslimit = poslimit * 8 / 10; 2938 2939 switch(pos_cache_hysteresis_state[critpath]) { 2940 case CHI_LOW: 2941 if (xnumcache > poslimit && xnumcache > MINPOS) { 2942 if (critpath) 2943 _cache_cleanpos(ncposflush); 2944 else 2945 _cache_cleanpos(ncposflush + 2946 xnumcache - poslimit); 2947 pos_cache_hysteresis_state[critpath] = CHI_HIGH; 2948 } 2949 break; 2950 case CHI_HIGH: 2951 if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) { 2952 if (critpath) 2953 _cache_cleanpos(ncposflush); 2954 else 2955 _cache_cleanpos(ncposflush + 2956 xnumcache - poslimit * 5 / 6); 2957 } else { 2958 pos_cache_hysteresis_state[critpath] = CHI_LOW; 2959 } 2960 break; 2961 } 2962 2963 /* 2964 * Clean out dangling deferred-zap ncps which could not be cleanly 2965 * dropped if too many build up. Note that numdefered is 2966 * only a heuristic. Make sure we are real-time for the current cpu, 2967 * plus the global rollup. 2968 */ 2969 if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) { 2970 _cache_cleandefered(); 2971 } 2972 } 2973 2974 /* 2975 * NEW NAMECACHE LOOKUP API 2976 * 2977 * Lookup an entry in the namecache. The passed par_nch must be referenced 2978 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp 2979 * is ALWAYS returned, even if the supplied component is illegal. 2980 * 2981 * The resulting namecache entry should be returned to the system with 2982 * cache_put() or cache_unlock() + cache_drop(). 2983 * 2984 * namecache locks are recursive but care must be taken to avoid lock order 2985 * reversals (hence why the passed par_nch must be unlocked). Locking 2986 * rules are to order for parent traversals, not for child traversals. 2987 * 2988 * Nobody else will be able to manipulate the associated namespace (e.g. 2989 * create, delete, rename, rename-target) until the caller unlocks the 2990 * entry. 2991 * 2992 * The returned entry will be in one of three states: positive hit (non-null 2993 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set). 2994 * Unresolved entries must be resolved through the filesystem to associate the 2995 * vnode and/or determine whether a positive or negative hit has occurred. 2996 * 2997 * It is not necessary to lock a directory in order to lock namespace under 2998 * that directory. In fact, it is explicitly not allowed to do that. A 2999 * directory is typically only locked when being created, renamed, or 3000 * destroyed. 3001 * 3002 * The directory (par) may be unresolved, in which case any returned child 3003 * will likely also be marked unresolved. Likely but not guaranteed. Since 3004 * the filesystem lookup requires a resolved directory vnode the caller is 3005 * responsible for resolving the namecache chain top-down. This API 3006 * specifically allows whole chains to be created in an unresolved state. 3007 */ 3008 struct nchandle 3009 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc) 3010 { 3011 struct nchandle nch; 3012 struct namecache *ncp; 3013 struct namecache *new_ncp; 3014 struct namecache *rep_ncp; /* reuse a destroyed ncp */ 3015 struct nchash_head *nchpp; 3016 struct mount *mp; 3017 u_int32_t hash; 3018 globaldata_t gd; 3019 int par_locked; 3020 3021 gd = mycpu; 3022 mp = par_nch->mount; 3023 par_locked = 0; 3024 3025 /* 3026 * This is a good time to call it, no ncp's are locked by 3027 * the caller or us.
3028 */ 3029 cache_hysteresis(1); 3030 3031 /* 3032 * Try to locate an existing entry 3033 */ 3034 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3035 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3036 new_ncp = NULL; 3037 nchpp = NCHHASH(hash); 3038 restart: 3039 rep_ncp = NULL; 3040 if (new_ncp) 3041 spin_lock(&nchpp->spin); 3042 else 3043 spin_lock_shared(&nchpp->spin); 3044 3045 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3046 /* 3047 * Break out if we find a matching entry. Note that 3048 * UNRESOLVED entries may match, but DESTROYED entries 3049 * do not. 3050 * 3051 * We may be able to reuse DESTROYED entries that we come 3052 * across, even if the name does not match, as long as 3053 * nc_nlen is correct. 3054 */ 3055 if (ncp->nc_parent == par_nch->ncp && 3056 ncp->nc_nlen == nlc->nlc_namelen) { 3057 if (ncp->nc_flag & NCF_DESTROYED) { 3058 if (ncp->nc_refs == 0 && rep_ncp == NULL) 3059 rep_ncp = ncp; 3060 continue; 3061 } 3062 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen)) 3063 continue; 3064 _cache_hold(ncp); 3065 if (new_ncp) 3066 spin_unlock(&nchpp->spin); 3067 else 3068 spin_unlock_shared(&nchpp->spin); 3069 if (par_locked) { 3070 _cache_unlock(par_nch->ncp); 3071 par_locked = 0; 3072 } 3073 if (_cache_lock_special(ncp) == 0) { 3074 /* 3075 * Successfully locked but we must re-test 3076 * conditions that might have changed since 3077 * we did not have the lock before. 3078 */ 3079 if (ncp->nc_parent != par_nch->ncp || 3080 ncp->nc_nlen != nlc->nlc_namelen || 3081 bcmp(ncp->nc_name, nlc->nlc_nameptr, 3082 ncp->nc_nlen) || 3083 (ncp->nc_flag & NCF_DESTROYED)) { 3084 _cache_put(ncp); 3085 goto restart; 3086 } 3087 _cache_auto_unresolve(mp, ncp); 3088 if (new_ncp) 3089 _cache_free(new_ncp); 3090 goto found; 3091 } 3092 _cache_get(ncp); /* cycle the lock to block */ 3093 _cache_put(ncp); 3094 _cache_drop(ncp); 3095 goto restart; 3096 } 3097 } 3098 3099 /* 3100 * We failed to locate the entry, try to resurrect a destroyed 3101 * entry that we did find that is already correctly linked into 3102 * nchpp and the parent. We must re-test conditions after 3103 * successfully locking rep_ncp. 3104 * 3105 * This case can occur under heavy loads due to not being able 3106 * to safely lock the parent in cache_zap(). Nominally a repeated 3107 * create/unlink load, but only the namelen needs to match. 3108 */ 3109 if (rep_ncp && new_ncp == NULL) { 3110 if (_cache_lock_nonblock(rep_ncp) == 0) { 3111 _cache_hold(rep_ncp); 3112 if (rep_ncp->nc_parent == par_nch->ncp && 3113 rep_ncp->nc_nlen == nlc->nlc_namelen && 3114 (rep_ncp->nc_flag & NCF_DESTROYED)) { 3115 /* 3116 * Update nc_name as reuse as new. 3117 */ 3118 ncp = rep_ncp; 3119 bcopy(nlc->nlc_nameptr, ncp->nc_name, 3120 nlc->nlc_namelen); 3121 spin_unlock_shared(&nchpp->spin); 3122 _cache_setunresolved(ncp); 3123 ncp->nc_flag = NCF_UNRESOLVED; 3124 ncp->nc_error = ENOTCONN; 3125 goto found; 3126 } 3127 _cache_put(rep_ncp); 3128 } 3129 } 3130 3131 /* 3132 * Otherwise create a new entry and add it to the cache. The parent 3133 * ncp must also be locked so we can link into it. 3134 * 3135 * We have to relookup after possibly blocking in kmalloc or 3136 * when locking par_nch. 3137 * 3138 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 3139 * mount case, in which case nc_name will be NULL. 
3140 */ 3141 if (new_ncp == NULL) { 3142 spin_unlock_shared(&nchpp->spin); 3143 new_ncp = cache_alloc(nlc->nlc_namelen); 3144 if (nlc->nlc_namelen) { 3145 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 3146 nlc->nlc_namelen); 3147 new_ncp->nc_name[nlc->nlc_namelen] = 0; 3148 } 3149 goto restart; 3150 } 3151 3152 /* 3153 * NOTE! The spinlock is held exclusively here because new_ncp 3154 * is non-NULL. 3155 */ 3156 if (par_locked == 0) { 3157 spin_unlock(&nchpp->spin); 3158 _cache_lock(par_nch->ncp); 3159 par_locked = 1; 3160 goto restart; 3161 } 3162 3163 /* 3164 * WARNING! We still hold the spinlock. We have to set the hash 3165 * table entry atomically. 3166 */ 3167 ncp = new_ncp; 3168 _cache_link_parent(ncp, par_nch->ncp, nchpp); 3169 spin_unlock(&nchpp->spin); 3170 _cache_unlock(par_nch->ncp); 3171 /* par_locked = 0 - not used */ 3172 found: 3173 /* 3174 * stats and namecache size management 3175 */ 3176 if (ncp->nc_flag & NCF_UNRESOLVED) 3177 ++gd->gd_nchstats->ncs_miss; 3178 else if (ncp->nc_vp) 3179 ++gd->gd_nchstats->ncs_goodhits; 3180 else 3181 ++gd->gd_nchstats->ncs_neghits; 3182 nch.mount = mp; 3183 nch.ncp = ncp; 3184 _cache_mntref(nch.mount); 3185 3186 return(nch); 3187 } 3188 3189 /* 3190 * Attempt to lookup a namecache entry and return with a shared namecache 3191 * lock. 3192 */ 3193 int 3194 cache_nlookup_maybe_shared(struct nchandle *par_nch, struct nlcomponent *nlc, 3195 int excl, struct nchandle *res_nch) 3196 { 3197 struct namecache *ncp; 3198 struct nchash_head *nchpp; 3199 struct mount *mp; 3200 u_int32_t hash; 3201 globaldata_t gd; 3202 3203 /* 3204 * If exclusive requested or shared namecache locks are disabled, 3205 * return failure. 3206 */ 3207 if (ncp_shared_lock_disable || excl) 3208 return(EWOULDBLOCK); 3209 3210 gd = mycpu; 3211 mp = par_nch->mount; 3212 3213 /* 3214 * This is a good time to call it, no ncp's are locked by 3215 * the caller or us. 3216 */ 3217 cache_hysteresis(1); 3218 3219 /* 3220 * Try to locate an existing entry 3221 */ 3222 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3223 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3224 nchpp = NCHHASH(hash); 3225 3226 spin_lock_shared(&nchpp->spin); 3227 3228 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3229 /* 3230 * Break out if we find a matching entry. Note that 3231 * UNRESOLVED entries may match, but DESTROYED entries 3232 * do not. 3233 */ 3234 if (ncp->nc_parent == par_nch->ncp && 3235 ncp->nc_nlen == nlc->nlc_namelen && 3236 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3237 (ncp->nc_flag & NCF_DESTROYED) == 0 3238 ) { 3239 _cache_hold(ncp); 3240 spin_unlock_shared(&nchpp->spin); 3241 if (_cache_lock_shared_special(ncp) == 0) { 3242 if (ncp->nc_parent == par_nch->ncp && 3243 ncp->nc_nlen == nlc->nlc_namelen && 3244 bcmp(ncp->nc_name, nlc->nlc_nameptr, 3245 ncp->nc_nlen) == 0 && 3246 (ncp->nc_flag & NCF_DESTROYED) == 0 && 3247 (ncp->nc_flag & NCF_UNRESOLVED) == 0 && 3248 _cache_auto_unresolve_test(mp, ncp) == 0) { 3249 goto found; 3250 } 3251 _cache_unlock(ncp); 3252 } 3253 _cache_drop(ncp); 3254 spin_lock_shared(&nchpp->spin); 3255 break; 3256 } 3257 } 3258 3259 /* 3260 * Failure 3261 */ 3262 spin_unlock_shared(&nchpp->spin); 3263 return(EWOULDBLOCK); 3264 3265 /* 3266 * Success 3267 * 3268 * Note that nc_error might be non-zero (e.g ENOENT). 
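 *
 * Illustrative caller pattern (a sketch, not taken from this file):
 *
 *	if (cache_nlookup_maybe_shared(&par_nch, &nlc, 0, &nch) ==
 *	    EWOULDBLOCK) {
 *		nch = cache_nlookup(&par_nch, &nlc); (exclusive fallback)
 *	}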
3269 */ 3270 found: 3271 res_nch->mount = mp; 3272 res_nch->ncp = ncp; 3273 ++gd->gd_nchstats->ncs_goodhits; 3274 _cache_mntref(res_nch->mount); 3275 3276 KKASSERT(ncp->nc_error != EWOULDBLOCK); 3277 return(ncp->nc_error); 3278 } 3279 3280 /* 3281 * This is a non-blocking verison of cache_nlookup() used by 3282 * nfs_readdirplusrpc_uio(). It can fail for any reason and 3283 * will return nch.ncp == NULL in that case. 3284 */ 3285 struct nchandle 3286 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc) 3287 { 3288 struct nchandle nch; 3289 struct namecache *ncp; 3290 struct namecache *new_ncp; 3291 struct nchash_head *nchpp; 3292 struct mount *mp; 3293 u_int32_t hash; 3294 globaldata_t gd; 3295 int par_locked; 3296 3297 gd = mycpu; 3298 mp = par_nch->mount; 3299 par_locked = 0; 3300 3301 /* 3302 * Try to locate an existing entry 3303 */ 3304 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3305 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3306 new_ncp = NULL; 3307 nchpp = NCHHASH(hash); 3308 restart: 3309 spin_lock(&nchpp->spin); 3310 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3311 /* 3312 * Break out if we find a matching entry. Note that 3313 * UNRESOLVED entries may match, but DESTROYED entries 3314 * do not. 3315 */ 3316 if (ncp->nc_parent == par_nch->ncp && 3317 ncp->nc_nlen == nlc->nlc_namelen && 3318 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3319 (ncp->nc_flag & NCF_DESTROYED) == 0 3320 ) { 3321 _cache_hold(ncp); 3322 spin_unlock(&nchpp->spin); 3323 if (par_locked) { 3324 _cache_unlock(par_nch->ncp); 3325 par_locked = 0; 3326 } 3327 if (_cache_lock_special(ncp) == 0) { 3328 if (ncp->nc_parent != par_nch->ncp || 3329 ncp->nc_nlen != nlc->nlc_namelen || 3330 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) || 3331 (ncp->nc_flag & NCF_DESTROYED)) { 3332 kprintf("cache_lookup_nonblock: " 3333 "ncp-race %p %*.*s\n", 3334 ncp, 3335 nlc->nlc_namelen, 3336 nlc->nlc_namelen, 3337 nlc->nlc_nameptr); 3338 _cache_unlock(ncp); 3339 _cache_drop(ncp); 3340 goto failed; 3341 } 3342 _cache_auto_unresolve(mp, ncp); 3343 if (new_ncp) { 3344 _cache_free(new_ncp); 3345 new_ncp = NULL; 3346 } 3347 goto found; 3348 } 3349 _cache_drop(ncp); 3350 goto failed; 3351 } 3352 } 3353 3354 /* 3355 * We failed to locate an entry, create a new entry and add it to 3356 * the cache. The parent ncp must also be locked so we 3357 * can link into it. 3358 * 3359 * We have to relookup after possibly blocking in kmalloc or 3360 * when locking par_nch. 3361 * 3362 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 3363 * mount case, in which case nc_name will be NULL. 3364 */ 3365 if (new_ncp == NULL) { 3366 spin_unlock(&nchpp->spin); 3367 new_ncp = cache_alloc(nlc->nlc_namelen); 3368 if (nlc->nlc_namelen) { 3369 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 3370 nlc->nlc_namelen); 3371 new_ncp->nc_name[nlc->nlc_namelen] = 0; 3372 } 3373 goto restart; 3374 } 3375 if (par_locked == 0) { 3376 spin_unlock(&nchpp->spin); 3377 if (_cache_lock_nonblock(par_nch->ncp) == 0) { 3378 par_locked = 1; 3379 goto restart; 3380 } 3381 goto failed; 3382 } 3383 3384 /* 3385 * WARNING! We still hold the spinlock. We have to set the hash 3386 * table entry atomically. 
3387 */ 3388 ncp = new_ncp; 3389 _cache_link_parent(ncp, par_nch->ncp, nchpp); 3390 spin_unlock(&nchpp->spin); 3391 _cache_unlock(par_nch->ncp); 3392 /* par_locked = 0 - not used */ 3393 found: 3394 /* 3395 * stats and namecache size management 3396 */ 3397 if (ncp->nc_flag & NCF_UNRESOLVED) 3398 ++gd->gd_nchstats->ncs_miss; 3399 else if (ncp->nc_vp) 3400 ++gd->gd_nchstats->ncs_goodhits; 3401 else 3402 ++gd->gd_nchstats->ncs_neghits; 3403 nch.mount = mp; 3404 nch.ncp = ncp; 3405 _cache_mntref(nch.mount); 3406 3407 return(nch); 3408 failed: 3409 if (new_ncp) { 3410 _cache_free(new_ncp); 3411 new_ncp = NULL; 3412 } 3413 nch.mount = NULL; 3414 nch.ncp = NULL; 3415 return(nch); 3416 } 3417 3418 /* 3419 * The namecache entry is marked as being used as a mount point. 3420 * Locate the mount if it is visible to the caller. The DragonFly 3421 * mount system allows arbitrary loops in the topology and disentangles 3422 * those loops by matching against (mp, ncp) rather than just (ncp). 3423 * This means any given ncp can dive any number of mounts, depending 3424 * on the relative mount (e.g. nullfs) the caller is at in the topology. 3425 * 3426 * We use a very simple frontend cache to reduce SMP conflicts, 3427 * which we have to do because the mountlist scan needs an exclusive 3428 * lock around its ripout info list. Not to mention that there might 3429 * be a lot of mounts. 3430 */ 3431 struct findmount_info { 3432 struct mount *result; 3433 struct mount *nch_mount; 3434 struct namecache *nch_ncp; 3435 }; 3436 3437 static 3438 struct ncmount_cache * 3439 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp) 3440 { 3441 uintptr_t hash; 3442 3443 hash = (uintptr_t)mp + ((uintptr_t)mp >> 18); 3444 hash += (uintptr_t)ncp + ((uintptr_t)ncp >> 16); 3445 hash = (hash >> 1) % NCMOUNT_NUMCACHE; 3446 3447 return (&ncmount_cache[hash]); 3448 } 3449 3450 static 3451 int 3452 cache_findmount_callback(struct mount *mp, void *data) 3453 { 3454 struct findmount_info *info = data; 3455 3456 /* 3457 * Check the mount's mounted-on point against the passed nch. 3458 */ 3459 if (mp->mnt_ncmounton.mount == info->nch_mount && 3460 mp->mnt_ncmounton.ncp == info->nch_ncp 3461 ) { 3462 info->result = mp; 3463 _cache_mntref(mp); 3464 return(-1); 3465 } 3466 return(0); 3467 } 3468 3469 struct mount * 3470 cache_findmount(struct nchandle *nch) 3471 { 3472 struct findmount_info info; 3473 struct ncmount_cache *ncc; 3474 struct mount *mp; 3475 3476 /* 3477 * Fast 3478 */ 3479 if (ncmount_cache_enable == 0) { 3480 ncc = NULL; 3481 goto skip; 3482 } 3483 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3484 if (ncc->ncp == nch->ncp) { 3485 spin_lock_shared(&ncc->spin); 3486 if (ncc->isneg == 0 && 3487 ncc->ncp == nch->ncp && (mp = ncc->mp) != NULL) { 3488 if (mp->mnt_ncmounton.mount == nch->mount && 3489 mp->mnt_ncmounton.ncp == nch->ncp) { 3490 /* 3491 * Cache hit (positive) 3492 */ 3493 _cache_mntref(mp); 3494 spin_unlock_shared(&ncc->spin); 3495 return(mp); 3496 } 3497 /* else cache miss */ 3498 } 3499 if (ncc->isneg && 3500 ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3501 /* 3502 * Cache hit (negative) 3503 */ 3504 spin_unlock_shared(&ncc->spin); 3505 return(NULL); 3506 } 3507 spin_unlock_shared(&ncc->spin); 3508 } 3509 skip: 3510 3511 /* 3512 * Slow 3513 */ 3514 info.result = NULL; 3515 info.nch_mount = nch->mount; 3516 info.nch_ncp = nch->ncp; 3517 mountlist_scan(cache_findmount_callback, &info, 3518 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 3519 3520 /* 3521 * Cache the result. 
3522 * 3523 * Negative lookups: We cache the originating {ncp,mp}. (mp) is 3524 * only used for pointer comparisons and is not 3525 * referenced (otherwise there would be dangling 3526 * refs). 3527 * 3528 * Positive lookups: We cache the originating {ncp} and the target 3529 * (mp). (mp) is referenced. 3530 * 3531 * Indeterminant: If the match is undergoing an unmount we do 3532 * not cache it to avoid racing cache_unmounting(), 3533 * but still return the match. 3534 */ 3535 if (ncc) { 3536 spin_lock(&ncc->spin); 3537 if (info.result == NULL) { 3538 if (ncc->isneg == 0 && ncc->mp) 3539 _cache_mntrel(ncc->mp); 3540 ncc->ncp = nch->ncp; 3541 ncc->mp = nch->mount; 3542 ncc->isneg = 1; 3543 spin_unlock(&ncc->spin); 3544 } else if ((info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0) { 3545 if (ncc->isneg == 0 && ncc->mp) 3546 _cache_mntrel(ncc->mp); 3547 _cache_mntref(info.result); 3548 ncc->ncp = nch->ncp; 3549 ncc->mp = info.result; 3550 ncc->isneg = 0; 3551 spin_unlock(&ncc->spin); 3552 } else { 3553 spin_unlock(&ncc->spin); 3554 } 3555 } 3556 return(info.result); 3557 } 3558 3559 void 3560 cache_dropmount(struct mount *mp) 3561 { 3562 _cache_mntrel(mp); 3563 } 3564 3565 void 3566 cache_ismounting(struct mount *mp) 3567 { 3568 struct nchandle *nch = &mp->mnt_ncmounton; 3569 struct ncmount_cache *ncc; 3570 3571 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3572 if (ncc->isneg && 3573 ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3574 spin_lock(&ncc->spin); 3575 if (ncc->isneg && 3576 ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3577 ncc->ncp = NULL; 3578 ncc->mp = NULL; 3579 } 3580 spin_unlock(&ncc->spin); 3581 } 3582 } 3583 3584 void 3585 cache_unmounting(struct mount *mp) 3586 { 3587 struct nchandle *nch = &mp->mnt_ncmounton; 3588 struct ncmount_cache *ncc; 3589 3590 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3591 if (ncc->isneg == 0 && 3592 ncc->ncp == nch->ncp && ncc->mp == mp) { 3593 spin_lock(&ncc->spin); 3594 if (ncc->isneg == 0 && 3595 ncc->ncp == nch->ncp && ncc->mp == mp) { 3596 _cache_mntrel(mp); 3597 ncc->ncp = NULL; 3598 ncc->mp = NULL; 3599 } 3600 spin_unlock(&ncc->spin); 3601 } 3602 } 3603 3604 /* 3605 * Resolve an unresolved namecache entry, generally by looking it up. 3606 * The passed ncp must be locked and refd. 3607 * 3608 * Theoretically since a vnode cannot be recycled while held, and since 3609 * the nc_parent chain holds its vnode as long as children exist, the 3610 * direct parent of the cache entry we are trying to resolve should 3611 * have a valid vnode. If not then generate an error that we can 3612 * determine is related to a resolver bug. 3613 * 3614 * However, if a vnode was in the middle of a recyclement when the NCP 3615 * got locked, ncp->nc_vp might point to a vnode that is about to become 3616 * invalid. cache_resolve() handles this case by unresolving the entry 3617 * and then re-resolving it. 3618 * 3619 * Note that successful resolution does not necessarily return an error 3620 * code of 0. If the ncp resolves to a negative cache hit then ENOENT 3621 * will be returned. 3622 */ 3623 int 3624 cache_resolve(struct nchandle *nch, struct ucred *cred) 3625 { 3626 struct namecache *par_tmp; 3627 struct namecache *par; 3628 struct namecache *ncp; 3629 struct nchandle nctmp; 3630 struct mount *mp; 3631 struct vnode *dvp; 3632 int error; 3633 3634 ncp = nch->ncp; 3635 mp = nch->mount; 3636 KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE); 3637 restart: 3638 /* 3639 * If the ncp is already resolved we have nothing to do. 
However, 3640 * we do want to guarantee that a usable vnode is returned when 3641 * a vnode is present, so make sure it hasn't been reclaimed. 3642 */ 3643 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3644 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 3645 _cache_setunresolved(ncp); 3646 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 3647 return (ncp->nc_error); 3648 } 3649 3650 /* 3651 * If the ncp was destroyed it will never resolve again. This 3652 * can basically only happen when someone is chdir'd into an 3653 * empty directory which is then rmdir'd. We want to catch this 3654 * here and not dive the VFS because the VFS might actually 3655 * have a way to re-resolve the disconnected ncp, which will 3656 * result in inconsistencies in the cdir/nch for proc->p_fd. 3657 */ 3658 if (ncp->nc_flag & NCF_DESTROYED) 3659 return(EINVAL); 3660 3661 /* 3662 * Mount points need special handling because the parent does not 3663 * belong to the same filesystem as the ncp. 3664 */ 3665 if (ncp == mp->mnt_ncmountpt.ncp) 3666 return (cache_resolve_mp(mp)); 3667 3668 /* 3669 * We expect an unbroken chain of ncps to at least the mount point, 3670 * and even all the way to root (but this code doesn't have to go 3671 * past the mount point). 3672 */ 3673 if (ncp->nc_parent == NULL) { 3674 kprintf("EXDEV case 1 %p %*.*s\n", ncp, 3675 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 3676 ncp->nc_error = EXDEV; 3677 return(ncp->nc_error); 3678 } 3679 3680 /* 3681 * The vp's of the parent directories in the chain are held via vhold() 3682 * due to the existence of the child, and should not disappear. 3683 * However, there are cases where they can disappear: 3684 * 3685 * - due to filesystem I/O errors. 3686 * - due to NFS being stupid about tracking the namespace and 3687 * destroying the namespace for entire directories quite often. 3688 * - due to forced unmounts. 3689 * - due to an rmdir (parent will be marked DESTROYED) 3690 * 3691 * When this occurs we have to track the chain backwards and resolve 3692 * it, looping until the resolver catches up to the current node. We 3693 * could recurse here but we might run ourselves out of kernel stack 3694 * so we do it in a more painful manner. This situation really should 3695 * not occur all that often and, if it does, should not have to go back 3696 * too many nodes to resolve the ncp. 3697 */ 3698 while ((dvp = cache_dvpref(ncp)) == NULL) { 3699 /* 3700 * This case can occur if a process is CD'd into a 3701 * directory which is then rmdir'd. If the parent is marked 3702 * destroyed there is no point trying to resolve it. 3703 */ 3704 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) 3705 return(ENOENT); 3706 par = ncp->nc_parent; 3707 _cache_hold(par); 3708 _cache_lock(par); 3709 while ((par_tmp = par->nc_parent) != NULL && 3710 par_tmp->nc_vp == NULL) { 3711 _cache_hold(par_tmp); 3712 _cache_lock(par_tmp); 3713 _cache_put(par); 3714 par = par_tmp; 3715 } 3716 if (par->nc_parent == NULL) { 3717 kprintf("EXDEV case 2 %*.*s\n", 3718 par->nc_nlen, par->nc_nlen, par->nc_name); 3719 _cache_put(par); 3720 return (EXDEV); 3721 } 3722 /* 3723 * The parent is not set in stone, ref and lock it to prevent 3724 * it from disappearing. Also note that due to renames it 3725 * is possible for our ncp to move and for par to no longer 3726 * be one of its parents. We resolve it anyway, the loop 3727 * will handle any moves.
3728 */ 3729 _cache_get(par); /* additional hold/lock */ 3730 _cache_put(par); /* from earlier hold/lock */ 3731 if (par == nch->mount->mnt_ncmountpt.ncp) { 3732 cache_resolve_mp(nch->mount); 3733 } else if ((dvp = cache_dvpref(par)) == NULL) { 3734 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name); 3735 _cache_put(par); 3736 continue; 3737 } else { 3738 if (par->nc_flag & NCF_UNRESOLVED) { 3739 nctmp.mount = mp; 3740 nctmp.ncp = par; 3741 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 3742 } 3743 vrele(dvp); 3744 } 3745 if ((error = par->nc_error) != 0) { 3746 if (par->nc_error != EAGAIN) { 3747 kprintf("EXDEV case 3 %*.*s error %d\n", 3748 par->nc_nlen, par->nc_nlen, par->nc_name, 3749 par->nc_error); 3750 _cache_put(par); 3751 return(error); 3752 } 3753 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n", 3754 par, par->nc_nlen, par->nc_nlen, par->nc_name); 3755 } 3756 _cache_put(par); 3757 /* loop */ 3758 } 3759 3760 /* 3761 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected 3762 * ncp's and reattach them. If this occurs the original ncp is marked 3763 * EAGAIN to force a relookup. 3764 * 3765 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed 3766 * ncp must already be resolved. 3767 */ 3768 if (dvp) { 3769 nctmp.mount = mp; 3770 nctmp.ncp = ncp; 3771 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 3772 vrele(dvp); 3773 } else { 3774 ncp->nc_error = EPERM; 3775 } 3776 if (ncp->nc_error == EAGAIN) { 3777 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n", 3778 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 3779 goto restart; 3780 } 3781 return(ncp->nc_error); 3782 } 3783 3784 /* 3785 * Resolve the ncp associated with a mount point. Such ncp's almost always 3786 * remain resolved and this routine is rarely called. NFS MPs tends to force 3787 * re-resolution more often due to its mac-truck-smash-the-namecache 3788 * method of tracking namespace changes. 3789 * 3790 * The semantics for this call is that the passed ncp must be locked on 3791 * entry and will be locked on return. However, if we actually have to 3792 * resolve the mount point we temporarily unlock the entry in order to 3793 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of 3794 * the unlock we have to recheck the flags after we relock. 3795 */ 3796 static int 3797 cache_resolve_mp(struct mount *mp) 3798 { 3799 struct namecache *ncp = mp->mnt_ncmountpt.ncp; 3800 struct vnode *vp; 3801 int error; 3802 3803 KKASSERT(mp != NULL); 3804 3805 /* 3806 * If the ncp is already resolved we have nothing to do. However, 3807 * we do want to guarentee that a usable vnode is returned when 3808 * a vnode is present, so make sure it hasn't been reclaimed. 3809 */ 3810 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3811 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 3812 _cache_setunresolved(ncp); 3813 } 3814 3815 if (ncp->nc_flag & NCF_UNRESOLVED) { 3816 _cache_unlock(ncp); 3817 while (vfs_busy(mp, 0)) 3818 ; 3819 error = VFS_ROOT(mp, &vp); 3820 _cache_lock(ncp); 3821 3822 /* 3823 * recheck the ncp state after relocking. 
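 * Another thread may have resolved (or re-resolved) the ncp while we
 * had it unlocked across vfs_busy()/VFS_ROOT(), in which case we must
 * not clobber its association and only need to release our vp ref.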
3824 */ 3825 if (ncp->nc_flag & NCF_UNRESOLVED) { 3826 ncp->nc_error = error; 3827 if (error == 0) { 3828 _cache_setvp(mp, ncp, vp); 3829 vput(vp); 3830 } else { 3831 kprintf("[diagnostic] cache_resolve_mp: failed" 3832 " to resolve mount %p err=%d ncp=%p\n", 3833 mp, error, ncp); 3834 _cache_setvp(mp, ncp, NULL); 3835 } 3836 } else if (error == 0) { 3837 vput(vp); 3838 } 3839 vfs_unbusy(mp); 3840 } 3841 return(ncp->nc_error); 3842 } 3843 3844 /* 3845 * Clean out negative cache entries when too many have accumulated. 3846 */ 3847 static void 3848 _cache_cleanneg(long count) 3849 { 3850 struct pcpu_ncache *pn; 3851 struct namecache *ncp; 3852 static uint32_t neg_rover; 3853 uint32_t n; 3854 long vnegs; 3855 3856 n = neg_rover++; /* SMP heuristical, race ok */ 3857 cpu_ccfence(); 3858 n = n % (uint32_t)ncpus; 3859 3860 /* 3861 * Normalize vfscache_negs and count. count is sometimes based 3862 * on vfscache_negs. vfscache_negs is heuristical and can sometimes 3863 * have crazy values. 3864 */ 3865 vnegs = vfscache_negs; 3866 cpu_ccfence(); 3867 if (vnegs <= MINNEG) 3868 vnegs = MINNEG; 3869 if (count < 1) 3870 count = 1; 3871 3872 pn = &pcpu_ncache[n]; 3873 spin_lock(&pn->neg_spin); 3874 count = pn->neg_count * count / vnegs + 1; 3875 spin_unlock(&pn->neg_spin); 3876 3877 /* 3878 * Attempt to clean out the specified number of negative cache 3879 * entries. 3880 */ 3881 while (count > 0) { 3882 spin_lock(&pn->neg_spin); 3883 ncp = TAILQ_FIRST(&pn->neg_list); 3884 if (ncp == NULL) { 3885 spin_unlock(&pn->neg_spin); 3886 break; 3887 } 3888 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode); 3889 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode); 3890 _cache_hold(ncp); 3891 spin_unlock(&pn->neg_spin); 3892 3893 /* 3894 * This can race, so we must re-check that the ncp 3895 * is on the ncneg.list after successfully locking it. 3896 */ 3897 if (_cache_lock_special(ncp) == 0) { 3898 if (ncp->nc_vp == NULL && 3899 (ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3900 ncp = cache_zap(ncp, 1); 3901 if (ncp) 3902 _cache_drop(ncp); 3903 } else { 3904 _cache_unlock(ncp); 3905 _cache_drop(ncp); 3906 } 3907 } else { 3908 _cache_drop(ncp); 3909 } 3910 --count; 3911 } 3912 } 3913 3914 /* 3915 * Clean out positive cache entries when too many have accumulated. 3916 */ 3917 static void 3918 _cache_cleanpos(long count) 3919 { 3920 static volatile int rover; 3921 struct nchash_head *nchpp; 3922 struct namecache *ncp; 3923 int rover_copy; 3924 3925 /* 3926 * Attempt to clean out the specified number of negative cache 3927 * entries. 3928 */ 3929 while (count > 0) { 3930 rover_copy = ++rover; /* MPSAFEENOUGH */ 3931 cpu_ccfence(); 3932 nchpp = NCHHASH(rover_copy); 3933 3934 if (TAILQ_FIRST(&nchpp->list) == NULL) { 3935 --count; 3936 continue; 3937 } 3938 3939 /* 3940 * Cycle ncp on list, ignore and do not move DUMMY 3941 * ncps. These are temporary list iterators. 3942 * 3943 * We must cycle the ncp to the end of the list to 3944 * ensure that all ncp's have an equal chance of 3945 * being removed. 
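 *
 * (The NCF_DUMMY placeholders skipped here are the temporary list
 * iterators inserted by _cache_cleandefered(); they are neither
 * cycled nor zapped.)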
		 */
		spin_lock(&nchpp->spin);
		ncp = TAILQ_FIRST(&nchpp->list);
		while (ncp && (ncp->nc_flag & NCF_DUMMY))
			ncp = TAILQ_NEXT(ncp, nc_hash);
		if (ncp) {
			TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
			TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);
			_cache_hold(ncp);
		}
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	/*
	 * Create a list iterator. DUMMY indicates that this is a list
	 * iterator, DESTROYED prevents matches by lookup functions.
	 */
	numdefered = 0;
	pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
	dummy.nc_refs = 1;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
			TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	struct pcpu_ncache *pn;
	globaldata_t gd;
	int i;

	/*
	 * Per-cpu accounting and negative hit list
	 */
	pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
			      M_VFSCACHE, M_WAITOK|M_ZERO);
	for (i = 0; i < ncpus; ++i) {
		pn = &pcpu_ncache[i];
		TAILQ_INIT(&pn->neg_list);
		spin_init(&pn->neg_spin, "ncneg");
	}

	/*
	 * Initialise per-cpu namecache effectiveness statistics.
	 */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}

	/*
	 * Create a generous namecache hash table
	 */
	nchashtbl = hashinit_ext(vfs_inodehashsize(),
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		TAILQ_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin, "nchinit_hash");
	}
	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
		spin_init(&ncmount_cache[i].spin, "nchinit_cache");
	nclockwarn = 5 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem. Returns
 * a referenced, unlocked namecache record.
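 *
 * The record is returned via the passed nchandle: nch->ncp is allocated
 * with cache_alloc(0), nch->mount is set to the supplied mount (which is
 * referenced via _cache_mntref()), and if a vnode is supplied the entry
 * is resolved to it with _cache_setvp().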
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	_cache_mntref(mp);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 * Create an association between the root of our namecache and
 * the root vnode. This routine may be called several times during
 * booting.
 *
 * If the caller intends to save the returned namecache pointer somewhere
 * it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
 * topology and is being removed as quickly as possible. The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache. This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children. The namecache topology is left intact even if we do not
 * know what the vnode association is. Such entries will be marked
 * NCF_UNRESOLVED.
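 *
 * Here this is simply a wrapper around cache_inval_vp() with the
 * CINV_DESTROY and CINV_CHILDREN flags.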
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

static u_long numcwdcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
    "Number of current directory resolution calls");
static u_long numcwdfailnf;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
    "Number of current directory failures due to lack of file");
static u_long numcwdfailsz;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
    "Number of current directory failures due to large result");
static u_long numcwdfound;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
    "Number of current directory resolution successes");

/*
 * MPALMOSTSAFE
 */
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfailsz++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory. This isn't a mount point so we don't
		 * have to check again.
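		 *
		 * nc_parent is only stable while the child ncp is locked,
		 * so the loop below takes the lock (shared unless
		 * ncp_shared_lock_disable is set), re-reads nc_parent, and
		 * retries if it changed in the meantime.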
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			if (ncp_shared_lock_disable)
				_cache_lock(ncp);
			else
				_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfailnf++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}

/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0,
    "Disable fullpath lookups");

int
cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp, *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (nchbase)
		fd_nrdir = *nchbase;
	else if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so
		 * whenever we encounter an ncp marked as a mountpoint.
		 * We try to find the actual mount by locating the mount
		 * whose mount point is this ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory. This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
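		 *
		 * Unlike kern_getcwd() above, the lock here is always taken
		 * exclusively; the parent pointer is still re-checked after
		 * the lock is obtained and the loop retries if it changed.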
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
	    char **freebuf, int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	*freebuf = NULL;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock_shared(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock_shared(&vn->v_spin);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock_shared(&vn->v_spin);

	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}

void
vfscache_rollup_cpu(struct globaldata *gd)
{
	struct pcpu_ncache *pn;
	long count;

	if (pcpu_ncache == NULL)
		return;
	pn = &pcpu_ncache[gd->gd_cpuid];

	if (pn->vfscache_count) {
		count = atomic_swap_long(&pn->vfscache_count, 0);
		atomic_add_long(&vfscache_count, count);
	}
	if (pn->vfscache_leafs) {
		count = atomic_swap_long(&pn->vfscache_leafs, 0);
		atomic_add_long(&vfscache_leafs, count);
	}
	if (pn->vfscache_negs) {
		count = atomic_swap_long(&pn->vfscache_negs, 0);
		atomic_add_long(&vfscache_negs, count);
	}
	if (pn->numdefered) {
		count = atomic_swap_long(&pn->numdefered, 0);
		atomic_add_long(&numdefered, count);
	}
}

#if 0
static void
vfscache_rollup_all(void)
{
	int n;

	for (n = 0; n < ncpus; ++n)
		vfscache_rollup_cpu(globaldata_find(n));
}
#endif
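
#if 0
/*
 * Illustrative sketch only (not compiled): how a caller might use
 * vn_fullpath(). On success *retbuf points into the buffer returned in
 * *freebuf, so only freebuf is released, using the M_TEMP malloc type it
 * was allocated with. The function name example_report_path() is
 * hypothetical and not part of this file's API.
 */
static void
example_report_path(struct proc *p, struct vnode *vp)
{
	char *retbuf;
	char *freebuf;

	if (vn_fullpath(p, vp, &retbuf, &freebuf, 0) == 0) {
		kprintf("vnode path: %s\n", retbuf);
		kfree(freebuf, M_TEMP);
	}
}
#endif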