/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/spinlock2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  pcpu_ncache[n].neg_list
 * is locked via pcpu_ncache[n].neg_spin.
 *
 * MPSAFE RULES:
 *
 * (1) An ncp must be referenced before it can be locked.
 *
 * (2) An ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked-up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) Parent linkages require both the parent and child to be locked.
 */
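
/*
 * Illustrative sketch (not part of the original code): rule (3) above
 * means a modifying path that needs both locks must take the child
 * first.  Assuming a referenced child ncp whose nc_parent is stable,
 * the pattern looks roughly like this:
 *
 *	_cache_hold(ncp);
 *	_cache_lock(ncp);
 *	par = ncp->nc_parent;
 *	_cache_hold(par);
 *	_cache_lock(par);
 *	... modify the linkage ...
 *	_cache_put(par);
 *	_cache_put(ncp);
 *
 * Taking the parent before the child in such a path risks deadlock
 * against this ordering; see _cache_unlink_parent() below for the
 * real thing.
 */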

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
#define MINPOS			1024
#define NCMOUNT_NUMCACHE	16301	/* prime number */

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

TAILQ_HEAD(nchash_list, namecache);

/*
 * Don't cachealign, but at least pad to 32 bytes so entries
 * don't cross a cache line.
 */
struct nchash_head {
	struct nchash_list list;	/* 16 bytes */
	struct spinlock	spin;		/* 8 bytes */
	long	pad01;			/* 8 bytes */
};

struct ncmount_cache {
	struct spinlock	spin;
	struct namecache *ncp;
	struct mount *mp;
	int	isneg;		/* if != 0 mp is originator and not target */
} __cachealign;

struct pcpu_ncache {
	struct spinlock		neg_spin;	/* for neg_list and neg_count */
	struct namecache_list	neg_list;
	long			neg_count;
	long			vfscache_negs;
	long			vfscache_count;
	long			vfscache_leafs;
	long			numdefered;
} __cachealign;

__read_mostly static struct nchash_head	*nchashtbl;
__read_mostly static struct pcpu_ncache	*pcpu_ncache;
static struct ncmount_cache	ncmount_cache[NCMOUNT_NUMCACHE];

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
__read_mostly static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");

__read_mostly static u_long nchash;		/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");

__read_mostly static int ncnegflush = 10;	/* burst for negative flush */
SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
    "Batch flush negative entries");

__read_mostly static int ncposflush = 10;	/* burst for positive flush */
SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
    "Batch flush positive entries");

__read_mostly static int ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of namecache negative entries");

__read_mostly static int nclockwarn;	/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");

__read_mostly static int ncposlimit;	/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Number of cache entries allocated");

__read_mostly static int ncp_shared_lock_disable = 0;
SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
    &ncp_shared_lock_disable, 0, "Disable shared namecache locks");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");

__read_mostly static int ncmount_cache_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
    &ncmount_cache_enable, 0, "mount point cache");

static __inline void _cache_drop(struct namecache *ncp);
static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(long count);
static void _cache_cleanpos(long count);
static void _cache_cleandefered(void);
static void _cache_unlink(struct namecache *ncp);
#if 0
static void vfscache_rollup_all(void);
#endif

/*
 * The new name cache statistics (these are rolled up globals and not
 * modified in the critical path, see struct pcpu_ncache).
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
static long vfscache_negs;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
    "Number of negative namecache entries");
static long vfscache_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
    "Number of namecache entries");
static long vfscache_leafs;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
    "Number of namecache leaf entries");
static long numdefered;
SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred zap operations");
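
/*
 * Example (illustrative, userland): the rolled-up globals above can be
 * read with sysctl(3), e.g.:
 *
 *	long numcache;
 *	size_t len = sizeof(numcache);
 *	sysctlbyname("vfs.cache.numcache", &numcache, &len, NULL, 0);
 *
 * or from the shell with 'sysctl vfs.cache.numcache'.  The per-cpu
 * vfs.cache.nchstats array below is exported raw, one struct nchstats
 * per cpu, for user-land aggregation.
 */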

struct nchstats nchstats[SMP_MAXCPU];

/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
					sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static struct namecache *cache_zap(struct namecache *ncp, int nonblock);

/*
 * Cache mount points and namecache records in order to avoid unnecessary
 * atomic ops on mnt_refs and ncp->refs.  This improves concurrent SMP
 * performance and is particularly important on multi-socket systems to
 * reduce cache-line ping-ponging.
 *
 * Try to keep the pcpu structure within one cache line (~64 bytes).
 */
#define MNTCACHE_COUNT	5

struct mntcache {
	struct mount	*mntary[MNTCACHE_COUNT];
	struct namecache *ncp1;
	struct namecache *ncp2;
	struct nchandle  ncdir;
	int		iter;
	int		unused01;
} __cachealign;

static struct mntcache	pcpu_mntcache[MAXCPU];

static
void
_cache_mntref(struct mount *mp)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
	int i;

	for (i = 0; i < MNTCACHE_COUNT; ++i) {
		if (cache->mntary[i] != mp)
			continue;
		if (atomic_cmpset_ptr((void *)&cache->mntary[i], mp, NULL))
			return;
	}
	atomic_add_int(&mp->mnt_refs, 1);
}

static
void
_cache_mntrel(struct mount *mp)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
	int i;

	for (i = 0; i < MNTCACHE_COUNT; ++i) {
		if (cache->mntary[i] == NULL) {
			mp = atomic_swap_ptr((void *)&cache->mntary[i], mp);
			if (mp == NULL)
				return;
		}
	}
	i = (int)((uint32_t)++cache->iter % (uint32_t)MNTCACHE_COUNT);
	mp = atomic_swap_ptr((void *)&cache->mntary[i], mp);
	if (mp)
		atomic_add_int(&mp->mnt_refs, -1);
}

/*
 * Clears all cached mount points on all cpus.  This routine should only
 * be called when we are waiting for a mount to clear, e.g. so we can
 * unmount.
 */
void
cache_clearmntcache(void)
{
	int n;

	for (n = 0; n < ncpus; ++n) {
		struct mntcache *cache = &pcpu_mntcache[n];
		struct namecache *ncp;
		struct mount *mp;
		int i;

		for (i = 0; i < MNTCACHE_COUNT; ++i) {
			if (cache->mntary[i]) {
				mp = atomic_swap_ptr(
					(void *)&cache->mntary[i], NULL);
				if (mp)
					atomic_add_int(&mp->mnt_refs, -1);
			}
		}
		if (cache->ncp1) {
			ncp = atomic_swap_ptr((void *)&cache->ncp1, NULL);
			if (ncp)
				_cache_drop(ncp);
		}
		if (cache->ncp2) {
			ncp = atomic_swap_ptr((void *)&cache->ncp2, NULL);
			if (ncp)
				_cache_drop(ncp);
		}
		if (cache->ncdir.ncp) {
			ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, NULL);
			if (ncp)
				_cache_drop(ncp);
		}
		if (cache->ncdir.mount) {
			mp = atomic_swap_ptr((void *)&cache->ncdir.mount, NULL);
			if (mp)
				atomic_add_int(&mp->mnt_refs, -1);
		}
	}
}
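
/*
 * Illustrative note (a sketch, not additional code): the per-cpu
 * mntcache above turns most mount ref/unref pairs into simple pointer
 * swaps.  A typical cycle, assuming no other cpu touches the slot:
 *
 *	_cache_mntref(mp);	takes a parked ref via cmpset, or falls
 *				back to atomic_add_int on mnt_refs
 *	... use mp ...
 *	_cache_mntrel(mp);	parks the ref back in an empty slot for
 *				the next _cache_mntref on this cpu
 *
 * Unmount paths call cache_clearmntcache() first so parked refs cannot
 * keep mnt_refs artificially non-zero.
 */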

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * An exclusive lock owner has full authority to associate/disassociate
 * vnodes and resolve/unresolve the locked ncp.
 *
 * A shared lock owner only has authority to acquire the underlying vnode,
 * if any.
 *
 * The primary lock field is nc_lockstatus.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
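
/*
 * Example (sketch only): per the WARNING above, a caller that needs a
 * definitively usable vnode should prefer the ref-and-lock API over a
 * bare lock.  Given an already-referenced nchandle:
 *
 *	cache_get(&nch, &nch);			ref+lock, revalidates a
 *						VRECLAIMED vnode to unresolved
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	...
 *	cache_put(&nch);			unlock+deref
 *
 * whereas cache_lock()/cache_unlock() alone would require an explicit
 * cache_validate() or cache_resolve() to obtain the same guarantee.
 */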
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;
	int begticks;
	int error;
	u_int count;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	begticks = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_lockstatus;
		cpu_ccfence();

		if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			KKASSERT((count & NC_SHLOCK_FLAG) == 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		tsleep_interlock(&ncp->nc_locktd, 0);
		if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
				      count | NC_EXLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		if (begticks == 0)
			begticks = ticks;
		error = tsleep(&ncp->nc_locktd, PINTERLOCKED,
			       "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: "
					"%s blocked on %p %08x",
					td->td_comm, ncp, count);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
		/* loop */
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: %s unblocked %*.*s after "
			"%d secs\n",
			td->td_comm,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks + (hz / 2) - begticks) / hz);
	}
}

/*
 * The shared lock works similarly to the exclusive lock except
 * nc_locktd is left NULL and we need an interlock (VHOLD) to
 * prevent vhold() races, since the moment our cmpset_int succeeds
 * another cpu can come in and get its own shared lock.
 *
 * A critical section is needed to prevent interruption during the
 * VHOLD interlock.
 */
static
void
_cache_lock_shared(struct namecache *ncp)
{
	int didwarn;
	int error;
	u_int count;
	u_int optreq = NC_EXLOCK_REQ;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;

	for (;;) {
		count = ncp->nc_lockstatus;
		cpu_ccfence();

		if ((count & ~NC_SHLOCK_REQ) == 0) {
			crit_enter();
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
				      count,
				      (count + 1) | NC_SHLOCK_FLAG |
						    NC_SHLOCK_VHOLD)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				atomic_clear_int(&ncp->nc_lockstatus,
						 NC_SHLOCK_VHOLD);
				crit_exit();
				break;
			}
			/* cmpset failed */
			crit_exit();
			continue;
		}

		/*
		 * If already held shared we can just bump the count, but
		 * only allow this if nobody is trying to get the lock
		 * exclusively.  If we are blocking too long ignore excl
		 * requests (which can race/deadlock us).
		 *
		 * VHOLD is a bit of a hack.  Even though we successfully
		 * added another shared ref, the cpu that got the first
		 * shared ref might not yet have held the vnode.
		 */
		if ((count & (optreq|NC_SHLOCK_FLAG)) == NC_SHLOCK_FLAG) {
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
					cpu_pause();
				break;
			}
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_lockstatus, count,
				      count | NC_SHLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clocksh", nclockwarn);
		if (error == EWOULDBLOCK) {
			optreq = 0;
			if (didwarn == 0) {
				didwarn = ticks - nclockwarn;
				kprintf("[diagnostic] cache_lock_shared: "
					"%s blocked on %p %08x "
					"\"%*.*s\"\n",
					curthread->td_comm, ncp, count,
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
		/* loop */
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock_shared: "
			"%s unblocked %*.*s after %d secs\n",
			curthread->td_comm,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}

/*
 * Lock ncp exclusively, non-blocking.  Return 0 on success or
 * EWOULDBLOCK if the lock could not be obtained without blocking.
 *
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_lockstatus;

		if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 0) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

/*
 * The shared lock works similarly to the exclusive lock except
 * nc_locktd is left NULL and we need an interlock (VHOLD) to
 * prevent vhold() races, since the moment our cmpset_int succeeds
 * another cpu can come in and get its own shared lock.
 *
 * A critical section is needed to prevent interruption during the
 * VHOLD interlock.
 */
static
int
_cache_lock_shared_nonblock(struct namecache *ncp)
{
	u_int count;

	for (;;) {
		count = ncp->nc_lockstatus;

		if ((count & ~NC_SHLOCK_REQ) == 0) {
			crit_enter();
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
				      count,
				      (count + 1) | NC_SHLOCK_FLAG |
						    NC_SHLOCK_VHOLD)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);
				atomic_clear_int(&ncp->nc_lockstatus,
						 NC_SHLOCK_VHOLD);
				crit_exit();
				break;
			}
			/* cmpset failed */
			crit_exit();
			continue;
		}

		/*
		 * If already held shared we can just bump the count, but
		 * only allow this if nobody is trying to get the lock
		 * exclusively.
		 *
		 * VHOLD is a bit of a hack.  Even though we successfully
		 * added another shared ref, the cpu that got the first
		 * shared ref might not yet have held the vnode.
		 */
		if ((count & (NC_EXLOCK_REQ|NC_SHLOCK_FLAG)) ==
		    NC_SHLOCK_FLAG) {
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 0);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count + 1)) {
				while (ncp->nc_lockstatus & NC_SHLOCK_VHOLD)
					cpu_pause();
				break;
			}
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

/*
 * Helper function to unlock a locked ncp.
 *
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 * nc_locktd must be NULLed out prior to nc_lockstatus getting cleared.
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;
	u_int ncount;
	struct vnode *dropvp;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT((ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) > 0);
	KKASSERT((ncp->nc_lockstatus & NC_SHLOCK_FLAG) || ncp->nc_locktd == td);

	count = ncp->nc_lockstatus;
	cpu_ccfence();

	/*
	 * Clear nc_locktd prior to the atomic op (excl lock only)
	 */
	if ((count & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1)
		ncp->nc_locktd = NULL;
	dropvp = NULL;

	for (;;) {
		if ((count &
		     ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ|NC_SHLOCK_FLAG)) == 1) {
			dropvp = ncp->nc_vp;
			if (count & NC_EXLOCK_REQ)
				ncount = count & NC_SHLOCK_REQ; /* cnt->0 */
			else
				ncount = 0;

			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, ncount)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(&ncp->nc_locktd);
				else if (count & NC_SHLOCK_REQ)
					wakeup(ncp);
				break;
			}
			dropvp = NULL;
		} else {
			KKASSERT((count & NC_SHLOCK_VHOLD) == 0);
			KKASSERT((count & ~(NC_EXLOCK_REQ |
					    NC_SHLOCK_REQ |
					    NC_SHLOCK_FLAG)) > 1);
			if (atomic_cmpset_int(&ncp->nc_lockstatus,
					      count, count - 1)) {
				break;
			}
		}
		count = ncp->nc_lockstatus;
		cpu_ccfence();
	}

	/*
	 * Don't actually drop the vp until we successfully clean out
	 * the lock, otherwise we may race another shared lock.
	 */
	if (dropvp)
		vdrop(dropvp);
}

static
int
_cache_lockstatus(struct namecache *ncp)
{
	if (ncp->nc_locktd == curthread)
		return(LK_EXCLUSIVE);
	if (ncp->nc_lockstatus & NC_SHLOCK_FLAG)
		return(LK_SHARED);
	return(-1);
}

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't hold one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}
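
/*
 * Usage sketch for the hold/drop pair (illustrative only): a hold keeps
 * the ncp memory from being freed but does not freeze its state, so any
 * inspection still requires a lock:
 *
 *	ncp = _cache_hold(ncp);		may be done holding a spinlock
 *	...
 *	_cache_lock(ncp);		now nc_flag/nc_vp are stable
 *	...
 *	_cache_unlock(ncp);
 *	_cache_drop(ncp);		may zap an unresolved leaf entry
 */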

/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}

/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is held during this call, we can't do
 *	 anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];

	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	/*
	 * Add to hash table and parent, adjust accounting
	 */
	TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
	atomic_add_long(&pn->vfscache_count, 1);
	if (TAILQ_EMPTY(&ncp->nc_list))
		atomic_add_long(&pn->vfscache_leafs, 1);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		atomic_add_long(&pn->vfscache_leafs, -1);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock(&ncp->nc_head->spin);

		/*
		 * Remove from hash table and parent, adjust accounting
		 */
		TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		atomic_add_long(&pn->vfscache_count, -1);
		if (TAILQ_EMPTY(&ncp->nc_list))
			atomic_add_long(&pn->vfscache_leafs, -1);

		dropvp = NULL;
		if (TAILQ_EMPTY(&par->nc_list)) {
			atomic_add_long(&pn->vfscache_leafs, 1);
			if (par->nc_vp)
				dropvp = par->nc_vp;
		}
		spin_unlock(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_lockstatus == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

/*
 * [re]initialize a nchandle.
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	_cache_mntref(nch->mount);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
	struct namecache *ncp;

	*target = *nch;
	_cache_mntref(target->mount);
	ncp = target->ncp;
	if (ncp) {
		if (ncp == cache->ncp1) {
			if (atomic_cmpset_ptr((void *)&cache->ncp1, ncp, NULL))
				return;
		}
		if (ncp == cache->ncp2) {
			if (atomic_cmpset_ptr((void *)&cache->ncp2, ncp, NULL))
				return;
		}
		_cache_hold(ncp);
	}
}

/*
 * Caller wants to copy the current directory, copy it out from our
 * pcpu cache if possible (the entire critical path is just two localized
 * cmpset ops).  If the pcpu cache has a snapshot at all it will be a
 * valid one, so we don't have to lock p->p_fd even though we are loading
 * two fields.
 *
 * This has a limited effect since nlookup must still ref and shlock the
 * vnode to check perms.  We do avoid the per-proc spin-lock though, which
 * can aid threaded programs.
 */
void
cache_copy_ncdir(struct proc *p, struct nchandle *target)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];

	*target = p->p_fd->fd_ncdir;
	if (target->ncp == cache->ncdir.ncp &&
	    target->mount == cache->ncdir.mount) {
		if (atomic_cmpset_ptr((void *)&cache->ncdir.ncp,
				      target->ncp, NULL)) {
			if (atomic_cmpset_ptr((void *)&cache->ncdir.mount,
					      target->mount, NULL)) {
				/* CRITICAL PATH */
				return;
			}
			_cache_drop(target->ncp);
		}
	}
	spin_lock_shared(&p->p_fd->fd_spin);
	cache_copy(&p->p_fd->fd_ncdir, target);
	spin_unlock_shared(&p->p_fd->fd_spin);
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	_cache_mntref(mp);
	_cache_mntrel(nch->mount);
	nch->mount = mp;
}

void
cache_drop(struct nchandle *nch)
{
	_cache_mntrel(nch->mount);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Drop the nchandle, but try to cache the ref to avoid global atomic
 * ops.  This is typically done on the system root and jail root nchandles.
 */
void
cache_drop_and_cache(struct nchandle *nch)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];
	struct namecache *ncp;

	_cache_mntrel(nch->mount);
	ncp = nch->ncp;
	if (cache->ncp1 == NULL) {
		ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp);
		if (ncp == NULL)
			goto done;
	}
	if (cache->ncp2 == NULL) {
		ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp);
		if (ncp == NULL)
			goto done;
	}
	if (++cache->iter & 1)
		ncp = atomic_swap_ptr((void *)&cache->ncp2, ncp);
	else
		ncp = atomic_swap_ptr((void *)&cache->ncp1, ncp);
	if (ncp)
		_cache_drop(ncp);
done:
	nch->ncp = NULL;
	nch->mount = NULL;
}
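
/*
 * Illustrative pairing (not part of the original code): paths that bang
 * on the same nchandle over and over, such as resolving against the
 * system or jail root, can keep the ref local to the cpu:
 *
 *	cache_copy(&rootnch, &nch);	may steal a parked pcpu ref
 *	... use nch ...
 *	cache_drop_and_cache(&nch);	parks the ref in ncp1/ncp2
 *
 * versus cache_hold()/cache_drop(), which always hit nc_refs with
 * global atomic ops.
 */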

/*
 * We are dropping what the caller believes is the current directory;
 * unconditionally store it in our pcpu cache.  Anything already in
 * the cache will be discarded.
 */
void
cache_drop_ncdir(struct nchandle *nch)
{
	struct mntcache *cache = &pcpu_mntcache[mycpu->gd_cpuid];

	nch->ncp = atomic_swap_ptr((void *)&cache->ncdir.ncp, nch->ncp);
	nch->mount = atomic_swap_ptr((void *)&cache->ncdir.mount, nch->mount);
	if (nch->ncp)
		_cache_drop(nch->ncp);
	if (nch->mount)
		_cache_mntrel(nch->mount);
	nch->ncp = NULL;
	nch->mount = NULL;
}

int
cache_lockstatus(struct nchandle *nch)
{
	return(_cache_lockstatus(nch->ncp));
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

void
cache_lock_maybe_shared(struct nchandle *nch, int excl)
{
	struct namecache *ncp = nch->ncp;

	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED)) {
		_cache_lock(ncp);
	} else {
		_cache_lock_shared(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
				_cache_unlock(ncp);
				_cache_lock(ncp);
			}
		} else {
			_cache_unlock(ncp);
			_cache_lock(ncp);
		}
	}
}

/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}
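
/*
 * Sketch of a typical cache_relock() call site (illustrative only): a
 * rename-style operation holding the target locked but needing the
 * source locked as well, without deadlocking against another thread
 * doing the reverse:
 *
 *	cache_lock(&tnch);			nch2 locked, nch1 not
 *	cache_relock(&fnch, cred, &tnch, cred);
 *	... both are now locked and re-resolved; the caller must
 *	    recheck both handles for validity ...
 */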

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * Attempt to obtain a shared lock on the ncp.  A shared lock will only
 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
 * valid.  Otherwise an exclusive lock will be acquired instead.
 */
static
struct namecache *
_cache_get_maybe_shared(struct namecache *ncp, int excl)
{
	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED)) {
		return(_cache_get(ncp));
	}
	_cache_hold(ncp);
	_cache_lock_shared(ncp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
			_cache_unlock(ncp);
			ncp = _cache_get(ncp);
			_cache_drop(ncp);
		}
	} else {
		_cache_unlock(ncp);
		ncp = _cache_get(ncp);
		_cache_drop(ncp);
	}
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_lockstatus &
		     ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ)) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * This function tries to get a shared lock but will back-off to an exclusive
 * lock if:
 *
 * (1) Some other thread is trying to obtain an exclusive lock
 *     (to prevent the exclusive requester from getting livelocked out
 *     by many shared locks).
 *
 * (2) The current thread already owns an exclusive lock (to avoid
 *     deadlocking).
 *
 * WARNING! On machines with lots of cores we really want to try hard to
 *	    get a shared lock or concurrent path lookups can chain-react
 *	    into a very high-latency exclusive lock.
 */
static int
_cache_lock_shared_special(struct namecache *ncp)
{
	/*
	 * Only honor a successful shared lock (returning 0) if there is
	 * no exclusive request pending and the vnode, if present, is not
	 * in a reclaimed state.
	 */
	if (_cache_lock_shared_nonblock(ncp) == 0) {
		if ((ncp->nc_lockstatus & NC_EXLOCK_REQ) == 0) {
			if (ncp->nc_vp == NULL ||
			    (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
				return(0);
			}
		}
		_cache_unlock(ncp);
		return(EWOULDBLOCK);
	}

	/*
	 * Non-blocking shared lock failed.  If we already own the exclusive
	 * lock just acquire another exclusive lock (instead of deadlocking).
	 * Otherwise acquire a shared lock.
	 */
	if (ncp->nc_locktd == curthread) {
		_cache_lock(ncp);
		return(0);
	}
	_cache_lock_shared(ncp);
	return(0);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	_cache_mntref(target->mount);
}

void
cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
	_cache_mntref(target->mount);
}

/*
 * Unlock and deref an ncp.
 */
static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * Unlock and deref an nchandle.
 */
void
cache_put(struct nchandle *nch)
{
	_cache_mntrel(nch->mount);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spin);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock(&vp->v_spin);
		if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		ncp->nc_error = 0;

		/*
		 * XXX: this is a hack to work-around the lack of a real
		 * pfs vfs implementation
		 */
		if (mp != NULL)
			if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
				vp->v_pfsmp = mp;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];

		ncp->nc_vp = NULL;
		ncp->nc_negcpu = mycpu->gd_cpuid;
		spin_lock(&pn->neg_spin);
		TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
		++pn->neg_count;
		spin_unlock(&pn->neg_spin);
		atomic_add_long(&pn->vfscache_negs, 1);

		ncp->nc_error = ENOENT;
		if (mp)
			VFS_NCPGEN_SET(mp, ncp);
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * Arm a timeout (in ticks) on a resolved ncp; when it expires the
 * entry will be set unresolved again on the next access.
 */
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
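
/*
 * Example resolver skeleton (a sketch, not a real VFS): a filesystem's
 * resolve method funnels into the two calls above.  On success it
 * associates the found vnode, on ENOENT it installs a negative hit,
 * and a transient FS might also arm a timeout.  xxx_lookup() below is
 * a hypothetical helper:
 *
 *	error = xxx_lookup(dvp, name, &vp);
 *	if (error == 0)
 *		cache_setvp(nch, vp);		positive entry
 *	else if (error == ENOENT)
 *		cache_setvp(nch, NULL);		negative entry
 *	cache_settimeout(nch, hz * 10);		optional revalidation
 */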

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			spin_lock(&vp->v_spin);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock(&vp->v_spin);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_lockstatus & ~(NC_EXLOCK_REQ|NC_SHLOCK_REQ))
				vdrop(vp);
		} else {
			struct pcpu_ncache *pn;

			pn = &pcpu_ncache[ncp->nc_negcpu];

			atomic_add_long(&pn->vfscache_negs, -1);
			spin_lock(&pn->neg_spin);
			TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
			--pn->neg_count;
			spin_unlock(&pn->neg_spin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point's namecache_gen
 * has changed.
 */
static __inline int
_cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		return 1;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
		return 1;
	}

	/*
	 * Otherwise we are good
	 */
	return 0;
}

static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * If already in an unresolved state there is nothing to do,
	 * otherwise run the auto-unresolve test.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (_cache_auto_unresolve_test(mp, ncp))
			_cache_setunresolved(ncp);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

/*
 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
 * with a mount point.
 */
void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state.
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */
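
/*
 * Example (illustrative): removing a directory typically invalidates
 * the node and everything cached under it:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 *
 * while an invalidation that should still allow re-resolution through
 * the parent would omit CINV_DESTROY.
 */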

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY) {
		ncp->nc_flag |= NCF_DESTROYED;
		++ncp->nc_generation;
	}
	while ((flags & CINV_CHILDREN) &&
	       (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		struct namecache *kid;
		int restart;

		restart = 0;
		_cache_hold(nextkid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		while ((kid = nextkid) != NULL) {
			/*
			 * Parent (ncp) must be locked for the iteration.
			 */
			nextkid = NULL;
			if (kid->nc_parent != ncp) {
				_cache_drop(kid);
				kprintf("cache_inval_internal restartA %s\n",
					ncp->nc_name);
				restart = 1;
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);

			/*
			 * Parent unlocked for this section to avoid
			 * deadlocks.
			 */
			_cache_unlock(ncp);
			if (track->resume_ncp) {
				_cache_drop(kid);
				_cache_lock(ncp);
				break;
			}
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				if (kid->nc_parent != ncp) {
					kprintf("cache_inval_internal "
						"restartB %s\n",
						ncp->nc_name);
					restart = 1;
					_cache_unlock(kid);
					_cache_drop(kid);
					_cache_lock(ncp);
					break;
				}

				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			_cache_lock(ncp);
		}
		if (nextkid)
			_cache_drop(nextkid);
		--track->depth;
		if (restart == 0)
			break;
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spin);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spin);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * Clears the universal directory search 'ok' flag.  This flag allows
 * nlookup() to bypass normal vnode checks.  This flag is a cached flag
 * so clearing it simply forces revalidation.
 */
void
cache_inval_wxok(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_WXOK)
			atomic_clear_short(&ncp->nc_flag, NCF_WXOK);
	}
	spin_unlock(&vp->v_spin);
}
Both fncp and tncp 1989 * must be locked. The target ncp is destroyed (as a normal rename-over 1990 * would destroy the target file or directory). 1991 * 1992 * Because there may be references to the source ncp we cannot copy its 1993 * contents to the target. Instead the source ncp is relinked as the target 1994 * and the target ncp is removed from the namecache topology. 1995 */ 1996 void 1997 cache_rename(struct nchandle *fnch, struct nchandle *tnch) 1998 { 1999 struct namecache *fncp = fnch->ncp; 2000 struct namecache *tncp = tnch->ncp; 2001 struct namecache *tncp_par; 2002 struct nchash_head *nchpp; 2003 u_int32_t hash; 2004 char *oname; 2005 char *nname; 2006 2007 ++fncp->nc_generation; 2008 ++tncp->nc_generation; 2009 if (tncp->nc_nlen) { 2010 nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK); 2011 bcopy(tncp->nc_name, nname, tncp->nc_nlen); 2012 nname[tncp->nc_nlen] = 0; 2013 } else { 2014 nname = NULL; 2015 } 2016 2017 /* 2018 * Rename fncp (unlink) 2019 */ 2020 _cache_unlink_parent(fncp); 2021 oname = fncp->nc_name; 2022 fncp->nc_name = nname; 2023 fncp->nc_nlen = tncp->nc_nlen; 2024 if (oname) 2025 kfree(oname, M_VFSCACHE); 2026 2027 tncp_par = tncp->nc_parent; 2028 _cache_hold(tncp_par); 2029 _cache_lock(tncp_par); 2030 2031 /* 2032 * Rename fncp (relink) 2033 */ 2034 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT); 2035 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash); 2036 nchpp = NCHHASH(hash); 2037 2038 spin_lock(&nchpp->spin); 2039 _cache_link_parent(fncp, tncp_par, nchpp); 2040 spin_unlock(&nchpp->spin); 2041 2042 _cache_put(tncp_par); 2043 2044 /* 2045 * Get rid of the overwritten tncp (unlink) 2046 */ 2047 _cache_unlink(tncp); 2048 } 2049 2050 /* 2051 * Perform actions consistent with unlinking a file. The passed-in ncp 2052 * must be locked. 2053 * 2054 * The ncp is marked DESTROYED so it no longer shows up in searches, 2055 * and will be physically deleted when the vnode goes away. 2056 * 2057 * If the related vnode has no refs then we cycle it through vget()/vput() 2058 * to (possibly if we don't have a ref race) trigger a deactivation, 2059 * allowing the VFS to trivially detect and recycle the deleted vnode 2060 * via VOP_INACTIVE(). 2061 * 2062 * NOTE: _cache_rename() will automatically call _cache_unlink() on the 2063 * target ncp. 2064 */ 2065 void 2066 cache_unlink(struct nchandle *nch) 2067 { 2068 _cache_unlink(nch->ncp); 2069 } 2070 2071 static void 2072 _cache_unlink(struct namecache *ncp) 2073 { 2074 struct vnode *vp; 2075 2076 /* 2077 * Causes lookups to fail and allows another ncp with the same 2078 * name to be created under ncp->nc_parent. 2079 */ 2080 ncp->nc_flag |= NCF_DESTROYED; 2081 ++ncp->nc_generation; 2082 2083 /* 2084 * Attempt to trigger a deactivation. Set VREF_FINALIZE to 2085 * force action on the 1->0 transition. 2086 */ 2087 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 && 2088 (vp = ncp->nc_vp) != NULL) { 2089 atomic_set_int(&vp->v_refcnt, VREF_FINALIZE); 2090 if (VREFCNT(vp) <= 0) { 2091 if (vget(vp, LK_SHARED) == 0) 2092 vput(vp); 2093 } 2094 } 2095 } 2096 2097 /* 2098 * Return non-zero if the nch might be associated with an open and/or mmap()'d 2099 * file. The easy solution is to just return non-zero if the vnode has refs. 2100 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to 2101 * force the reclaim). 
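 *
 * Minimal usage sketch (hypothetical caller deciding whether to defer
 * a reclaim):
 *
 *	if (cache_isopen(&nch))
 *		error = EBUSY;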
 */
int
cache_isopen(struct nchandle *nch)
{
	struct vnode *vp;
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
	    (vp = ncp->nc_vp) != NULL &&
	    VREFCNT(vp)) {
		return 1;
	}
	return 0;
}


/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.  If
 * the ncp is resolved it might be locked shared.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked vnode
 * (depending on the passed lk_type) will be returned in *vpp with an error
 * of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 *
			 * The ncp may have been locked shared, we must relock
			 * it exclusively before we can set it to unresolved.
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Similar to cache_vget() but only acquires a ref on the vnode.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
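 *
 * Usage sketch (hypothetical caller; nch must be referenced and locked
 * as described above):
 *
 *	struct vnode *vp;
 *
 *	if (cache_vref(&nch, cred, &vp) == 0) {
 *		...use the referenced, unlocked vnode...
 *		vrele(vp);
 *	}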
 */
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
 *	 lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
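 *
 * Usage sketch (hypothetical NFS-server-style caller):
 *
 *	struct nchandle nch;
 *
 *	if (cache_fromdvp(dvp, cred, 1, &nch) == 0) {
 *		...nch.ncp is held and unlocked...
 *		cache_drop(&nch);
 *	}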
2312 */ 2313 2314 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 2315 struct vnode *dvp, char *fakename); 2316 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2317 struct vnode **saved_dvp); 2318 2319 int 2320 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit, 2321 struct nchandle *nch) 2322 { 2323 struct vnode *saved_dvp; 2324 struct vnode *pvp; 2325 char *fakename; 2326 int error; 2327 2328 nch->ncp = NULL; 2329 nch->mount = dvp->v_mount; 2330 saved_dvp = NULL; 2331 fakename = NULL; 2332 2333 /* 2334 * Handle the makeit == 0 degenerate case 2335 */ 2336 if (makeit == 0) { 2337 spin_lock_shared(&dvp->v_spin); 2338 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2339 if (nch->ncp) 2340 cache_hold(nch); 2341 spin_unlock_shared(&dvp->v_spin); 2342 } 2343 2344 /* 2345 * Loop until resolution, inside code will break out on error. 2346 */ 2347 while (makeit) { 2348 /* 2349 * Break out if we successfully acquire a working ncp. 2350 */ 2351 spin_lock_shared(&dvp->v_spin); 2352 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 2353 if (nch->ncp) { 2354 cache_hold(nch); 2355 spin_unlock_shared(&dvp->v_spin); 2356 break; 2357 } 2358 spin_unlock_shared(&dvp->v_spin); 2359 2360 /* 2361 * If dvp is the root of its filesystem it should already 2362 * have a namecache pointer associated with it as a side 2363 * effect of the mount, but it may have been disassociated. 2364 */ 2365 if (dvp->v_flag & VROOT) { 2366 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp); 2367 error = cache_resolve_mp(nch->mount); 2368 _cache_put(nch->ncp); 2369 if (ncvp_debug) { 2370 kprintf("cache_fromdvp: resolve root of mount %p error %d", 2371 dvp->v_mount, error); 2372 } 2373 if (error) { 2374 if (ncvp_debug) 2375 kprintf(" failed\n"); 2376 nch->ncp = NULL; 2377 break; 2378 } 2379 if (ncvp_debug) 2380 kprintf(" succeeded\n"); 2381 continue; 2382 } 2383 2384 /* 2385 * If we are recursed too deeply resort to an O(n^2) 2386 * algorithm to resolve the namecache topology. The 2387 * resolved pvp is left referenced in saved_dvp to 2388 * prevent the tree from being destroyed while we loop. 2389 */ 2390 if (makeit > 20) { 2391 error = cache_fromdvp_try(dvp, cred, &saved_dvp); 2392 if (error) { 2393 kprintf("lookupdotdot(longpath) failed %d " 2394 "dvp %p\n", error, dvp); 2395 nch->ncp = NULL; 2396 break; 2397 } 2398 continue; 2399 } 2400 2401 /* 2402 * Get the parent directory and resolve its ncp. 2403 */ 2404 if (fakename) { 2405 kfree(fakename, M_TEMP); 2406 fakename = NULL; 2407 } 2408 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2409 &fakename); 2410 if (error) { 2411 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp); 2412 break; 2413 } 2414 vn_unlock(pvp); 2415 2416 /* 2417 * Reuse makeit as a recursion depth counter. On success 2418 * nch will be fully referenced. 2419 */ 2420 cache_fromdvp(pvp, cred, makeit + 1, nch); 2421 vrele(pvp); 2422 if (nch->ncp == NULL) 2423 break; 2424 2425 /* 2426 * Do an inefficient scan of pvp (embodied by ncp) to look 2427 * for dvp. This will create a namecache record for dvp on 2428 * success. We loop up to recheck on success. 2429 * 2430 * ncp and dvp are both held but not locked. 
2431 */ 2432 error = cache_inefficient_scan(nch, cred, dvp, fakename); 2433 if (error) { 2434 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 2435 pvp, nch->ncp->nc_name, dvp); 2436 cache_drop(nch); 2437 /* nch was NULLed out, reload mount */ 2438 nch->mount = dvp->v_mount; 2439 break; 2440 } 2441 if (ncvp_debug) { 2442 kprintf("cache_fromdvp: scan %p (%s) succeeded\n", 2443 pvp, nch->ncp->nc_name); 2444 } 2445 cache_drop(nch); 2446 /* nch was NULLed out, reload mount */ 2447 nch->mount = dvp->v_mount; 2448 } 2449 2450 /* 2451 * If nch->ncp is non-NULL it will have been held already. 2452 */ 2453 if (fakename) 2454 kfree(fakename, M_TEMP); 2455 if (saved_dvp) 2456 vrele(saved_dvp); 2457 if (nch->ncp) 2458 return (0); 2459 return (EINVAL); 2460 } 2461 2462 /* 2463 * Go up the chain of parent directories until we find something 2464 * we can resolve into the namecache. This is very inefficient. 2465 */ 2466 static 2467 int 2468 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 2469 struct vnode **saved_dvp) 2470 { 2471 struct nchandle nch; 2472 struct vnode *pvp; 2473 int error; 2474 static time_t last_fromdvp_report; 2475 char *fakename; 2476 2477 /* 2478 * Loop getting the parent directory vnode until we get something we 2479 * can resolve in the namecache. 2480 */ 2481 vref(dvp); 2482 nch.mount = dvp->v_mount; 2483 nch.ncp = NULL; 2484 fakename = NULL; 2485 2486 for (;;) { 2487 if (fakename) { 2488 kfree(fakename, M_TEMP); 2489 fakename = NULL; 2490 } 2491 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 2492 &fakename); 2493 if (error) { 2494 vrele(dvp); 2495 break; 2496 } 2497 vn_unlock(pvp); 2498 spin_lock_shared(&pvp->v_spin); 2499 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 2500 _cache_hold(nch.ncp); 2501 spin_unlock_shared(&pvp->v_spin); 2502 vrele(pvp); 2503 break; 2504 } 2505 spin_unlock_shared(&pvp->v_spin); 2506 if (pvp->v_flag & VROOT) { 2507 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp); 2508 error = cache_resolve_mp(nch.mount); 2509 _cache_unlock(nch.ncp); 2510 vrele(pvp); 2511 if (error) { 2512 _cache_drop(nch.ncp); 2513 nch.ncp = NULL; 2514 vrele(dvp); 2515 } 2516 break; 2517 } 2518 vrele(dvp); 2519 dvp = pvp; 2520 } 2521 if (error == 0) { 2522 if (last_fromdvp_report != time_uptime) { 2523 last_fromdvp_report = time_uptime; 2524 kprintf("Warning: extremely inefficient path " 2525 "resolution on %s\n", 2526 nch.ncp->nc_name); 2527 } 2528 error = cache_inefficient_scan(&nch, cred, dvp, fakename); 2529 2530 /* 2531 * Hopefully dvp now has a namecache record associated with 2532 * it. Leave it referenced to prevent the kernel from 2533 * recycling the vnode. Otherwise extremely long directory 2534 * paths could result in endless recycling. 2535 */ 2536 if (*saved_dvp) 2537 vrele(*saved_dvp); 2538 *saved_dvp = dvp; 2539 _cache_drop(nch.ncp); 2540 } 2541 if (fakename) 2542 kfree(fakename, M_TEMP); 2543 return (error); 2544 } 2545 2546 /* 2547 * Do an inefficient scan of the directory represented by ncp looking for 2548 * the directory vnode dvp. ncp must be held but not locked on entry and 2549 * will be held on return. dvp must be refd but not locked on entry and 2550 * will remain refd on return. 2551 * 2552 * Why do this at all? Well, due to its stateless nature the NFS server 2553 * converts file handles directly to vnodes without necessarily going through 2554 * the namecache ops that would otherwise create the namecache topology 2555 * leading to the vnode. 
We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged opportunistically,
 * or (2) Make the NFS server backtrack and scan to recover a connected
 * namecache topology in order to then be able to issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
			"vattr fileid = %lld\n",
			nch->ncp, nch->ncp->nc_name,
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
	struct namecache *par;
	struct vnode *dropvp;
	struct nchash_head *nchpp;
	int refs;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * Note that once the spinlocks are acquired if nc_refs == 1 no
	 * other references are possible.  If it isn't, however, we have
	 * to decrement but also be sure to avoid a 1->0 transition.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.
	 */
	nchpp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			for (;;) {
				if (_cache_lock_nonblock(par) == 0)
					break;
				refs = ncp->nc_refs;
				ncp->nc_flag |= NCF_DEFEREDZAP;
				atomic_add_long(
				    &pcpu_ncache[mycpu->gd_cpuid].numdefered,
				    1);
				if (atomic_cmpset_int(&ncp->nc_refs,
						      refs, refs - 1)) {
					_cache_unlock(ncp);
					return(NULL);
				}
				cpu_pause();
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		nchpp = ncp->nc_head;
		spin_lock(&nchpp->spin);
	}

	/*
	 * At this point if we find refs == 1 it should not be possible for
	 * anyone else to have access to the ncp.  We are holding the only
	 * possible access point left (nchpp) spin-locked.
	 *
	 * If someone other than us has a ref or we have children
	 * we cannot zap the entry.  The 1->0 transition and any
	 * further list operation is protected by the spinlocks
	 * we have acquired but other transitions are not.
	 */
	for (;;) {
		refs = ncp->nc_refs;
		cpu_ccfence();
		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
			break;
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
			if (par) {
				spin_unlock(&nchpp->spin);
				_cache_put(par);
			}
			_cache_unlock(ncp);
			return(NULL);
		}
		cpu_pause();
	}

	/*
	 * We are the only ref and with the spinlocks held no further
	 * refs can be acquired by others.
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
2819 */ 2820 dropvp = NULL; 2821 if (par) { 2822 struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid]; 2823 2824 KKASSERT(nchpp == ncp->nc_head); 2825 TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash); 2826 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 2827 atomic_add_long(&pn->vfscache_count, -1); 2828 if (TAILQ_EMPTY(&ncp->nc_list)) 2829 atomic_add_long(&pn->vfscache_leafs, -1); 2830 2831 if (TAILQ_EMPTY(&par->nc_list)) { 2832 atomic_add_long(&pn->vfscache_leafs, 1); 2833 if (par->nc_vp) 2834 dropvp = par->nc_vp; 2835 } 2836 ncp->nc_head = NULL; 2837 ncp->nc_parent = NULL; 2838 spin_unlock(&nchpp->spin); 2839 _cache_unlock(par); 2840 } else { 2841 KKASSERT(ncp->nc_head == NULL); 2842 } 2843 2844 /* 2845 * ncp should not have picked up any refs. Physically 2846 * destroy the ncp. 2847 */ 2848 if (ncp->nc_refs != 1) { 2849 int save_refs = ncp->nc_refs; 2850 cpu_ccfence(); 2851 panic("cache_zap: %p bad refs %d (%d)\n", 2852 ncp, save_refs, atomic_fetchadd_int(&ncp->nc_refs, 0)); 2853 } 2854 KKASSERT(ncp->nc_refs == 1); 2855 /* _cache_unlock(ncp) not required */ 2856 ncp->nc_refs = -1; /* safety */ 2857 if (ncp->nc_name) 2858 kfree(ncp->nc_name, M_VFSCACHE); 2859 kfree(ncp, M_VFSCACHE); 2860 2861 /* 2862 * Delayed drop (we had to release our spinlocks) 2863 * 2864 * The refed parent (if not NULL) must be dropped. The 2865 * caller is responsible for looping. 2866 */ 2867 if (dropvp) 2868 vdrop(dropvp); 2869 return(par); 2870 } 2871 2872 /* 2873 * Clean up dangling negative cache and defered-drop entries in the 2874 * namecache. 2875 * 2876 * This routine is called in the critical path and also called from 2877 * vnlru(). When called from vnlru we use a lower limit to try to 2878 * deal with the negative cache before the critical path has to start 2879 * dealing with it. 2880 */ 2881 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t; 2882 2883 static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2884 static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW }; 2885 2886 void 2887 cache_hysteresis(int critpath) 2888 { 2889 long poslimit; 2890 long neglimit = maxvnodes / ncnegfactor; 2891 long xnumcache = vfscache_leafs; 2892 2893 if (critpath == 0) 2894 neglimit = neglimit * 8 / 10; 2895 2896 /* 2897 * Don't cache too many negative hits. We use hysteresis to reduce 2898 * the impact on the critical path. 2899 */ 2900 switch(neg_cache_hysteresis_state[critpath]) { 2901 case CHI_LOW: 2902 if (vfscache_negs > MINNEG && vfscache_negs > neglimit) { 2903 if (critpath) 2904 _cache_cleanneg(ncnegflush); 2905 else 2906 _cache_cleanneg(ncnegflush + 2907 vfscache_negs - neglimit); 2908 neg_cache_hysteresis_state[critpath] = CHI_HIGH; 2909 } 2910 break; 2911 case CHI_HIGH: 2912 if (vfscache_negs > MINNEG * 9 / 10 && 2913 vfscache_negs * 9 / 10 > neglimit 2914 ) { 2915 if (critpath) 2916 _cache_cleanneg(ncnegflush); 2917 else 2918 _cache_cleanneg(ncnegflush + 2919 vfscache_negs * 9 / 10 - 2920 neglimit); 2921 } else { 2922 neg_cache_hysteresis_state[critpath] = CHI_LOW; 2923 } 2924 break; 2925 } 2926 2927 /* 2928 * Don't cache too many positive hits. We use hysteresis to reduce 2929 * the impact on the critical path. 2930 * 2931 * Excessive positive hits can accumulate due to large numbers of 2932 * hardlinks (the vnode cache will not prevent hl ncps from growing 2933 * into infinity). 
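	 *
	 * For example (illustrative numbers only): with maxvnodes = 100000
	 * and ncposlimit = 0, poslimit below computes to 200000 on the
	 * critical path and 200000 * 8 / 10 = 160000 when called from
	 * vnlru().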
	 */
	if ((poslimit = ncposlimit) == 0)
		poslimit = maxvnodes * 2;
	if (critpath == 0)
		poslimit = poslimit * 8 / 10;

	switch(pos_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (xnumcache > poslimit && xnumcache > MINPOS) {
			if (critpath)
				_cache_cleanpos(ncposflush);
			else
				_cache_cleanpos(ncposflush +
						xnumcache - poslimit);
			pos_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (xnumcache > poslimit * 5 / 6 && xnumcache > MINPOS) {
			if (critpath)
				_cache_cleanpos(ncposflush);
			else
				_cache_cleanpos(ncposflush +
						xnumcache - poslimit * 5 / 6);
		} else {
			pos_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}

	/*
	 * Clean out dangling deferred-zap ncps which could not be cleanly
	 * dropped if too many build up.  Note that numdefered is
	 * heuristic.  Make sure we are real-time for the current cpu,
	 * plus the global rollup.
	 */
	if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
		_cache_cleandefered();
	}
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locking
 * rules are to order for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct namecache *rep_ncp;	/* reuse a destroyed ncp */
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
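	 *
	 * Typical use of this function, sketched for illustration only
	 * (hypothetical caller, error handling elided):
	 *
	 *	struct nlcomponent nlc;
	 *	struct nchandle nch;
	 *
	 *	nlc.nlc_nameptr = name;
	 *	nlc.nlc_namelen = namelen;
	 *	nch = cache_nlookup(&par_nch, &nlc);
	 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
	 *		cache_resolve(&nch, cred);
	 *	...use nch, then release it with cache_put(&nch)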
3029 */ 3030 cache_hysteresis(1); 3031 3032 /* 3033 * Try to locate an existing entry 3034 */ 3035 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3036 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3037 new_ncp = NULL; 3038 nchpp = NCHHASH(hash); 3039 restart: 3040 rep_ncp = NULL; 3041 if (new_ncp) 3042 spin_lock(&nchpp->spin); 3043 else 3044 spin_lock_shared(&nchpp->spin); 3045 3046 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3047 /* 3048 * Break out if we find a matching entry. Note that 3049 * UNRESOLVED entries may match, but DESTROYED entries 3050 * do not. 3051 * 3052 * We may be able to reuse DESTROYED entries that we come 3053 * across, even if the name does not match, as long as 3054 * nc_nlen is correct. 3055 */ 3056 if (ncp->nc_parent == par_nch->ncp && 3057 ncp->nc_nlen == nlc->nlc_namelen) { 3058 if (ncp->nc_flag & NCF_DESTROYED) { 3059 if (ncp->nc_refs == 0 && rep_ncp == NULL) 3060 rep_ncp = ncp; 3061 continue; 3062 } 3063 if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen)) 3064 continue; 3065 _cache_hold(ncp); 3066 if (new_ncp) 3067 spin_unlock(&nchpp->spin); 3068 else 3069 spin_unlock_shared(&nchpp->spin); 3070 if (par_locked) { 3071 _cache_unlock(par_nch->ncp); 3072 par_locked = 0; 3073 } 3074 if (_cache_lock_special(ncp) == 0) { 3075 /* 3076 * Successfully locked but we must re-test 3077 * conditions that might have changed since 3078 * we did not have the lock before. 3079 */ 3080 if (ncp->nc_parent != par_nch->ncp || 3081 ncp->nc_nlen != nlc->nlc_namelen || 3082 bcmp(ncp->nc_name, nlc->nlc_nameptr, 3083 ncp->nc_nlen) || 3084 (ncp->nc_flag & NCF_DESTROYED)) { 3085 _cache_put(ncp); 3086 goto restart; 3087 } 3088 _cache_auto_unresolve(mp, ncp); 3089 if (new_ncp) 3090 _cache_free(new_ncp); 3091 goto found; 3092 } 3093 _cache_get(ncp); /* cycle the lock to block */ 3094 _cache_put(ncp); 3095 _cache_drop(ncp); 3096 goto restart; 3097 } 3098 } 3099 3100 /* 3101 * We failed to locate the entry, try to resurrect a destroyed 3102 * entry that we did find that is already correctly linked into 3103 * nchpp and the parent. We must re-test conditions after 3104 * successfully locking rep_ncp. 3105 * 3106 * This case can occur under heavy loads due to not being able 3107 * to safely lock the parent in cache_zap(). Nominally a repeated 3108 * create/unlink load, but only the namelen needs to match. 3109 */ 3110 if (rep_ncp && new_ncp == NULL) { 3111 if (_cache_lock_nonblock(rep_ncp) == 0) { 3112 _cache_hold(rep_ncp); 3113 if (rep_ncp->nc_parent == par_nch->ncp && 3114 rep_ncp->nc_nlen == nlc->nlc_namelen && 3115 (rep_ncp->nc_flag & NCF_DESTROYED)) { 3116 /* 3117 * Update nc_name as reuse as new. 3118 */ 3119 ncp = rep_ncp; 3120 bcopy(nlc->nlc_nameptr, ncp->nc_name, 3121 nlc->nlc_namelen); 3122 spin_unlock_shared(&nchpp->spin); 3123 _cache_setunresolved(ncp); 3124 ncp->nc_flag = NCF_UNRESOLVED; 3125 ncp->nc_error = ENOTCONN; 3126 goto found; 3127 } 3128 _cache_put(rep_ncp); 3129 } 3130 } 3131 3132 /* 3133 * Otherwise create a new entry and add it to the cache. The parent 3134 * ncp must also be locked so we can link into it. 3135 * 3136 * We have to relookup after possibly blocking in kmalloc or 3137 * when locking par_nch. 3138 * 3139 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 3140 * mount case, in which case nc_name will be NULL. 
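	 *
	 *	 (For example, cache_allocroot(), near the end of this file,
	 *	 creates exactly such an ncp via cache_alloc(0).)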
3141 */ 3142 if (new_ncp == NULL) { 3143 spin_unlock_shared(&nchpp->spin); 3144 new_ncp = cache_alloc(nlc->nlc_namelen); 3145 if (nlc->nlc_namelen) { 3146 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 3147 nlc->nlc_namelen); 3148 new_ncp->nc_name[nlc->nlc_namelen] = 0; 3149 } 3150 goto restart; 3151 } 3152 3153 /* 3154 * NOTE! The spinlock is held exclusively here because new_ncp 3155 * is non-NULL. 3156 */ 3157 if (par_locked == 0) { 3158 spin_unlock(&nchpp->spin); 3159 _cache_lock(par_nch->ncp); 3160 par_locked = 1; 3161 goto restart; 3162 } 3163 3164 /* 3165 * WARNING! We still hold the spinlock. We have to set the hash 3166 * table entry atomically. 3167 */ 3168 ncp = new_ncp; 3169 _cache_link_parent(ncp, par_nch->ncp, nchpp); 3170 spin_unlock(&nchpp->spin); 3171 _cache_unlock(par_nch->ncp); 3172 /* par_locked = 0 - not used */ 3173 found: 3174 /* 3175 * stats and namecache size management 3176 */ 3177 if (ncp->nc_flag & NCF_UNRESOLVED) 3178 ++gd->gd_nchstats->ncs_miss; 3179 else if (ncp->nc_vp) 3180 ++gd->gd_nchstats->ncs_goodhits; 3181 else 3182 ++gd->gd_nchstats->ncs_neghits; 3183 nch.mount = mp; 3184 nch.ncp = ncp; 3185 _cache_mntref(nch.mount); 3186 3187 return(nch); 3188 } 3189 3190 /* 3191 * Attempt to lookup a namecache entry and return with a shared namecache 3192 * lock. 3193 */ 3194 int 3195 cache_nlookup_maybe_shared(struct nchandle *par_nch, struct nlcomponent *nlc, 3196 int excl, struct nchandle *res_nch) 3197 { 3198 struct namecache *ncp; 3199 struct nchash_head *nchpp; 3200 struct mount *mp; 3201 u_int32_t hash; 3202 globaldata_t gd; 3203 3204 /* 3205 * If exclusive requested or shared namecache locks are disabled, 3206 * return failure. 3207 */ 3208 if (ncp_shared_lock_disable || excl) 3209 return(EWOULDBLOCK); 3210 3211 gd = mycpu; 3212 mp = par_nch->mount; 3213 3214 /* 3215 * This is a good time to call it, no ncp's are locked by 3216 * the caller or us. 3217 */ 3218 cache_hysteresis(1); 3219 3220 /* 3221 * Try to locate an existing entry 3222 */ 3223 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 3224 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 3225 nchpp = NCHHASH(hash); 3226 3227 spin_lock_shared(&nchpp->spin); 3228 3229 TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) { 3230 /* 3231 * Break out if we find a matching entry. Note that 3232 * UNRESOLVED entries may match, but DESTROYED entries 3233 * do not. 3234 */ 3235 if (ncp->nc_parent == par_nch->ncp && 3236 ncp->nc_nlen == nlc->nlc_namelen && 3237 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 3238 (ncp->nc_flag & NCF_DESTROYED) == 0 3239 ) { 3240 _cache_hold(ncp); 3241 spin_unlock_shared(&nchpp->spin); 3242 if (_cache_lock_shared_special(ncp) == 0) { 3243 if (ncp->nc_parent == par_nch->ncp && 3244 ncp->nc_nlen == nlc->nlc_namelen && 3245 bcmp(ncp->nc_name, nlc->nlc_nameptr, 3246 ncp->nc_nlen) == 0 && 3247 (ncp->nc_flag & NCF_DESTROYED) == 0 && 3248 (ncp->nc_flag & NCF_UNRESOLVED) == 0 && 3249 _cache_auto_unresolve_test(mp, ncp) == 0) { 3250 goto found; 3251 } 3252 _cache_unlock(ncp); 3253 } 3254 _cache_drop(ncp); 3255 spin_lock_shared(&nchpp->spin); 3256 break; 3257 } 3258 } 3259 3260 /* 3261 * Failure 3262 */ 3263 spin_unlock_shared(&nchpp->spin); 3264 return(EWOULDBLOCK); 3265 3266 /* 3267 * Success 3268 * 3269 * Note that nc_error might be non-zero (e.g ENOENT). 
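	 *
	 * Illustrative caller pattern (a sketch; assumes the caller prefers
	 * a shared lock and falls back to the exclusive-lock lookup):
	 *
	 *	if (cache_nlookup_maybe_shared(&par_nch, &nlc, 0,
	 *				       &nch) == EWOULDBLOCK)
	 *		nch = cache_nlookup(&par_nch, &nlc);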
	 */
found:
	res_nch->mount = mp;
	res_nch->ncp = ncp;
	++gd->gd_nchstats->ncs_goodhits;
	_cache_mntref(res_nch->mount);

	KKASSERT(ncp->nc_error != EWOULDBLOCK);
	return(ncp->nc_error);
}

/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
struct nchandle
cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				if (ncp->nc_parent != par_nch->ncp ||
				    ncp->nc_nlen != nlc->nlc_namelen ||
				    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) ||
				    (ncp->nc_flag & NCF_DESTROYED)) {
					kprintf("cache_nlookup_nonblock: "
						"ncp-race %p %*.*s\n",
						ncp,
						nlc->nlc_namelen,
						nlc->nlc_namelen,
						nlc->nlc_nameptr);
					_cache_unlock(ncp);
					_cache_drop(ncp);
					goto failed;
				}
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;
				}
				goto found;
			}
			_cache_drop(ncp);
			goto failed;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
			par_locked = 1;
			goto restart;
		}
		goto failed;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
3388 */ 3389 ncp = new_ncp; 3390 _cache_link_parent(ncp, par_nch->ncp, nchpp); 3391 spin_unlock(&nchpp->spin); 3392 _cache_unlock(par_nch->ncp); 3393 /* par_locked = 0 - not used */ 3394 found: 3395 /* 3396 * stats and namecache size management 3397 */ 3398 if (ncp->nc_flag & NCF_UNRESOLVED) 3399 ++gd->gd_nchstats->ncs_miss; 3400 else if (ncp->nc_vp) 3401 ++gd->gd_nchstats->ncs_goodhits; 3402 else 3403 ++gd->gd_nchstats->ncs_neghits; 3404 nch.mount = mp; 3405 nch.ncp = ncp; 3406 _cache_mntref(nch.mount); 3407 3408 return(nch); 3409 failed: 3410 if (new_ncp) { 3411 _cache_free(new_ncp); 3412 new_ncp = NULL; 3413 } 3414 nch.mount = NULL; 3415 nch.ncp = NULL; 3416 return(nch); 3417 } 3418 3419 /* 3420 * The namecache entry is marked as being used as a mount point. 3421 * Locate the mount if it is visible to the caller. The DragonFly 3422 * mount system allows arbitrary loops in the topology and disentangles 3423 * those loops by matching against (mp, ncp) rather than just (ncp). 3424 * This means any given ncp can dive any number of mounts, depending 3425 * on the relative mount (e.g. nullfs) the caller is at in the topology. 3426 * 3427 * We use a very simple frontend cache to reduce SMP conflicts, 3428 * which we have to do because the mountlist scan needs an exclusive 3429 * lock around its ripout info list. Not to mention that there might 3430 * be a lot of mounts. 3431 */ 3432 struct findmount_info { 3433 struct mount *result; 3434 struct mount *nch_mount; 3435 struct namecache *nch_ncp; 3436 }; 3437 3438 static 3439 struct ncmount_cache * 3440 ncmount_cache_lookup(struct mount *mp, struct namecache *ncp) 3441 { 3442 uintptr_t hash; 3443 3444 hash = (uintptr_t)mp + ((uintptr_t)mp >> 18); 3445 hash += (uintptr_t)ncp + ((uintptr_t)ncp >> 16); 3446 hash = (hash >> 1) % NCMOUNT_NUMCACHE; 3447 3448 return (&ncmount_cache[hash]); 3449 } 3450 3451 static 3452 int 3453 cache_findmount_callback(struct mount *mp, void *data) 3454 { 3455 struct findmount_info *info = data; 3456 3457 /* 3458 * Check the mount's mounted-on point against the passed nch. 3459 */ 3460 if (mp->mnt_ncmounton.mount == info->nch_mount && 3461 mp->mnt_ncmounton.ncp == info->nch_ncp 3462 ) { 3463 info->result = mp; 3464 _cache_mntref(mp); 3465 return(-1); 3466 } 3467 return(0); 3468 } 3469 3470 struct mount * 3471 cache_findmount(struct nchandle *nch) 3472 { 3473 struct findmount_info info; 3474 struct ncmount_cache *ncc; 3475 struct mount *mp; 3476 3477 /* 3478 * Fast 3479 */ 3480 if (ncmount_cache_enable == 0) { 3481 ncc = NULL; 3482 goto skip; 3483 } 3484 ncc = ncmount_cache_lookup(nch->mount, nch->ncp); 3485 if (ncc->ncp == nch->ncp) { 3486 spin_lock_shared(&ncc->spin); 3487 if (ncc->isneg == 0 && 3488 ncc->ncp == nch->ncp && (mp = ncc->mp) != NULL) { 3489 if (mp->mnt_ncmounton.mount == nch->mount && 3490 mp->mnt_ncmounton.ncp == nch->ncp) { 3491 /* 3492 * Cache hit (positive) 3493 */ 3494 _cache_mntref(mp); 3495 spin_unlock_shared(&ncc->spin); 3496 return(mp); 3497 } 3498 /* else cache miss */ 3499 } 3500 if (ncc->isneg && 3501 ncc->ncp == nch->ncp && ncc->mp == nch->mount) { 3502 /* 3503 * Cache hit (negative) 3504 */ 3505 spin_unlock_shared(&ncc->spin); 3506 return(NULL); 3507 } 3508 spin_unlock_shared(&ncc->spin); 3509 } 3510 skip: 3511 3512 /* 3513 * Slow 3514 */ 3515 info.result = NULL; 3516 info.nch_mount = nch->mount; 3517 info.nch_ncp = nch->ncp; 3518 mountlist_scan(cache_findmount_callback, &info, 3519 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 3520 3521 /* 3522 * Cache the result. 
	 *
	 * Negative lookups: We cache the originating {ncp,mp}. (mp) is
	 *		     only used for pointer comparisons and is not
	 *		     referenced (otherwise there would be dangling
	 *		     refs).
	 *
	 * Positive lookups: We cache the originating {ncp} and the target
	 *		     (mp). (mp) is referenced.
	 *
	 * Indeterminate:    If the match is undergoing an unmount we do
	 *		     not cache it to avoid racing cache_unmounting(),
	 *		     but still return the match.
	 */
	if (ncc) {
		spin_lock(&ncc->spin);
		if (info.result == NULL) {
			if (ncc->isneg == 0 && ncc->mp)
				_cache_mntrel(ncc->mp);
			ncc->ncp = nch->ncp;
			ncc->mp = nch->mount;
			ncc->isneg = 1;
			spin_unlock(&ncc->spin);
		} else if ((info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0) {
			if (ncc->isneg == 0 && ncc->mp)
				_cache_mntrel(ncc->mp);
			_cache_mntref(info.result);
			ncc->ncp = nch->ncp;
			ncc->mp = info.result;
			ncc->isneg = 0;
			spin_unlock(&ncc->spin);
		} else {
			spin_unlock(&ncc->spin);
		}
	}
	return(info.result);
}

void
cache_dropmount(struct mount *mp)
{
	_cache_mntrel(mp);
}

void
cache_ismounting(struct mount *mp)
{
	struct nchandle *nch = &mp->mnt_ncmounton;
	struct ncmount_cache *ncc;

	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->isneg &&
	    ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
		spin_lock(&ncc->spin);
		if (ncc->isneg &&
		    ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
			ncc->ncp = NULL;
			ncc->mp = NULL;
		}
		spin_unlock(&ncc->spin);
	}
}

void
cache_unmounting(struct mount *mp)
{
	struct nchandle *nch = &mp->mnt_ncmounton;
	struct ncmount_cache *ncc;

	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->isneg == 0 &&
	    ncc->ncp == nch->ncp && ncc->mp == mp) {
		spin_lock(&ncc->spin);
		if (ncc->isneg == 0 &&
		    ncc->ncp == nch->ncp && ncc->mp == mp) {
			_cache_mntrel(mp);
			ncc->ncp = NULL;
			ncc->mp = NULL;
		}
		spin_unlock(&ncc->spin);
	}
}

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * If the ncp was destroyed it will never resolve again.  This
	 * can basically only happen when someone is chdir'd into an
	 * empty directory which is then rmdir'd.  We want to catch this
	 * here and not dive the VFS because the VFS might actually
	 * have a way to re-resolve the disconnected ncp, which will
	 * result in inconsistencies in the cdir/nch for proc->p_fd.
	 */
	if (ncp->nc_flag & NCF_DESTROYED)
		return(EINVAL);

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 *	 ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to their mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
3825 */ 3826 if (ncp->nc_flag & NCF_UNRESOLVED) { 3827 ncp->nc_error = error; 3828 if (error == 0) { 3829 _cache_setvp(mp, ncp, vp); 3830 vput(vp); 3831 } else { 3832 kprintf("[diagnostic] cache_resolve_mp: failed" 3833 " to resolve mount %p err=%d ncp=%p\n", 3834 mp, error, ncp); 3835 _cache_setvp(mp, ncp, NULL); 3836 } 3837 } else if (error == 0) { 3838 vput(vp); 3839 } 3840 vfs_unbusy(mp); 3841 } 3842 return(ncp->nc_error); 3843 } 3844 3845 /* 3846 * Clean out negative cache entries when too many have accumulated. 3847 */ 3848 static void 3849 _cache_cleanneg(long count) 3850 { 3851 struct pcpu_ncache *pn; 3852 struct namecache *ncp; 3853 static uint32_t neg_rover; 3854 uint32_t n; 3855 long vnegs; 3856 3857 n = neg_rover++; /* SMP heuristical, race ok */ 3858 cpu_ccfence(); 3859 n = n % (uint32_t)ncpus; 3860 3861 /* 3862 * Normalize vfscache_negs and count. count is sometimes based 3863 * on vfscache_negs. vfscache_negs is heuristical and can sometimes 3864 * have crazy values. 3865 */ 3866 vnegs = vfscache_negs; 3867 cpu_ccfence(); 3868 if (vnegs <= MINNEG) 3869 vnegs = MINNEG; 3870 if (count < 1) 3871 count = 1; 3872 3873 pn = &pcpu_ncache[n]; 3874 spin_lock(&pn->neg_spin); 3875 count = pn->neg_count * count / vnegs + 1; 3876 spin_unlock(&pn->neg_spin); 3877 3878 /* 3879 * Attempt to clean out the specified number of negative cache 3880 * entries. 3881 */ 3882 while (count > 0) { 3883 spin_lock(&pn->neg_spin); 3884 ncp = TAILQ_FIRST(&pn->neg_list); 3885 if (ncp == NULL) { 3886 spin_unlock(&pn->neg_spin); 3887 break; 3888 } 3889 TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode); 3890 TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode); 3891 _cache_hold(ncp); 3892 spin_unlock(&pn->neg_spin); 3893 3894 /* 3895 * This can race, so we must re-check that the ncp 3896 * is on the ncneg.list after successfully locking it. 3897 */ 3898 if (_cache_lock_special(ncp) == 0) { 3899 if (ncp->nc_vp == NULL && 3900 (ncp->nc_flag & NCF_UNRESOLVED) == 0) { 3901 ncp = cache_zap(ncp, 1); 3902 if (ncp) 3903 _cache_drop(ncp); 3904 } else { 3905 _cache_unlock(ncp); 3906 _cache_drop(ncp); 3907 } 3908 } else { 3909 _cache_drop(ncp); 3910 } 3911 --count; 3912 } 3913 } 3914 3915 /* 3916 * Clean out positive cache entries when too many have accumulated. 3917 */ 3918 static void 3919 _cache_cleanpos(long count) 3920 { 3921 static volatile int rover; 3922 struct nchash_head *nchpp; 3923 struct namecache *ncp; 3924 int rover_copy; 3925 3926 /* 3927 * Attempt to clean out the specified number of negative cache 3928 * entries. 3929 */ 3930 while (count > 0) { 3931 rover_copy = ++rover; /* MPSAFEENOUGH */ 3932 cpu_ccfence(); 3933 nchpp = NCHHASH(rover_copy); 3934 3935 if (TAILQ_FIRST(&nchpp->list) == NULL) { 3936 --count; 3937 continue; 3938 } 3939 3940 /* 3941 * Cycle ncp on list, ignore and do not move DUMMY 3942 * ncps. These are temporary list iterators. 3943 * 3944 * We must cycle the ncp to the end of the list to 3945 * ensure that all ncp's have an equal chance of 3946 * being removed. 
/*
 * Clean out positive cache entries when too many have accumulated.
 */
static void
_cache_cleanpos(long count)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	int rover_copy;

	/*
	 * Attempt to clean out the specified number of positive cache
	 * entries.
	 */
	while (count > 0) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		cpu_ccfence();
		nchpp = NCHHASH(rover_copy);

		if (TAILQ_FIRST(&nchpp->list) == NULL) {
			--count;
			continue;
		}

		/*
		 * Cycle ncp on list, ignore and do not move DUMMY
		 * ncps.  These are temporary list iterators.
		 *
		 * We must cycle the ncp to the end of the list to
		 * ensure that all ncp's have an equal chance of
		 * being removed.
		 */
		spin_lock(&nchpp->spin);
		ncp = TAILQ_FIRST(&nchpp->list);
		while (ncp && (ncp->nc_flag & NCF_DUMMY))
			ncp = TAILQ_NEXT(ncp, nc_hash);
		if (ncp) {
			TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
			TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);
			_cache_hold(ncp);
		}
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	/*
	 * Create a list iterator.  DUMMY indicates that this is a list
	 * iterator, DESTROYED prevents matches by lookup functions.
	 */
	numdefered = 0;
	pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
	dummy.nc_refs = 1;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
			TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy,
					   nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}
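/*
 * Illustrative only: the marker-node scan used by _cache_cleandefered()
 * above, reduced to a self-contained sketch with hypothetical names.
 * The marker stays on the list while the spinlock is dropped, so the
 * scan can resume from a stable position even if neighbors are removed
 * in the interim.  A real scan must also hold a reference on the entry
 * being worked on, as _cache_hold() does above.
 */
#if 0
struct example_item {
	TAILQ_ENTRY(example_item) entry;
	int	is_marker;
	int	needs_work;
};
TAILQ_HEAD(example_list, example_item);

static void
example_scan(struct example_list *list, struct spinlock *spin)
{
	struct example_item marker = { .is_marker = 1 };
	struct example_item *it;

	spin_lock(spin);
	TAILQ_INSERT_HEAD(list, &marker, entry);
	it = &marker;
	while ((it = TAILQ_NEXT(it, entry)) != NULL) {
		if (it->is_marker || it->needs_work == 0)
			continue;
		/* park the marker just past the current element */
		TAILQ_REMOVE(list, &marker, entry);
		TAILQ_INSERT_AFTER(list, it, &marker, entry);
		spin_unlock(spin);
		/* ... blocking or lock-ordered work on 'it' ... */
		spin_lock(spin);
		it = &marker;		/* resume from the marker */
	}
	TAILQ_REMOVE(list, &marker, entry);
	spin_unlock(spin);
}
#endif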
/*
 * Name cache initialization, called from vfsinit() when we are booting.
 */
void
nchinit(void)
{
	struct pcpu_ncache *pn;
	globaldata_t gd;
	int i;

	/*
	 * Per-cpu accounting and negative hit list
	 */
	pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
			      M_VFSCACHE, M_WAITOK|M_ZERO);
	for (i = 0; i < ncpus; ++i) {
		pn = &pcpu_ncache[i];
		TAILQ_INIT(&pn->neg_list);
		spin_init(&pn->neg_spin, "ncneg");
	}

	/*
	 * Initialize per-cpu namecache effectiveness statistics.
	 */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}

	/*
	 * Create a generous namecache hash table
	 */
	nchashtbl = hashinit_ext(vfs_inodehashsize(),
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		TAILQ_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin, "nchinit_hash");
	}
	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
		spin_init(&ncmount_cache[i].spin, "nchinit_cache");
	nclockwarn = 5 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	_cache_mntref(mp);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}
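/*
 * Illustrative only: a hedged sketch of how boot-time code can wire the
 * namecache root to a freshly mounted root filesystem using the two
 * functions above.  Error handling and the surrounding mountroot logic
 * are omitted; example_setroot() is a hypothetical name.
 */
#if 0
static void
example_setroot(struct mount *mp)
{
	struct vnode *vp;

	if (VFS_ROOT(mp, &vp) == 0) {
		vn_unlock(vp);	/* keep the ref, rootvnode owns it now */
		vfs_cache_setroot(vp, cache_hold(&mp->mnt_ncmountpt));
	}
}
#endif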
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
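/*
 * Illustrative only: the sort of 'catch all' use the comment above
 * describes.  A filesystem that cannot track individual namespace
 * changes might call cache_purge() from its reclaim path before
 * tearing down its private node state.  example_reclaim() is a
 * hypothetical method, not a copy of any real filesystem's code.
 */
#if 0
static int
example_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	cache_purge(vp);		/* sever all namecache linkage */
	/* ... release fs-private node attached to vp ... */
	return (0);
}
#endif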
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

static u_long numcwdcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
    "Number of current directory resolution calls");
static u_long numcwdfailnf;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
    "Number of current directory failures due to lack of file");
static u_long numcwdfailsz;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
    "Number of current directory failures due to large result");
static u_long numcwdfound;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
    "Number of current directory resolution successes");

/*
 * MPALMOSTSAFE
 */
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfailsz++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			if (ncp_shared_lock_disable)
				_cache_lock(ncp);
			else
				_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfailnf++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
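/*
 * Illustrative only: the reverse-construction trick kern_getcwd() uses
 * above, as a self-contained helper with hypothetical names.  The path
 * is assembled leaf-to-root by writing each component backwards in
 * front of the previous one, so no second pass or memmove is needed;
 * NULL means the buffer was too small (the ERANGE case above).
 */
#if 0
static char *
example_prepend(char *buf, char *bp, const char *name, int nlen)
{
	while (--nlen >= 0) {
		if (bp == buf)
			return (NULL);
		*--bp = name[nlen];
	}
	if (bp == buf)
		return (NULL);
	*--bp = '/';
	return (bp);		/* new start of the partial path */
}
#endif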
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0,
    "Disable fullpath lookups");

int
cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp, *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (nchbase)
		fd_nrdir = *nchbase;
	else if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so
		 * whenever we encounter an ncp marked as a mountpoint.
		 * We try to find the actual mount point by locating the
		 * mount whose root is this ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
	    char **freebuf, int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	*freebuf = NULL;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock_shared(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock_shared(&vn->v_spin);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock_shared(&vn->v_spin);

	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}

void
vfscache_rollup_cpu(struct globaldata *gd)
{
	struct pcpu_ncache *pn;
	long count;

	if (pcpu_ncache == NULL)
		return;
	pn = &pcpu_ncache[gd->gd_cpuid];

	if (pn->vfscache_count) {
		count = atomic_swap_long(&pn->vfscache_count, 0);
		atomic_add_long(&vfscache_count, count);
	}
	if (pn->vfscache_leafs) {
		count = atomic_swap_long(&pn->vfscache_leafs, 0);
		atomic_add_long(&vfscache_leafs, count);
	}
	if (pn->vfscache_negs) {
		count = atomic_swap_long(&pn->vfscache_negs, 0);
		atomic_add_long(&vfscache_negs, count);
	}
	if (pn->numdefered) {
		count = atomic_swap_long(&pn->numdefered, 0);
		atomic_add_long(&numdefered, count);
	}
}

#if 0
static void
vfscache_rollup_all(void)
{
	int n;

	for (n = 0; n < ncpus; ++n)
		vfscache_rollup_cpu(globaldata_find(n));
}
#endif
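/*
 * Illustrative only: a typical consumer of vn_fullpath() above, with a
 * hypothetical name.  Passing a NULL vnode asks for the path of the
 * process's text vnode; on success the caller uses *pathp and then
 * frees the backing buffer, which cache_fullpath() allocated from
 * M_TEMP.
 */
#if 0
static int
example_procpath(struct proc *p, char **pathp, char **freep)
{
	int error;

	error = vn_fullpath(p, NULL, pathp, freep, 0);
	if (error == 0) {
		/* ... use *pathp ... */
		kfree(*freep, M_TEMP);
	}
	return (error);
}
#endif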