/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked-up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) Parent linkages require both the parent and child to be locked.
 */
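
/*
 * Illustrative sketch (hypothetical caller, not a complete code path):
 * per rules (1)-(3) a child is referenced and locked before its parent,
 * and per rule (4) both ends are locked before a linkage is modified:
 *
 *	_cache_hold(child);			rule (1): ref before lock
 *	_cache_lock(child);			rule (2): lock before modify
 *	par = _cache_hold(child->nc_parent);
 *	_cache_lock(par);			rule (3): child -> parent
 *	... adjust the linkage, both ends locked per rule (4) ...
 *	_cache_put(par);
 *	_cache_put(child);
 */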

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024
#define MINPOS		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list list;
	struct spinlock	spin;
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did
 *	not have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of namecache negative entries");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");

static int	numdefered;		/* number of deferred zaps */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred namecache zaps");

static int	ncposlimit;		/* limit on positive entries */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Limit on the number of positive namecache entries");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleanpos(int count);
static void _cache_cleandefered(void);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
static int numneg;
SYSCTL_INT(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative namecache entries");
static int numcache;
SYSCTL_INT(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long numcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD, &numcalls, 0,
    "Number of namecache lookups");
static u_long numchecks;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numchecks, CTLFLAG_RD, &numchecks, 0,
    "Number of checked entries in namecache lookups");

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
					sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static struct namecache *cache_zap(struct namecache *ncp, int nonblock);

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 *
 * MPSAFE
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;
	int error;
	u_int count;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
				      count | NC_EXLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: blocked "
					"on %p",
					ncp);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
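
/*
 * Illustrative note (sketch, not an exhaustive contract): nc_exlocks
 * counts recursion for the owning thread, so the holder may re-acquire
 * the lock and must release it the same number of times:
 *
 *	_cache_lock(ncp);
 *	_cache_lock(ncp);		recursive acquire, same thread
 *	_cache_unlock(ncp);
 *	_cache_unlock(ncp);		lock actually released here
 */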

/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 *
 * MPSAFE
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

/*
 * Helper function
 *
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 *
 * MPSAFE
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);

	count = ncp->nc_exlocks;
	if ((count & ~NC_EXLOCK_REQ) == 1) {
		ncp->nc_locktd = NULL;
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
	}
	for (;;) {
		if ((count & ~NC_EXLOCK_REQ) == 1) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(ncp);
				break;
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count - 1)) {
				break;
			}
		}
		count = ncp->nc_exlocks;
	}
}

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 *
 * MPSAFE
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 *
 * MPSAFE
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}
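
/*
 * Illustrative sketch (hypothetical caller, error paths omitted): a
 * reference pins the structure while it is examined, and the drop side
 * transparently zaps dead unresolved leaves via cache_zap():
 *
 *	_cache_hold(ncp);		pin; ncp cannot be freed
 *	... inspect ncp, possibly lock/unlock it ...
 *	_cache_drop(ncp);		unpin; may cascade-zap parents
 */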

/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 *
 * MPSAFE
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 *
 * MPSAFE
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 *
 * MPSAFE
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}
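
/*
 * Illustrative sketch (hypothetical consumer; 'nlc' is an assumed
 * nlcomponent from a lookup): cache_alloc() returns an entry that is
 * already referenced (nc_refs == 1) and exclusively locked, so a typical
 * consumer copies in the name, links it, and later disposes of it with
 * an unlock + drop pair:
 *
 *	ncp = cache_alloc(nlc->nlc_namelen);
 *	bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
 *	... _cache_link_parent(ncp, par, nchpp) under the hash spinlock ...
 *	_cache_put(ncp);		unlock + drop when done
 */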

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 *
 * MPSAFE
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

/*
 * MPSAFE
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 *
 * MPSAFE if nch is
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 *
 * MPSAFE if nch is
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	if (target->ncp)
		_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE if nch is
 */
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * MPSAFE
 */
void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, so we just ping-pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}

/*
 * MPSAFE
 */
int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

/*
 * MPSAFE
 */
void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}
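
/*
 * Illustrative sketch (hypothetical two-node operation such as a rename):
 * lock one handle normally, then let cache_relock() ping-pong until both
 * are held, avoiding an AB/BA deadlock:
 *
 *	cache_lock(&tnch);			second handle locked first
 *	cache_relock(&fnch, cred, &tnch, cred);	now both are locked
 *	... re-validate both handles, they may have become invalid ...
 *	cache_put(&fnch);
 *	cache_put(&tnch);
 */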

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 *
 * MPSAFE
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 *
 * MPSAFE
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 *
 * MPSAFE
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * MPSAFE
 */
void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
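
/*
 * Illustrative sketch (hypothetical caller; 'src_nch' is an assumed
 * source handle): cache_get()/cache_put() bracket a ref+lock cycle and,
 * unlike bare cache_lock(), guarantee the entry comes back either usable
 * or definitively unresolved:
 *
 *	struct nchandle nch;
 *
 *	cache_get(&src_nch, &nch);	ref + lock, revalidates the vp
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);
 *	... operate on the namespace ...
 *	cache_put(&nch);		unlock + drop
 */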

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 *
 * MPSAFE
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spin);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock(&vp->v_spin);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
		/*
		 * XXX: this is a hack to work around the lack of a real
		 * pfs vfs implementation.
		 */
		if (mp != NULL)
			vp->v_pfsmp = mp;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			VFS_NCPGEN_SET(mp, ncp);
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}

/*
 * MPSAFE
 */
void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * MPSAFE
 */
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked
 * and refd on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * MPSAFE
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock(&vp->v_spin);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock(&vp->v_spin);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			spin_lock(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}
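
/*
 * Illustrative sketch (hypothetical filesystem resolver): a VFS that has
 * finished looking up a locked, unresolved nchandle reports its verdict
 * through cache_setvp(), passing a NULL vnode to record a negative hit:
 *
 *	if (lookup succeeded)
 *		cache_setvp(nch, vp);	positive hit, nc_error = 0
 *	else if (lookup returned ENOENT)
 *		cache_setvp(nch, NULL);	negative hit, nc_error = ENOENT
 *	else
 *		leave unresolved; nc_error remains ENOTCONN
 */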

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point's namecache_gen
 * has changed.
 *
 * MPSAFE
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
		_cache_setunresolved(ncp);
		return;
	}
}

/*
 * MPSAFE
 */
void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 *
 * MPSAFE
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

/*
 * MPSAFE
 */
void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and the flags modify the
 * behavior as follows:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp; if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT NOT* have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.
 * We do this by saving the deep namecache node and aborting the recursion,
 * then re-recursing at that node using a depth-first algorithm in order to
 * allow multiple deep recursions to chain through each other, then we
 * restart the invalidation from scratch.
 *
 * MPSAFE
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
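
/*
 * Illustrative sketch (hypothetical directory-removal path): a caller
 * holding the locked nchandle of a deleted directory typically destroys
 * the node and unresolves its subtree in one call:
 *
 *	if (cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN) != 0)
 *		a racing resolve may have re-resolved the entry;
 *		the caller can re-check and retry if that matters.
 */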

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 *
 * MPSAFE
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spin);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 *
 * MPSAFE
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spin);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 *
 * MPSAFE
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_setunresolved(tncp);
	_cache_unlink_parent(tncp);
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;

	if (oname)
		kfree(oname, M_VFSCACHE);
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * MPSAFE
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
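
/*
 * Illustrative sketch (hypothetical caller holding a locked nchandle):
 * cache_vget() hides both the resolve step and the reclaim-race retry,
 * so the caller only deals with the final verdict:
 *
 *	error = cache_vget(&nch, cred, LK_EXCLUSIVE, &vp);
 *	if (error == 0) {
 *		... use the referenced, exclusively locked vp ...
 *		vput(vp);
 *	} else if (error == ENOENT) {
 *		... negative hit, no vnode to use ...
 *	}
 */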

/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is
 * returned.  If 'makeit' is 1 we attempt to track-down and create the
 * namecache topology for dvp.  This will fail only if the directory has
 * been deleted out from under the caller.
 *
 * Callers must always check for a NULL return no matter the value
 * of 'makeit'.
 *
 * To avoid overflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock(&dvp->v_spin);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock(&dvp->v_spin);
			break;
		}
		spin_unlock(&dvp->v_spin);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock(&pvp->v_spin);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock(&pvp->v_spin);
			vrele(pvp);
			break;
		}
		spin_unlock(&pvp->v_spin);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.
 * We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 &&
		    uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: "
					"setvp %s/%s = %p\n",
					nch->ncp->nc_name,
					rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: "
					"setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name,
					rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *           spin locks to be able to safely test nc_refs.  Lock order is
 *           very important.
 *
 *           hash spinlock if on hash list
 *           parent spinlock if child of parent
 *           (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
        struct namecache *par;
        struct vnode *dropvp;
        int refs;

        /*
         * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
         */
        _cache_setunresolved(ncp);

        /*
         * Try to scrap the entry and possibly tail-recurse on its parent.
         * We only scrap unref'd (other than our ref) unresolved entries,
         * we do not scrap 'live' entries.
         *
         * Note that once the spinlocks are acquired if nc_refs == 1 no
         * other references are possible.  If it isn't, however, we have
         * to decrement but also be sure to avoid a 1->0 transition.
         */
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
        KKASSERT(ncp->nc_refs > 0);

        /*
         * Acquire locks.  Note that the parent can't go away while we hold
         * a child locked.
         */
        if ((par = ncp->nc_parent) != NULL) {
                if (nonblock) {
                        for (;;) {
                                if (_cache_lock_nonblock(par) == 0)
                                        break;
                                refs = ncp->nc_refs;
                                ncp->nc_flag |= NCF_DEFEREDZAP;
                                ++numdefered;   /* MP race ok */
                                if (atomic_cmpset_int(&ncp->nc_refs,
                                                      refs, refs - 1)) {
                                        _cache_unlock(ncp);
                                        return(NULL);
                                }
                                cpu_pause();
                        }
                        _cache_hold(par);
                } else {
                        _cache_hold(par);
                        _cache_lock(par);
                }
                spin_lock(&ncp->nc_head->spin);
        }

        /*
         * If someone other than us has a ref or we have children
         * we cannot zap the entry.  The 1->0 transition and any
         * further list operation is protected by the spinlocks
         * we have acquired but other transitions are not.
         */
        for (;;) {
                refs = ncp->nc_refs;
                if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
                        break;
                if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
                        if (par) {
                                spin_unlock(&ncp->nc_head->spin);
                                _cache_put(par);
                        }
                        _cache_unlock(ncp);
                        return(NULL);
                }
                cpu_pause();
        }

        /*
         * We are the only ref and with the spinlocks held no further
         * refs can be acquired by others.
         *
         * Remove us from the hash list and parent list.  We have to
         * drop a ref on the parent's vp if the parent's list becomes
         * empty.
         */
        dropvp = NULL;
        if (par) {
                struct nchash_head *nchpp = ncp->nc_head;

                KKASSERT(nchpp != NULL);
                LIST_REMOVE(ncp, nc_hash);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        dropvp = par->nc_vp;
                ncp->nc_head = NULL;
                ncp->nc_parent = NULL;
                spin_unlock(&nchpp->spin);
                _cache_unlock(par);
        } else {
                KKASSERT(ncp->nc_head == NULL);
        }

        /*
         * ncp should not have picked up any refs.  Physically
         * destroy the ncp.
         */
        KKASSERT(ncp->nc_refs == 1);
        /* _cache_unlock(ncp) not required */
        ncp->nc_refs = -1;      /* safety */
        if (ncp->nc_name)
                kfree(ncp->nc_name, M_VFSCACHE);
        kfree(ncp, M_VFSCACHE);

        /*
         * Delayed drop (we had to release our spinlocks)
         *
         * The refed parent (if not NULL) must be dropped.  The
         * caller is responsible for looping.
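         *
         * The vdrop() below is issued only now, after the hash and
         * parent spinlocks have been released; it cannot be called
         * while we are still holding them.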
         */
        if (dropvp)
                vdrop(dropvp);
        return(par);
}

/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * namecache.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;

void
cache_hysteresis(void)
{
        int poslimit;

        /*
         * Don't cache too many negative hits.  We use hysteresis to reduce
         * the impact on the critical path.
         */
        switch(neg_cache_hysteresis_state) {
        case CHI_LOW:
                if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
                        _cache_cleanneg(10);
                        neg_cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numneg > MINNEG * 9 / 10 &&
                    numneg * ncnegfactor * 9 / 10 > numcache
                ) {
                        _cache_cleanneg(10);
                } else {
                        neg_cache_hysteresis_state = CHI_LOW;
                }
                break;
        }

        /*
         * Don't cache too many positive hits.  We use hysteresis to reduce
         * the impact on the critical path.
         *
         * Excessive positive hits can accumulate due to large numbers of
         * hardlinks (the vnode cache will not prevent hardlink ncps from
         * growing into infinity).
         */
        if ((poslimit = ncposlimit) == 0)
                poslimit = desiredvnodes * 2;

        switch(pos_cache_hysteresis_state) {
        case CHI_LOW:
                if (numcache > poslimit && numcache > MINPOS) {
                        _cache_cleanpos(10);
                        pos_cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
                        _cache_cleanpos(10);
                } else {
                        pos_cache_hysteresis_state = CHI_LOW;
                }
                break;
        }

        /*
         * Clean out dangling deferred-zap ncps which could not
         * be cleanly dropped if too many build up.  Note
         * that numdefered is not an exact number as such ncps
         * can be reused and the counter is not handled in a MP
         * safe manner by design.
         */
        if (numdefered * ncnegfactor > numcache) {
                _cache_cleandefered();
        }
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence the requirement that the passed par_nch be unlocked).
 * Locking rules are ordered for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
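 *
 * Hypothetical usage sketch of the contract described above (error
 * handling elided; cred would be supplied by the caller as it is for
 * cache_resolve() below):
 *
 *      nch = cache_nlookup(&par_nch, &nlc);
 *      if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *              cache_resolve(&nch, cred);
 *      ...
 *      cache_put(&nch);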
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
        struct nchandle nch;
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchash_head *nchpp;
        struct mount *mp;
        u_int32_t hash;
        globaldata_t gd;
        int par_locked;

        numcalls++;
        gd = mycpu;
        mp = par_nch->mount;
        par_locked = 0;

        /*
         * This is a good time to call it, no ncp's are locked by
         * the caller or us.
         */
        cache_hysteresis();

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
        new_ncp = NULL;
        nchpp = NCHHASH(hash);
restart:
        spin_lock(&nchpp->spin);
        LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
                numchecks++;

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par_nch->ncp &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        _cache_hold(ncp);
                        spin_unlock(&nchpp->spin);
                        if (par_locked) {
                                _cache_unlock(par_nch->ncp);
                                par_locked = 0;
                        }
                        if (_cache_lock_special(ncp) == 0) {
                                _cache_auto_unresolve(mp, ncp);
                                if (new_ncp)
                                        _cache_free(new_ncp);
                                goto found;
                        }
                        _cache_get(ncp);
                        _cache_put(ncp);
                        _cache_drop(ncp);
                        goto restart;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  The parent ncp must also be locked so we
         * can link into it.
         *
         * We have to relookup after possibly blocking in kmalloc or
         * when locking par_nch.
         *
         * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
         *       mount case, in which case nc_name will be NULL.
         */
        if (new_ncp == NULL) {
                spin_unlock(&nchpp->spin);
                new_ncp = cache_alloc(nlc->nlc_namelen);
                if (nlc->nlc_namelen) {
                        bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
                              nlc->nlc_namelen);
                        new_ncp->nc_name[nlc->nlc_namelen] = 0;
                }
                goto restart;
        }
        if (par_locked == 0) {
                spin_unlock(&nchpp->spin);
                _cache_lock(par_nch->ncp);
                par_locked = 1;
                goto restart;
        }

        /*
         * WARNING!  We still hold the spinlock.  We have to set the hash
         *           table entry atomically.
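         *
         * At this point the hash chain spinlock is still held, the
         * parent is locked, and new_ncp is not yet visible to other
         * cpus, so the link-in below cannot race a concurrent lookup
         * of the same name.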
         */
        ncp = new_ncp;
        _cache_link_parent(ncp, par_nch->ncp, nchpp);
        spin_unlock(&nchpp->spin);
        _cache_unlock(par_nch->ncp);
        /* par_locked = 0 - not used */
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        nch.mount = mp;
        nch.ncp = ncp;
        atomic_add_int(&nch.mount->mnt_refs, 1);
        return(nch);
}

/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
struct nchandle
cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
        struct nchandle nch;
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchash_head *nchpp;
        struct mount *mp;
        u_int32_t hash;
        globaldata_t gd;
        int par_locked;

        numcalls++;
        gd = mycpu;
        mp = par_nch->mount;
        par_locked = 0;

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
        new_ncp = NULL;
        nchpp = NCHHASH(hash);
restart:
        spin_lock(&nchpp->spin);
        LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
                numchecks++;

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par_nch->ncp &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        _cache_hold(ncp);
                        spin_unlock(&nchpp->spin);
                        if (par_locked) {
                                _cache_unlock(par_nch->ncp);
                                par_locked = 0;
                        }
                        if (_cache_lock_special(ncp) == 0) {
                                _cache_auto_unresolve(mp, ncp);
                                if (new_ncp) {
                                        _cache_free(new_ncp);
                                        new_ncp = NULL;
                                }
                                goto found;
                        }
                        _cache_drop(ncp);
                        goto failed;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  The parent ncp must also be locked so we
         * can link into it.
         *
         * We have to relookup after possibly blocking in kmalloc or
         * when locking par_nch.
         *
         * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
         *       mount case, in which case nc_name will be NULL.
         */
        if (new_ncp == NULL) {
                spin_unlock(&nchpp->spin);
                new_ncp = cache_alloc(nlc->nlc_namelen);
                if (nlc->nlc_namelen) {
                        bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
                              nlc->nlc_namelen);
                        new_ncp->nc_name[nlc->nlc_namelen] = 0;
                }
                goto restart;
        }
        if (par_locked == 0) {
                spin_unlock(&nchpp->spin);
                if (_cache_lock_nonblock(par_nch->ncp) == 0) {
                        par_locked = 1;
                        goto restart;
                }
                goto failed;
        }

        /*
         * WARNING!  We still hold the spinlock.  We have to set the hash
         *           table entry atomically.
         */
        ncp = new_ncp;
        _cache_link_parent(ncp, par_nch->ncp, nchpp);
        spin_unlock(&nchpp->spin);
        _cache_unlock(par_nch->ncp);
        /* par_locked = 0 - not used */
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        nch.mount = mp;
        nch.ncp = ncp;
        atomic_add_int(&nch.mount->mnt_refs, 1);
        return(nch);
failed:
        if (new_ncp) {
                _cache_free(new_ncp);
                new_ncp = NULL;
        }
        nch.mount = NULL;
        nch.ncp = NULL;
        return(nch);
}

/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
        struct mount *result;
        struct mount *nch_mount;
        struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
        struct findmount_info *info = data;

        /*
         * Check the mount's mounted-on point against the passed nch.
         */
        if (mp->mnt_ncmounton.mount == info->nch_mount &&
            mp->mnt_ncmounton.ncp == info->nch_ncp
        ) {
                info->result = mp;
                return(-1);
        }
        return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
        struct findmount_info info;

        info.result = NULL;
        info.nch_mount = nch->mount;
        info.nch_ncp = nch->ncp;
        mountlist_scan(cache_findmount_callback, &info,
                       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
        return(info.result);
}

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 *
 * MPSAFE
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
        struct namecache *par_tmp;
        struct namecache *par;
        struct namecache *ncp;
        struct nchandle nctmp;
        struct mount *mp;
        struct vnode *dvp;
        int error;

        ncp = nch->ncp;
        mp = nch->mount;
restart:
        /*
         * If the ncp is already resolved we have nothing to do.  However,
         * we do want to guarantee that a usable vnode is returned when
         * a vnode is present, so make sure it hasn't been reclaimed.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        _cache_setunresolved(ncp);
                if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                        return (ncp->nc_error);
        }

        /*
         * Mount points need special handling because the parent does not
         * belong to the same filesystem as the ncp.
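         * Such an ncp is resolved against the root vnode of the mounted
         * filesystem via cache_resolve_mp() below, not via VOP_NRESOLVE()
         * on a parent directory.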
         */
        if (ncp == mp->mnt_ncmountpt.ncp)
                return (cache_resolve_mp(mp));

        /*
         * We expect an unbroken chain of ncps to at least the mount point,
         * and even all the way to root (but this code doesn't have to go
         * past the mount point).
         */
        if (ncp->nc_parent == NULL) {
                kprintf("EXDEV case 1 %p %*.*s\n", ncp,
                        ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                ncp->nc_error = EXDEV;
                return(ncp->nc_error);
        }

        /*
         * The vp's of the parent directories in the chain are held via vhold()
         * due to the existence of the child, and should not disappear.
         * However, there are cases where they can disappear:
         *
         *      - due to filesystem I/O errors.
         *      - due to NFS being stupid about tracking the namespace and
         *        destroying the namespace for entire directories quite often.
         *      - due to forced unmounts.
         *      - due to an rmdir (parent will be marked DESTROYED)
         *
         * When this occurs we have to track the chain backwards and resolve
         * it, looping until the resolver catches up to the current node.  We
         * could recurse here but we might run ourselves out of kernel stack
         * so we do it in a more painful manner.  This situation really should
         * not occur all that often, and if it does it should not have to go
         * back too many nodes to resolve the ncp.
         */
        while ((dvp = cache_dvpref(ncp)) == NULL) {
                /*
                 * This case can occur if a process is CD'd into a
                 * directory which is then rmdir'd.  If the parent is marked
                 * destroyed there is no point trying to resolve it.
                 */
                if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
                        return(ENOENT);
                par = ncp->nc_parent;
                _cache_hold(par);
                _cache_lock(par);
                while ((par_tmp = par->nc_parent) != NULL &&
                       par_tmp->nc_vp == NULL) {
                        _cache_hold(par_tmp);
                        _cache_lock(par_tmp);
                        _cache_put(par);
                        par = par_tmp;
                }
                if (par->nc_parent == NULL) {
                        kprintf("EXDEV case 2 %*.*s\n",
                                par->nc_nlen, par->nc_nlen, par->nc_name);
                        _cache_put(par);
                        return (EXDEV);
                }
                kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
                        par->nc_nlen, par->nc_nlen, par->nc_name);
                /*
                 * The parent is not set in stone, ref and lock it to prevent
                 * it from disappearing.  Also note that due to renames it
                 * is possible for our ncp to move and for par to no longer
                 * be one of its parents.  We resolve it anyway, the loop
                 * will handle any moves.
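                 *
                 * (The _cache_get() below acquires an additional hold/lock
                 * on par and the _cache_put() that follows releases the
                 * earlier one, leaving exactly one hold/lock across the
                 * resolution attempt.)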
                 */
                _cache_get(par);        /* additional hold/lock */
                _cache_put(par);        /* from earlier hold/lock */
                if (par == nch->mount->mnt_ncmountpt.ncp) {
                        cache_resolve_mp(nch->mount);
                } else if ((dvp = cache_dvpref(par)) == NULL) {
                        kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
                                par->nc_nlen, par->nc_nlen, par->nc_name);
                        _cache_put(par);
                        continue;
                } else {
                        if (par->nc_flag & NCF_UNRESOLVED) {
                                nctmp.mount = mp;
                                nctmp.ncp = par;
                                par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
                        }
                        vrele(dvp);
                }
                if ((error = par->nc_error) != 0) {
                        if (par->nc_error != EAGAIN) {
                                kprintf("EXDEV case 3 %*.*s error %d\n",
                                        par->nc_nlen, par->nc_nlen, par->nc_name,
                                        par->nc_error);
                                _cache_put(par);
                                return(error);
                        }
                        kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
                                par, par->nc_nlen, par->nc_nlen, par->nc_name);
                }
                _cache_put(par);
                /* loop */
        }

        /*
         * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
         * ncp's and reattach them.  If this occurs the original ncp is marked
         * EAGAIN to force a relookup.
         *
         * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
         *       ncp must already be resolved.
         */
        if (dvp) {
                nctmp.mount = mp;
                nctmp.ncp = ncp;
                ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
                vrele(dvp);
        } else {
                ncp->nc_error = EPERM;
        }
        if (ncp->nc_error == EAGAIN) {
                kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
                        ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
                goto restart;
        }
        return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS mount points tend
 * to force re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
        struct namecache *ncp = mp->mnt_ncmountpt.ncp;
        struct vnode *vp;
        int error;

        KKASSERT(mp != NULL);

        /*
         * If the ncp is already resolved we have nothing to do.  However,
         * we do want to guarantee that a usable vnode is returned when
         * a vnode is present, so make sure it hasn't been reclaimed.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                        _cache_setunresolved(ncp);
        }

        if (ncp->nc_flag & NCF_UNRESOLVED) {
                _cache_unlock(ncp);
                while (vfs_busy(mp, 0))
                        ;
                error = VFS_ROOT(mp, &vp);
                _cache_lock(ncp);

                /*
                 * recheck the ncp state after relocking.
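                 * Another thread may have resolved it while it was
                 * unlocked, in which case the vnode obtained from
                 * VFS_ROOT() is simply released again below.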
                 */
                if (ncp->nc_flag & NCF_UNRESOLVED) {
                        ncp->nc_error = error;
                        if (error == 0) {
                                _cache_setvp(mp, ncp, vp);
                                vput(vp);
                        } else {
                                kprintf("[diagnostic] cache_resolve_mp: failed"
                                        " to resolve mount %p err=%d ncp=%p\n",
                                        mp, error, ncp);
                                _cache_setvp(mp, ncp, NULL);
                        }
                } else if (error == 0) {
                        vput(vp);
                }
                vfs_unbusy(mp);
        }
        return(ncp->nc_error);
}

/*
 * Clean out negative cache entries when too many have accumulated.
 *
 * MPSAFE
 */
static void
_cache_cleanneg(int count)
{
        struct namecache *ncp;

        /*
         * Attempt to clean out the specified number of negative cache
         * entries.
         */
        while (count) {
                spin_lock(&ncspin);
                ncp = TAILQ_FIRST(&ncneglist);
                if (ncp == NULL) {
                        spin_unlock(&ncspin);
                        break;
                }
                TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                _cache_hold(ncp);
                spin_unlock(&ncspin);
                if (_cache_lock_special(ncp) == 0) {
                        ncp = cache_zap(ncp, 1);
                        if (ncp)
                                _cache_drop(ncp);
                } else {
                        _cache_drop(ncp);
                }
                --count;
        }
}

/*
 * Clean out positive cache entries when too many have accumulated.
 *
 * MPSAFE
 */
static void
_cache_cleanpos(int count)
{
        static volatile int rover;
        struct nchash_head *nchpp;
        struct namecache *ncp;
        int rover_copy;

        /*
         * Attempt to clean out the specified number of positive cache
         * entries.
         */
        while (count) {
                rover_copy = ++rover;   /* MPSAFEENOUGH */
                cpu_ccfence();
                nchpp = NCHHASH(rover_copy);

                spin_lock(&nchpp->spin);
                ncp = LIST_FIRST(&nchpp->list);
                if (ncp)
                        _cache_hold(ncp);
                spin_unlock(&nchpp->spin);

                if (ncp) {
                        if (_cache_lock_special(ncp) == 0) {
                                ncp = cache_zap(ncp, 1);
                                if (ncp)
                                        _cache_drop(ncp);
                        } else {
                                _cache_drop(ncp);
                        }
                }
                --count;
        }
}

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
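 *
 * The scan below inserts a dummy entry marked NCF_DESTROYED into each
 * hash chain as a cursor, which lets the chain's spinlock be dropped
 * while an individual ncp is processed.  The dummy is harmless because
 * lookups never match DESTROYED entries.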
 *
 * MPSAFE
 */
static void
_cache_cleandefered(void)
{
        struct nchash_head *nchpp;
        struct namecache *ncp;
        struct namecache dummy;
        int i;

        numdefered = 0;
        bzero(&dummy, sizeof(dummy));
        dummy.nc_flag = NCF_DESTROYED;

        for (i = 0; i <= nchash; ++i) {
                nchpp = &nchashtbl[i];

                spin_lock(&nchpp->spin);
                LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
                ncp = &dummy;
                while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
                        if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
                                continue;
                        LIST_REMOVE(&dummy, nc_hash);
                        LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
                        _cache_hold(ncp);
                        spin_unlock(&nchpp->spin);
                        if (_cache_lock_nonblock(ncp) == 0) {
                                ncp->nc_flag &= ~NCF_DEFEREDZAP;
                                _cache_unlock(ncp);
                        }
                        _cache_drop(ncp);
                        spin_lock(&nchpp->spin);
                        ncp = &dummy;
                }
                LIST_REMOVE(&dummy, nc_hash);
                spin_unlock(&nchpp->spin);
        }
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
        int i;
        globaldata_t gd;

        /* initialize per-cpu namecache effectiveness statistics. */
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                gd->gd_nchstats = &nchstats[i];
        }
        TAILQ_INIT(&ncneglist);
        spin_init(&ncspin);
        nchashtbl = hashinit_ext(desiredvnodes / 2,
                                 sizeof(struct nchash_head),
                                 M_VFSCACHE, &nchash);
        for (i = 0; i <= (int)nchash; ++i) {
                LIST_INIT(&nchashtbl[i].list);
                spin_init(&nchashtbl[i].spin);
        }
        nclockwarn = 5 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
        nch->ncp = cache_alloc(0);
        nch->mount = mp;
        atomic_add_int(&mp->mnt_refs, 1);
        if (vp)
                _cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *      Create an association between the root of our namecache and
 *      the root vnode.  This routine may be called several times during
 *      booting.
 *
 *      If the caller intends to save the returned namecache pointer somewhere
 *      it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
        struct vnode *ovp;
        struct nchandle onch;

        ovp = rootvnode;
        onch = rootnch;
        rootvnode = nvp;
        if (nch)
                rootnch = *nch;
        else
                cache_zero(&rootnch);
        if (ovp)
                vrele(ovp);
        if (onch.ncp)
                cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence
 * of the children.
 * The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
        cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
        struct nchash_head *nchpp;
        struct namecache *ncp, *nnp;

        /*
         * Scan hash tables for applicable entries.
         */
        for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
                spin_lock_wr(&nchpp->spin); XXX
                ncp = LIST_FIRST(&nchpp->list);
                if (ncp)
                        _cache_hold(ncp);
                while (ncp) {
                        nnp = LIST_NEXT(ncp, nc_hash);
                        if (nnp)
                                _cache_hold(nnp);
                        if (ncp->nc_mount == mp) {
                                _cache_lock(ncp);
                                ncp = cache_zap(ncp, 0);
                                if (ncp)
                                        _cache_drop(ncp);
                        } else {
                                _cache_drop(ncp);
                        }
                        ncp = nnp;
                }
                spin_unlock_wr(&nchpp->spin); XXX
        }
}

#endif

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

static u_long numcwdcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
    "Number of current directory resolution calls");
static u_long numcwdfailnf;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
    "Number of current directory failures due to lack of file");
static u_long numcwdfailsz;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
    "Number of current directory failures due to large result");
static u_long numcwdfound;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
    "Number of current directory resolution successes");

/*
 * MPALMOSTSAFE
 */
int
sys___getcwd(struct __getcwd_args *uap)
{
        u_int buflen;
        int error;
        char *buf;
        char *bp;

        if (disablecwd)
                return (ENODEV);

        buflen = uap->buflen;
        if (buflen == 0)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        buf = kmalloc(buflen, M_TEMP, M_WAITOK);
        get_mplock();
        bp = kern_getcwd(buf, buflen, &error);
        rel_mplock();
        if (error == 0)
                error = copyout(bp, uap->buf, strlen(bp) + 1);
        kfree(buf, M_TEMP);
        return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
        struct proc *p = curproc;
        char *bp;
        int i, slash_prefixed;
        struct filedesc *fdp;
        struct nchandle nch;
        struct namecache *ncp;

        numcwdcalls++;
        bp = buf;
        bp += buflen - 1;
        *bp = '\0';
        fdp = p->p_fd;
        slash_prefixed = 0;

        nch = fdp->fd_ncdir;
        ncp = nch.ncp;
        if (ncp)
                _cache_hold(ncp);

        while (ncp && (ncp != fdp->fd_nrdir.ncp ||
               nch.mount != fdp->fd_nrdir.mount)
        ) {
                /*
                 * While traversing upwards if we encounter the root
                 * of the current mount we have to skip to the mount point
                 * in the underlying filesystem.
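                 *
                 * That is, the ncp chain for a mounted filesystem ends at
                 * its own root, so we hop over to mnt_ncmounton (the
                 * covered directory) and continue the walk from there.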
                 */
                if (ncp == nch.mount->mnt_ncmountpt.ncp) {
                        nch = nch.mount->mnt_ncmounton;
                        _cache_drop(ncp);
                        ncp = nch.ncp;
                        if (ncp)
                                _cache_hold(ncp);
                        continue;
                }

                /*
                 * Prepend the path segment
                 */
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numcwdfailsz++;
                                *error = ERANGE;
                                bp = NULL;
                                goto done;
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numcwdfailsz++;
                        *error = ERANGE;
                        bp = NULL;
                        goto done;
                }
                *--bp = '/';
                slash_prefixed = 1;

                /*
                 * Go up a directory.  This isn't a mount point so we don't
                 * have to check again.
                 */
                while ((nch.ncp = ncp->nc_parent) != NULL) {
                        _cache_lock(ncp);
                        if (nch.ncp != ncp->nc_parent) {
                                _cache_unlock(ncp);
                                continue;
                        }
                        _cache_hold(nch.ncp);
                        _cache_unlock(ncp);
                        break;
                }
                _cache_drop(ncp);
                ncp = nch.ncp;
        }
        if (ncp == NULL) {
                numcwdfailnf++;
                *error = ENOENT;
                bp = NULL;
                goto done;
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numcwdfailsz++;
                        *error = ERANGE;
                        bp = NULL;
                        goto done;
                }
                *--bp = '/';
        }
        numcwdfound++;
        *error = 0;
done:
        if (ncp)
                _cache_drop(ncp);
        return (bp);
}

/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0,
    "Disable fullpath lookups");

static u_int numfullpathcalls;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
    &numfullpathcalls, 0,
    "Number of full path resolutions in progress");
static u_int numfullpathfailnf;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailnf, CTLFLAG_RD,
    &numfullpathfailnf, 0,
    "Number of full path resolution failures due to lack of file");
static u_int numfullpathfailsz;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailsz, CTLFLAG_RD,
    &numfullpathfailsz, 0,
    "Number of full path resolution failures due to insufficient memory");
static u_int numfullpathfound;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfound, CTLFLAG_RD,
    &numfullpathfound, 0,
    "Number of full path resolution successes");

int
cache_fullpath(struct proc *p, struct nchandle *nchp,
               char **retbuf, char **freebuf, int guess)
{
        struct nchandle fd_nrdir;
        struct nchandle nch;
        struct namecache *ncp;
        struct mount *mp, *new_mp;
        char *bp, *buf;
        int slash_prefixed;
        int error = 0;
        int i;

        atomic_add_int(&numfullpathcalls, -1);

        *retbuf = NULL;
        *freebuf = NULL;

        buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        bp = buf + MAXPATHLEN - 1;
        *bp = '\0';
        if (p != NULL)
                fd_nrdir = p->p_fd->fd_nrdir;
        else
                fd_nrdir = rootnch;
        slash_prefixed = 0;
        nch = *nchp;
        ncp = nch.ncp;
        if (ncp)
                _cache_hold(ncp);
        mp = nch.mount;

        while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
                new_mp = NULL;

                /*
                 * If we are asked to guess the upwards path, we do so
                 * whenever we encounter an ncp marked as a mountpoint.
                 * We try to find the actual mount by locating the mount
                 * whose mount point is this ncp.
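                 *
                 * NCF_ISMOUNTPT marks a directory that a filesystem is
                 * mounted on; mount_get_by_nc() maps such an ncp back to
                 * its mount so the walk can continue on the underlying
                 * filesystem.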
                 */
                if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
                        new_mp = mount_get_by_nc(ncp);
                }
                /*
                 * While traversing upwards if we encounter the root
                 * of the current mount we have to skip to the mount point.
                 */
                if (ncp == mp->mnt_ncmountpt.ncp) {
                        new_mp = mp;
                }
                if (new_mp) {
                        nch = new_mp->mnt_ncmounton;
                        _cache_drop(ncp);
                        ncp = nch.ncp;
                        if (ncp)
                                _cache_hold(ncp);
                        mp = nch.mount;
                        continue;
                }

                /*
                 * Prepend the path segment
                 */
                for (i = ncp->nc_nlen - 1; i >= 0; i--) {
                        if (bp == buf) {
                                numfullpathfailsz++;
                                kfree(buf, M_TEMP);
                                error = ENOMEM;
                                goto done;
                        }
                        *--bp = ncp->nc_name[i];
                }
                if (bp == buf) {
                        numfullpathfailsz++;
                        kfree(buf, M_TEMP);
                        error = ENOMEM;
                        goto done;
                }
                *--bp = '/';
                slash_prefixed = 1;

                /*
                 * Go up a directory.  This isn't a mount point so we don't
                 * have to check again.
                 *
                 * We can only safely access nc_parent with ncp held locked.
                 */
                while ((nch.ncp = ncp->nc_parent) != NULL) {
                        _cache_lock(ncp);
                        if (nch.ncp != ncp->nc_parent) {
                                _cache_unlock(ncp);
                                continue;
                        }
                        _cache_hold(nch.ncp);
                        _cache_unlock(ncp);
                        break;
                }
                _cache_drop(ncp);
                ncp = nch.ncp;
        }
        if (ncp == NULL) {
                numfullpathfailnf++;
                kfree(buf, M_TEMP);
                error = ENOENT;
                goto done;
        }

        if (!slash_prefixed) {
                if (bp == buf) {
                        numfullpathfailsz++;
                        kfree(buf, M_TEMP);
                        error = ENOMEM;
                        goto done;
                }
                *--bp = '/';
        }
        numfullpathfound++;
        *retbuf = bp;
        *freebuf = buf;
        error = 0;
done:
        if (ncp)
                _cache_drop(ncp);
        return(error);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf,
            int guess)
{
        struct namecache *ncp;
        struct nchandle nch;
        int error;

        *freebuf = NULL;
        atomic_add_int(&numfullpathcalls, 1);
        if (disablefullpath)
                return (ENODEV);

        if (p == NULL)
                return (EINVAL);

        /* vn is NULL, client wants us to use p->p_textvp */
        if (vn == NULL) {
                if ((vn = p->p_textvp) == NULL)
                        return (EINVAL);
        }
        spin_lock(&vn->v_spin);
        TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
                if (ncp->nc_nlen)
                        break;
        }
        if (ncp == NULL) {
                spin_unlock(&vn->v_spin);
                return (EINVAL);
        }
        _cache_hold(ncp);
        spin_unlock(&vn->v_spin);

        atomic_add_int(&numfullpathcalls, -1);
        nch.ncp = ncp;
        nch.mount = vn->v_mount;
        error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
        _cache_drop(ncp);
        return (error);
}
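
/*
 * Example: a hypothetical consumer of vn_fullpath().  This is not part
 * of the cache implementation; it is a minimal sketch illustrating the
 * retbuf/freebuf contract described above: on success retbuf points into
 * the buffer returned via freebuf, and the caller must kfree() freebuf
 * (not retbuf) when done.
 */
#if 0
static void
example_print_vnode_path(struct proc *p, struct vnode *vp)
{
        char *retbuf;
        char *freebuf;

        if (vn_fullpath(p, vp, &retbuf, &freebuf, 0) == 0) {
                kprintf("vnode %p path: %s\n", vp, retbuf);
                kfree(freebuf, M_TEMP);         /* free the buffer, not retbuf */
        }
}
#endif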