1 /* 2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Matthew Dillon <dillon@backplane.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * Copyright (c) 1989, 1993, 1995 35 * The Regents of the University of California. All rights reserved. 36 * 37 * This code is derived from software contributed to Berkeley by 38 * Poul-Henning Kamp of the FreeBSD Project. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 1. Redistributions of source code must retain the above copyright 44 * notice, this list of conditions and the following disclaimer. 45 * 2. Redistributions in binary form must reproduce the above copyright 46 * notice, this list of conditions and the following disclaimer in the 47 * documentation and/or other materials provided with the distribution. 48 * 3. All advertising materials mentioning features or use of this software 49 * must display the following acknowledgement: 50 * This product includes software developed by the University of 51 * California, Berkeley and its contributors. 52 * 4. Neither the name of the University nor the names of its contributors 53 * may be used to endorse or promote products derived from this software 54 * without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 66 * SUCH DAMAGE. 67 */ 68 69 #include <sys/param.h> 70 #include <sys/systm.h> 71 #include <sys/kernel.h> 72 #include <sys/sysctl.h> 73 #include <sys/mount.h> 74 #include <sys/vnode.h> 75 #include <sys/malloc.h> 76 #include <sys/sysproto.h> 77 #include <sys/spinlock.h> 78 #include <sys/proc.h> 79 #include <sys/namei.h> 80 #include <sys/nlookup.h> 81 #include <sys/filedesc.h> 82 #include <sys/fnv_hash.h> 83 #include <sys/globaldata.h> 84 #include <sys/kern_syscall.h> 85 #include <sys/dirent.h> 86 #include <ddb/ddb.h> 87 88 #include <sys/sysref2.h> 89 #include <sys/spinlock2.h> 90 #include <sys/mplock2.h> 91 92 #define MAX_RECURSION_DEPTH 64 93 94 /* 95 * Random lookups in the cache are accomplished with a hash table using 96 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock. 97 * 98 * Negative entries may exist and correspond to resolved namecache 99 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT 100 * will be set if the entry corresponds to a whited-out directory entry 101 * (verses simply not finding the entry at all). ncneglist is locked 102 * with a global spinlock (ncspin). 103 * 104 * MPSAFE RULES: 105 * 106 * (1) A ncp must be referenced before it can be locked. 107 * 108 * (2) A ncp must be locked in order to modify it. 109 * 110 * (3) ncp locks are always ordered child -> parent. That may seem 111 * backwards but forward scans use the hash table and thus can hold 112 * the parent unlocked when traversing downward. 113 * 114 * This allows insert/rename/delete/dot-dot and other operations 115 * to use ncp->nc_parent links. 116 * 117 * This also prevents a locked up e.g. NFS node from creating a 118 * chain reaction all the way back to the root vnode / namecache. 119 * 120 * (4) parent linkages require both the parent and child to be locked. 121 */ 122 123 /* 124 * Structures associated with name cacheing. 125 */ 126 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash]) 127 #define MINNEG 1024 128 #define MINPOS 1024 129 130 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 131 132 LIST_HEAD(nchash_list, namecache); 133 134 struct nchash_head { 135 struct nchash_list list; 136 struct spinlock spin; 137 }; 138 139 static struct nchash_head *nchashtbl; 140 static struct namecache_list ncneglist; 141 static struct spinlock ncspin; 142 143 /* 144 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server 145 * to create the namecache infrastructure leading to a dangling vnode. 146 * 147 * 0 Only errors are reported 148 * 1 Successes are reported 149 * 2 Successes + the whole directory scan is reported 150 * 3 Force the directory scan code run as if the parent vnode did not 151 * have a namecache record, even if it does have one. 152 */ 153 static int ncvp_debug; 154 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, ""); 155 156 static u_long nchash; /* size of hash table */ 157 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, ""); 158 159 static int ncnegfactor = 16; /* ratio of negative entries */ 160 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, ""); 161 162 static int nclockwarn; /* warn on locked entries in ticks */ 163 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, ""); 164 165 static int numdefered; /* number of cache entries allocated */ 166 SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, ""); 167 168 static int ncposlimit; /* number of cache entries allocated */ 169 SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0, ""); 170 171 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), ""); 172 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), ""); 173 174 int cache_mpsafe = 1; 175 SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, ""); 176 177 static int cache_resolve_mp(struct mount *mp); 178 static struct vnode *cache_dvpref(struct namecache *ncp); 179 static void _cache_lock(struct namecache *ncp); 180 static void _cache_setunresolved(struct namecache *ncp); 181 static void _cache_cleanneg(int count); 182 static void _cache_cleanpos(int count); 183 static void _cache_cleandefered(void); 184 185 /* 186 * The new name cache statistics 187 */ 188 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics"); 189 #define STATNODE(mode, name, var) \ 190 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, ""); 191 #define STATNODE_INT(mode, name, var) \ 192 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, mode, var, 0, ""); 193 static int numneg; STATNODE_INT(CTLFLAG_RD, numneg, &numneg); 194 static int numcache; STATNODE_INT(CTLFLAG_RD, numcache, &numcache); 195 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls); 196 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits); 197 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits); 198 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks); 199 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss); 200 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap); 201 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps); 202 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits); 203 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps); 204 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits); 205 206 struct nchstats nchstats[SMP_MAXCPU]; 207 /* 208 * Export VFS cache effectiveness statistics to user-land. 209 * 210 * The statistics are left for aggregation to user-land so 211 * neat things can be achieved, like observing per-CPU cache 212 * distribution. 213 */ 214 static int 215 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 216 { 217 struct globaldata *gd; 218 int i, error; 219 220 error = 0; 221 for (i = 0; i < ncpus; ++i) { 222 gd = globaldata_find(i); 223 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats), 224 sizeof(struct nchstats)))) 225 break; 226 } 227 228 return (error); 229 } 230 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD, 231 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics"); 232 233 static struct namecache *cache_zap(struct namecache *ncp, int nonblock); 234 235 /* 236 * Namespace locking. The caller must already hold a reference to the 237 * namecache structure in order to lock/unlock it. This function prevents 238 * the namespace from being created or destroyed by accessors other then 239 * the lock holder. 240 * 241 * Note that holding a locked namecache structure prevents other threads 242 * from making namespace changes (e.g. deleting or creating), prevents 243 * vnode association state changes by other threads, and prevents the 244 * namecache entry from being resolved or unresolved by other threads. 245 * 246 * The lock owner has full authority to associate/disassociate vnodes 247 * and resolve/unresolve the locked ncp. 248 * 249 * The primary lock field is nc_exlocks. nc_locktd is set after the 250 * fact (when locking) or cleared prior to unlocking. 251 * 252 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed 253 * or recycled, but it does NOT help you if the vnode had already 254 * initiated a recyclement. If this is important, use cache_get() 255 * rather then cache_lock() (and deal with the differences in the 256 * way the refs counter is handled). Or, alternatively, make an 257 * unconditional call to cache_validate() or cache_resolve() 258 * after cache_lock() returns. 259 * 260 * MPSAFE 261 */ 262 static 263 void 264 _cache_lock(struct namecache *ncp) 265 { 266 thread_t td; 267 int didwarn; 268 int error; 269 u_int count; 270 271 KKASSERT(ncp->nc_refs != 0); 272 didwarn = 0; 273 td = curthread; 274 275 for (;;) { 276 count = ncp->nc_exlocks; 277 278 if (count == 0) { 279 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) { 280 /* 281 * The vp associated with a locked ncp must 282 * be held to prevent it from being recycled. 283 * 284 * WARNING! If VRECLAIMED is set the vnode 285 * could already be in the middle of a recycle. 286 * Callers must use cache_vref() or 287 * cache_vget() on the locked ncp to 288 * validate the vp or set the cache entry 289 * to unresolved. 290 * 291 * NOTE! vhold() is allowed if we hold a 292 * lock on the ncp (which we do). 293 */ 294 ncp->nc_locktd = td; 295 if (ncp->nc_vp) 296 vhold(ncp->nc_vp); /* MPSAFE */ 297 break; 298 } 299 /* cmpset failed */ 300 continue; 301 } 302 if (ncp->nc_locktd == td) { 303 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 304 count + 1)) { 305 break; 306 } 307 /* cmpset failed */ 308 continue; 309 } 310 tsleep_interlock(ncp, 0); 311 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 312 count | NC_EXLOCK_REQ) == 0) { 313 /* cmpset failed */ 314 continue; 315 } 316 error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn); 317 if (error == EWOULDBLOCK) { 318 if (didwarn == 0) { 319 didwarn = ticks; 320 kprintf("[diagnostic] cache_lock: blocked " 321 "on %p", 322 ncp); 323 kprintf(" \"%*.*s\"\n", 324 ncp->nc_nlen, ncp->nc_nlen, 325 ncp->nc_name); 326 } 327 } 328 } 329 if (didwarn) { 330 kprintf("[diagnostic] cache_lock: unblocked %*.*s after " 331 "%d secs\n", 332 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name, 333 (int)(ticks - didwarn) / hz); 334 } 335 } 336 337 /* 338 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance, 339 * such as the case where one of its children is locked. 340 * 341 * MPSAFE 342 */ 343 static 344 int 345 _cache_lock_nonblock(struct namecache *ncp) 346 { 347 thread_t td; 348 u_int count; 349 350 td = curthread; 351 352 for (;;) { 353 count = ncp->nc_exlocks; 354 355 if (count == 0) { 356 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) { 357 /* 358 * The vp associated with a locked ncp must 359 * be held to prevent it from being recycled. 360 * 361 * WARNING! If VRECLAIMED is set the vnode 362 * could already be in the middle of a recycle. 363 * Callers must use cache_vref() or 364 * cache_vget() on the locked ncp to 365 * validate the vp or set the cache entry 366 * to unresolved. 367 * 368 * NOTE! vhold() is allowed if we hold a 369 * lock on the ncp (which we do). 370 */ 371 ncp->nc_locktd = td; 372 if (ncp->nc_vp) 373 vhold(ncp->nc_vp); /* MPSAFE */ 374 break; 375 } 376 /* cmpset failed */ 377 continue; 378 } 379 if (ncp->nc_locktd == td) { 380 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 381 count + 1)) { 382 break; 383 } 384 /* cmpset failed */ 385 continue; 386 } 387 return(EWOULDBLOCK); 388 } 389 return(0); 390 } 391 392 /* 393 * Helper function 394 * 395 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop). 396 * 397 * nc_locktd must be NULLed out prior to nc_exlocks getting cleared. 398 * 399 * MPSAFE 400 */ 401 static 402 void 403 _cache_unlock(struct namecache *ncp) 404 { 405 thread_t td __debugvar = curthread; 406 u_int count; 407 408 KKASSERT(ncp->nc_refs >= 0); 409 KKASSERT(ncp->nc_exlocks > 0); 410 KKASSERT(ncp->nc_locktd == td); 411 412 count = ncp->nc_exlocks; 413 if ((count & ~NC_EXLOCK_REQ) == 1) { 414 ncp->nc_locktd = NULL; 415 if (ncp->nc_vp) 416 vdrop(ncp->nc_vp); 417 } 418 for (;;) { 419 if ((count & ~NC_EXLOCK_REQ) == 1) { 420 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) { 421 if (count & NC_EXLOCK_REQ) 422 wakeup(ncp); 423 break; 424 } 425 } else { 426 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 427 count - 1)) { 428 break; 429 } 430 } 431 count = ncp->nc_exlocks; 432 } 433 } 434 435 436 /* 437 * cache_hold() and cache_drop() prevent the premature deletion of a 438 * namecache entry but do not prevent operations (such as zapping) on 439 * that namecache entry. 440 * 441 * This routine may only be called from outside this source module if 442 * nc_refs is already at least 1. 443 * 444 * This is a rare case where callers are allowed to hold a spinlock, 445 * so we can't ourselves. 446 * 447 * MPSAFE 448 */ 449 static __inline 450 struct namecache * 451 _cache_hold(struct namecache *ncp) 452 { 453 atomic_add_int(&ncp->nc_refs, 1); 454 return(ncp); 455 } 456 457 /* 458 * Drop a cache entry, taking care to deal with races. 459 * 460 * For potential 1->0 transitions we must hold the ncp lock to safely 461 * test its flags. An unresolved entry with no children must be zapped 462 * to avoid leaks. 463 * 464 * The call to cache_zap() itself will handle all remaining races and 465 * will decrement the ncp's refs regardless. If we are resolved or 466 * have children nc_refs can safely be dropped to 0 without having to 467 * zap the entry. 468 * 469 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion. 470 * 471 * NOTE: cache_zap() may return a non-NULL referenced parent which must 472 * be dropped in a loop. 473 * 474 * MPSAFE 475 */ 476 static __inline 477 void 478 _cache_drop(struct namecache *ncp) 479 { 480 int refs; 481 482 while (ncp) { 483 KKASSERT(ncp->nc_refs > 0); 484 refs = ncp->nc_refs; 485 486 if (refs == 1) { 487 if (_cache_lock_nonblock(ncp) == 0) { 488 ncp->nc_flag &= ~NCF_DEFEREDZAP; 489 if ((ncp->nc_flag & NCF_UNRESOLVED) && 490 TAILQ_EMPTY(&ncp->nc_list)) { 491 ncp = cache_zap(ncp, 1); 492 continue; 493 } 494 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) { 495 _cache_unlock(ncp); 496 break; 497 } 498 _cache_unlock(ncp); 499 } 500 } else { 501 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) 502 break; 503 } 504 cpu_pause(); 505 } 506 } 507 508 /* 509 * Link a new namecache entry to its parent and to the hash table. Be 510 * careful to avoid races if vhold() blocks in the future. 511 * 512 * Both ncp and par must be referenced and locked. 513 * 514 * NOTE: The hash table spinlock is likely held during this call, we 515 * can't do anything fancy. 516 * 517 * MPSAFE 518 */ 519 static void 520 _cache_link_parent(struct namecache *ncp, struct namecache *par, 521 struct nchash_head *nchpp) 522 { 523 KKASSERT(ncp->nc_parent == NULL); 524 ncp->nc_parent = par; 525 ncp->nc_head = nchpp; 526 527 /* 528 * Set inheritance flags. Note that the parent flags may be 529 * stale due to getattr potentially not having been run yet 530 * (it gets run during nlookup()'s). 531 */ 532 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE); 533 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) 534 ncp->nc_flag |= NCF_SF_PNOCACHE; 535 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE)) 536 ncp->nc_flag |= NCF_UF_PCACHE; 537 538 LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash); 539 540 if (TAILQ_EMPTY(&par->nc_list)) { 541 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 542 /* 543 * Any vp associated with an ncp which has children must 544 * be held to prevent it from being recycled. 545 */ 546 if (par->nc_vp) 547 vhold(par->nc_vp); 548 } else { 549 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 550 } 551 } 552 553 /* 554 * Remove the parent and hash associations from a namecache structure. 555 * If this is the last child of the parent the cache_drop(par) will 556 * attempt to recursively zap the parent. 557 * 558 * ncp must be locked. This routine will acquire a temporary lock on 559 * the parent as wlel as the appropriate hash chain. 560 * 561 * MPSAFE 562 */ 563 static void 564 _cache_unlink_parent(struct namecache *ncp) 565 { 566 struct namecache *par; 567 struct vnode *dropvp; 568 569 if ((par = ncp->nc_parent) != NULL) { 570 KKASSERT(ncp->nc_parent == par); 571 _cache_hold(par); 572 _cache_lock(par); 573 spin_lock(&ncp->nc_head->spin); 574 LIST_REMOVE(ncp, nc_hash); 575 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 576 dropvp = NULL; 577 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 578 dropvp = par->nc_vp; 579 spin_unlock(&ncp->nc_head->spin); 580 ncp->nc_parent = NULL; 581 ncp->nc_head = NULL; 582 _cache_unlock(par); 583 _cache_drop(par); 584 585 /* 586 * We can only safely vdrop with no spinlocks held. 587 */ 588 if (dropvp) 589 vdrop(dropvp); 590 } 591 } 592 593 /* 594 * Allocate a new namecache structure. Most of the code does not require 595 * zero-termination of the string but it makes vop_compat_ncreate() easier. 596 * 597 * MPSAFE 598 */ 599 static struct namecache * 600 cache_alloc(int nlen) 601 { 602 struct namecache *ncp; 603 604 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO); 605 if (nlen) 606 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK); 607 ncp->nc_nlen = nlen; 608 ncp->nc_flag = NCF_UNRESOLVED; 609 ncp->nc_error = ENOTCONN; /* needs to be resolved */ 610 ncp->nc_refs = 1; 611 612 TAILQ_INIT(&ncp->nc_list); 613 _cache_lock(ncp); 614 return(ncp); 615 } 616 617 /* 618 * Can only be called for the case where the ncp has never been 619 * associated with anything (so no spinlocks are needed). 620 * 621 * MPSAFE 622 */ 623 static void 624 _cache_free(struct namecache *ncp) 625 { 626 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1); 627 if (ncp->nc_name) 628 kfree(ncp->nc_name, M_VFSCACHE); 629 kfree(ncp, M_VFSCACHE); 630 } 631 632 /* 633 * MPSAFE 634 */ 635 void 636 cache_zero(struct nchandle *nch) 637 { 638 nch->ncp = NULL; 639 nch->mount = NULL; 640 } 641 642 /* 643 * Ref and deref a namecache structure. 644 * 645 * The caller must specify a stable ncp pointer, typically meaning the 646 * ncp is already referenced but this can also occur indirectly through 647 * e.g. holding a lock on a direct child. 648 * 649 * WARNING: Caller may hold an unrelated read spinlock, which means we can't 650 * use read spinlocks here. 651 * 652 * MPSAFE if nch is 653 */ 654 struct nchandle * 655 cache_hold(struct nchandle *nch) 656 { 657 _cache_hold(nch->ncp); 658 atomic_add_int(&nch->mount->mnt_refs, 1); 659 return(nch); 660 } 661 662 /* 663 * Create a copy of a namecache handle for an already-referenced 664 * entry. 665 * 666 * MPSAFE if nch is 667 */ 668 void 669 cache_copy(struct nchandle *nch, struct nchandle *target) 670 { 671 *target = *nch; 672 if (target->ncp) 673 _cache_hold(target->ncp); 674 atomic_add_int(&nch->mount->mnt_refs, 1); 675 } 676 677 /* 678 * MPSAFE if nch is 679 */ 680 void 681 cache_changemount(struct nchandle *nch, struct mount *mp) 682 { 683 atomic_add_int(&nch->mount->mnt_refs, -1); 684 nch->mount = mp; 685 atomic_add_int(&nch->mount->mnt_refs, 1); 686 } 687 688 /* 689 * MPSAFE 690 */ 691 void 692 cache_drop(struct nchandle *nch) 693 { 694 atomic_add_int(&nch->mount->mnt_refs, -1); 695 _cache_drop(nch->ncp); 696 nch->ncp = NULL; 697 nch->mount = NULL; 698 } 699 700 /* 701 * MPSAFE 702 */ 703 void 704 cache_lock(struct nchandle *nch) 705 { 706 _cache_lock(nch->ncp); 707 } 708 709 /* 710 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller 711 * is responsible for checking both for validity on return as they 712 * may have become invalid. 713 * 714 * We have to deal with potential deadlocks here, just ping pong 715 * the lock until we get it (we will always block somewhere when 716 * looping so this is not cpu-intensive). 717 * 718 * which = 0 nch1 not locked, nch2 is locked 719 * which = 1 nch1 is locked, nch2 is not locked 720 */ 721 void 722 cache_relock(struct nchandle *nch1, struct ucred *cred1, 723 struct nchandle *nch2, struct ucred *cred2) 724 { 725 int which; 726 727 which = 0; 728 729 for (;;) { 730 if (which == 0) { 731 if (cache_lock_nonblock(nch1) == 0) { 732 cache_resolve(nch1, cred1); 733 break; 734 } 735 cache_unlock(nch2); 736 cache_lock(nch1); 737 cache_resolve(nch1, cred1); 738 which = 1; 739 } else { 740 if (cache_lock_nonblock(nch2) == 0) { 741 cache_resolve(nch2, cred2); 742 break; 743 } 744 cache_unlock(nch1); 745 cache_lock(nch2); 746 cache_resolve(nch2, cred2); 747 which = 0; 748 } 749 } 750 } 751 752 /* 753 * MPSAFE 754 */ 755 int 756 cache_lock_nonblock(struct nchandle *nch) 757 { 758 return(_cache_lock_nonblock(nch->ncp)); 759 } 760 761 762 /* 763 * MPSAFE 764 */ 765 void 766 cache_unlock(struct nchandle *nch) 767 { 768 _cache_unlock(nch->ncp); 769 } 770 771 /* 772 * ref-and-lock, unlock-and-deref functions. 773 * 774 * This function is primarily used by nlookup. Even though cache_lock 775 * holds the vnode, it is possible that the vnode may have already 776 * initiated a recyclement. 777 * 778 * We want cache_get() to return a definitively usable vnode or a 779 * definitively unresolved ncp. 780 * 781 * MPSAFE 782 */ 783 static 784 struct namecache * 785 _cache_get(struct namecache *ncp) 786 { 787 _cache_hold(ncp); 788 _cache_lock(ncp); 789 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 790 _cache_setunresolved(ncp); 791 return(ncp); 792 } 793 794 /* 795 * This is a special form of _cache_lock() which only succeeds if 796 * it can get a pristine, non-recursive lock. The caller must have 797 * already ref'd the ncp. 798 * 799 * On success the ncp will be locked, on failure it will not. The 800 * ref count does not change either way. 801 * 802 * We want _cache_lock_special() (on success) to return a definitively 803 * usable vnode or a definitively unresolved ncp. 804 * 805 * MPSAFE 806 */ 807 static int 808 _cache_lock_special(struct namecache *ncp) 809 { 810 if (_cache_lock_nonblock(ncp) == 0) { 811 if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) { 812 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 813 _cache_setunresolved(ncp); 814 return(0); 815 } 816 _cache_unlock(ncp); 817 } 818 return(EWOULDBLOCK); 819 } 820 821 822 /* 823 * NOTE: The same nchandle can be passed for both arguments. 824 * 825 * MPSAFE 826 */ 827 void 828 cache_get(struct nchandle *nch, struct nchandle *target) 829 { 830 KKASSERT(nch->ncp->nc_refs > 0); 831 target->mount = nch->mount; 832 target->ncp = _cache_get(nch->ncp); 833 atomic_add_int(&target->mount->mnt_refs, 1); 834 } 835 836 /* 837 * MPSAFE 838 */ 839 static __inline 840 void 841 _cache_put(struct namecache *ncp) 842 { 843 _cache_unlock(ncp); 844 _cache_drop(ncp); 845 } 846 847 /* 848 * MPSAFE 849 */ 850 void 851 cache_put(struct nchandle *nch) 852 { 853 atomic_add_int(&nch->mount->mnt_refs, -1); 854 _cache_put(nch->ncp); 855 nch->ncp = NULL; 856 nch->mount = NULL; 857 } 858 859 /* 860 * Resolve an unresolved ncp by associating a vnode with it. If the 861 * vnode is NULL, a negative cache entry is created. 862 * 863 * The ncp should be locked on entry and will remain locked on return. 864 * 865 * MPSAFE 866 */ 867 static 868 void 869 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp) 870 { 871 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 872 873 if (vp != NULL) { 874 /* 875 * Any vp associated with an ncp which has children must 876 * be held. Any vp associated with a locked ncp must be held. 877 */ 878 if (!TAILQ_EMPTY(&ncp->nc_list)) 879 vhold(vp); 880 spin_lock(&vp->v_spinlock); 881 ncp->nc_vp = vp; 882 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode); 883 spin_unlock(&vp->v_spinlock); 884 if (ncp->nc_exlocks) 885 vhold(vp); 886 887 /* 888 * Set auxiliary flags 889 */ 890 switch(vp->v_type) { 891 case VDIR: 892 ncp->nc_flag |= NCF_ISDIR; 893 break; 894 case VLNK: 895 ncp->nc_flag |= NCF_ISSYMLINK; 896 /* XXX cache the contents of the symlink */ 897 break; 898 default: 899 break; 900 } 901 atomic_add_int(&numcache, 1); 902 ncp->nc_error = 0; 903 } else { 904 /* 905 * When creating a negative cache hit we set the 906 * namecache_gen. A later resolve will clean out the 907 * negative cache hit if the mount point's namecache_gen 908 * has changed. Used by devfs, could also be used by 909 * other remote FSs. 910 */ 911 ncp->nc_vp = NULL; 912 spin_lock(&ncspin); 913 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode); 914 ++numneg; 915 spin_unlock(&ncspin); 916 ncp->nc_error = ENOENT; 917 if (mp) 918 ncp->nc_namecache_gen = mp->mnt_namecache_gen; 919 } 920 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP); 921 } 922 923 /* 924 * MPSAFE 925 */ 926 void 927 cache_setvp(struct nchandle *nch, struct vnode *vp) 928 { 929 _cache_setvp(nch->mount, nch->ncp, vp); 930 } 931 932 /* 933 * MPSAFE 934 */ 935 void 936 cache_settimeout(struct nchandle *nch, int nticks) 937 { 938 struct namecache *ncp = nch->ncp; 939 940 if ((ncp->nc_timeout = ticks + nticks) == 0) 941 ncp->nc_timeout = 1; 942 } 943 944 /* 945 * Disassociate the vnode or negative-cache association and mark a 946 * namecache entry as unresolved again. Note that the ncp is still 947 * left in the hash table and still linked to its parent. 948 * 949 * The ncp should be locked and refd on entry and will remain locked and refd 950 * on return. 951 * 952 * This routine is normally never called on a directory containing children. 953 * However, NFS often does just that in its rename() code as a cop-out to 954 * avoid complex namespace operations. This disconnects a directory vnode 955 * from its namecache and can cause the OLDAPI and NEWAPI to get out of 956 * sync. 957 * 958 * MPSAFE 959 */ 960 static 961 void 962 _cache_setunresolved(struct namecache *ncp) 963 { 964 struct vnode *vp; 965 966 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 967 ncp->nc_flag |= NCF_UNRESOLVED; 968 ncp->nc_timeout = 0; 969 ncp->nc_error = ENOTCONN; 970 if ((vp = ncp->nc_vp) != NULL) { 971 atomic_add_int(&numcache, -1); 972 spin_lock(&vp->v_spinlock); 973 ncp->nc_vp = NULL; 974 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode); 975 spin_unlock(&vp->v_spinlock); 976 977 /* 978 * Any vp associated with an ncp with children is 979 * held by that ncp. Any vp associated with a locked 980 * ncp is held by that ncp. These conditions must be 981 * undone when the vp is cleared out from the ncp. 982 */ 983 if (!TAILQ_EMPTY(&ncp->nc_list)) 984 vdrop(vp); 985 if (ncp->nc_exlocks) 986 vdrop(vp); 987 } else { 988 spin_lock(&ncspin); 989 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode); 990 --numneg; 991 spin_unlock(&ncspin); 992 } 993 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK); 994 } 995 } 996 997 /* 998 * The cache_nresolve() code calls this function to automatically 999 * set a resolved cache element to unresolved if it has timed out 1000 * or if it is a negative cache hit and the mount point namecache_gen 1001 * has changed. 1002 * 1003 * MPSAFE 1004 */ 1005 static __inline void 1006 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp) 1007 { 1008 /* 1009 * Already in an unresolved state, nothing to do. 1010 */ 1011 if (ncp->nc_flag & NCF_UNRESOLVED) 1012 return; 1013 1014 /* 1015 * Try to zap entries that have timed out. We have 1016 * to be careful here because locked leafs may depend 1017 * on the vnode remaining intact in a parent, so only 1018 * do this under very specific conditions. 1019 */ 1020 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 && 1021 TAILQ_EMPTY(&ncp->nc_list)) { 1022 _cache_setunresolved(ncp); 1023 return; 1024 } 1025 1026 /* 1027 * If a resolved negative cache hit is invalid due to 1028 * the mount's namecache generation being bumped, zap it. 1029 */ 1030 if (ncp->nc_vp == NULL && 1031 ncp->nc_namecache_gen != mp->mnt_namecache_gen) { 1032 _cache_setunresolved(ncp); 1033 return; 1034 } 1035 } 1036 1037 /* 1038 * MPSAFE 1039 */ 1040 void 1041 cache_setunresolved(struct nchandle *nch) 1042 { 1043 _cache_setunresolved(nch->ncp); 1044 } 1045 1046 /* 1047 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist 1048 * looking for matches. This flag tells the lookup code when it must 1049 * check for a mount linkage and also prevents the directories in question 1050 * from being deleted or renamed. 1051 * 1052 * MPSAFE 1053 */ 1054 static 1055 int 1056 cache_clrmountpt_callback(struct mount *mp, void *data) 1057 { 1058 struct nchandle *nch = data; 1059 1060 if (mp->mnt_ncmounton.ncp == nch->ncp) 1061 return(1); 1062 if (mp->mnt_ncmountpt.ncp == nch->ncp) 1063 return(1); 1064 return(0); 1065 } 1066 1067 /* 1068 * MPSAFE 1069 */ 1070 void 1071 cache_clrmountpt(struct nchandle *nch) 1072 { 1073 int count; 1074 1075 count = mountlist_scan(cache_clrmountpt_callback, nch, 1076 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 1077 if (count == 0) 1078 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT; 1079 } 1080 1081 /* 1082 * Invalidate portions of the namecache topology given a starting entry. 1083 * The passed ncp is set to an unresolved state and: 1084 * 1085 * The passed ncp must be referencxed and locked. The routine may unlock 1086 * and relock ncp several times, and will recheck the children and loop 1087 * to catch races. When done the passed ncp will be returned with the 1088 * reference and lock intact. 1089 * 1090 * CINV_DESTROY - Set a flag in the passed ncp entry indicating 1091 * that the physical underlying nodes have been 1092 * destroyed... as in deleted. For example, when 1093 * a directory is removed. This will cause record 1094 * lookups on the name to no longer be able to find 1095 * the record and tells the resolver to return failure 1096 * rather then trying to resolve through the parent. 1097 * 1098 * The topology itself, including ncp->nc_name, 1099 * remains intact. 1100 * 1101 * This only applies to the passed ncp, if CINV_CHILDREN 1102 * is specified the children are not flagged. 1103 * 1104 * CINV_CHILDREN - Set all children (recursively) to an unresolved 1105 * state as well. 1106 * 1107 * Note that this will also have the side effect of 1108 * cleaning out any unreferenced nodes in the topology 1109 * from the leaves up as the recursion backs out. 1110 * 1111 * Note that the topology for any referenced nodes remains intact, but 1112 * the nodes will be marked as having been destroyed and will be set 1113 * to an unresolved state. 1114 * 1115 * It is possible for cache_inval() to race a cache_resolve(), meaning that 1116 * the namecache entry may not actually be invalidated on return if it was 1117 * revalidated while recursing down into its children. This code guarentees 1118 * that the node(s) will go through an invalidation cycle, but does not 1119 * guarentee that they will remain in an invalidated state. 1120 * 1121 * Returns non-zero if a revalidation was detected during the invalidation 1122 * recursion, zero otherwise. Note that since only the original ncp is 1123 * locked the revalidation ultimately can only indicate that the original ncp 1124 * *MIGHT* no have been reresolved. 1125 * 1126 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we 1127 * have to avoid blowing out the kernel stack. We do this by saving the 1128 * deep namecache node and aborting the recursion, then re-recursing at that 1129 * node using a depth-first algorithm in order to allow multiple deep 1130 * recursions to chain through each other, then we restart the invalidation 1131 * from scratch. 1132 * 1133 * MPSAFE 1134 */ 1135 1136 struct cinvtrack { 1137 struct namecache *resume_ncp; 1138 int depth; 1139 }; 1140 1141 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *); 1142 1143 static 1144 int 1145 _cache_inval(struct namecache *ncp, int flags) 1146 { 1147 struct cinvtrack track; 1148 struct namecache *ncp2; 1149 int r; 1150 1151 track.depth = 0; 1152 track.resume_ncp = NULL; 1153 1154 for (;;) { 1155 r = _cache_inval_internal(ncp, flags, &track); 1156 if (track.resume_ncp == NULL) 1157 break; 1158 kprintf("Warning: deep namecache recursion at %s\n", 1159 ncp->nc_name); 1160 _cache_unlock(ncp); 1161 while ((ncp2 = track.resume_ncp) != NULL) { 1162 track.resume_ncp = NULL; 1163 _cache_lock(ncp2); 1164 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY, 1165 &track); 1166 _cache_put(ncp2); 1167 } 1168 _cache_lock(ncp); 1169 } 1170 return(r); 1171 } 1172 1173 int 1174 cache_inval(struct nchandle *nch, int flags) 1175 { 1176 return(_cache_inval(nch->ncp, flags)); 1177 } 1178 1179 /* 1180 * Helper for _cache_inval(). The passed ncp is refd and locked and 1181 * remains that way on return, but may be unlocked/relocked multiple 1182 * times by the routine. 1183 */ 1184 static int 1185 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track) 1186 { 1187 struct namecache *kid; 1188 struct namecache *nextkid; 1189 int rcnt = 0; 1190 1191 KKASSERT(ncp->nc_exlocks); 1192 1193 _cache_setunresolved(ncp); 1194 if (flags & CINV_DESTROY) 1195 ncp->nc_flag |= NCF_DESTROYED; 1196 if ((flags & CINV_CHILDREN) && 1197 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL 1198 ) { 1199 _cache_hold(kid); 1200 if (++track->depth > MAX_RECURSION_DEPTH) { 1201 track->resume_ncp = ncp; 1202 _cache_hold(ncp); 1203 ++rcnt; 1204 } 1205 _cache_unlock(ncp); 1206 while (kid) { 1207 if (track->resume_ncp) { 1208 _cache_drop(kid); 1209 break; 1210 } 1211 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL) 1212 _cache_hold(nextkid); 1213 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 || 1214 TAILQ_FIRST(&kid->nc_list) 1215 ) { 1216 _cache_lock(kid); 1217 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track); 1218 _cache_unlock(kid); 1219 } 1220 _cache_drop(kid); 1221 kid = nextkid; 1222 } 1223 --track->depth; 1224 _cache_lock(ncp); 1225 } 1226 1227 /* 1228 * Someone could have gotten in there while ncp was unlocked, 1229 * retry if so. 1230 */ 1231 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 1232 ++rcnt; 1233 return (rcnt); 1234 } 1235 1236 /* 1237 * Invalidate a vnode's namecache associations. To avoid races against 1238 * the resolver we do not invalidate a node which we previously invalidated 1239 * but which was then re-resolved while we were in the invalidation loop. 1240 * 1241 * Returns non-zero if any namecache entries remain after the invalidation 1242 * loop completed. 1243 * 1244 * NOTE: Unlike the namecache topology which guarentees that ncp's will not 1245 * be ripped out of the topology while held, the vnode's v_namecache 1246 * list has no such restriction. NCP's can be ripped out of the list 1247 * at virtually any time if not locked, even if held. 1248 * 1249 * In addition, the v_namecache list itself must be locked via 1250 * the vnode's spinlock. 1251 * 1252 * MPSAFE 1253 */ 1254 int 1255 cache_inval_vp(struct vnode *vp, int flags) 1256 { 1257 struct namecache *ncp; 1258 struct namecache *next; 1259 1260 restart: 1261 spin_lock(&vp->v_spinlock); 1262 ncp = TAILQ_FIRST(&vp->v_namecache); 1263 if (ncp) 1264 _cache_hold(ncp); 1265 while (ncp) { 1266 /* loop entered with ncp held and vp spin-locked */ 1267 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL) 1268 _cache_hold(next); 1269 spin_unlock(&vp->v_spinlock); 1270 _cache_lock(ncp); 1271 if (ncp->nc_vp != vp) { 1272 kprintf("Warning: cache_inval_vp: race-A detected on " 1273 "%s\n", ncp->nc_name); 1274 _cache_put(ncp); 1275 if (next) 1276 _cache_drop(next); 1277 goto restart; 1278 } 1279 _cache_inval(ncp, flags); 1280 _cache_put(ncp); /* also releases reference */ 1281 ncp = next; 1282 spin_lock(&vp->v_spinlock); 1283 if (ncp && ncp->nc_vp != vp) { 1284 spin_unlock(&vp->v_spinlock); 1285 kprintf("Warning: cache_inval_vp: race-B detected on " 1286 "%s\n", ncp->nc_name); 1287 _cache_drop(ncp); 1288 goto restart; 1289 } 1290 } 1291 spin_unlock(&vp->v_spinlock); 1292 return(TAILQ_FIRST(&vp->v_namecache) != NULL); 1293 } 1294 1295 /* 1296 * This routine is used instead of the normal cache_inval_vp() when we 1297 * are trying to recycle otherwise good vnodes. 1298 * 1299 * Return 0 on success, non-zero if not all namecache records could be 1300 * disassociated from the vnode (for various reasons). 1301 * 1302 * MPSAFE 1303 */ 1304 int 1305 cache_inval_vp_nonblock(struct vnode *vp) 1306 { 1307 struct namecache *ncp; 1308 struct namecache *next; 1309 1310 spin_lock(&vp->v_spinlock); 1311 ncp = TAILQ_FIRST(&vp->v_namecache); 1312 if (ncp) 1313 _cache_hold(ncp); 1314 while (ncp) { 1315 /* loop entered with ncp held */ 1316 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL) 1317 _cache_hold(next); 1318 spin_unlock(&vp->v_spinlock); 1319 if (_cache_lock_nonblock(ncp)) { 1320 _cache_drop(ncp); 1321 if (next) 1322 _cache_drop(next); 1323 goto done; 1324 } 1325 if (ncp->nc_vp != vp) { 1326 kprintf("Warning: cache_inval_vp: race-A detected on " 1327 "%s\n", ncp->nc_name); 1328 _cache_put(ncp); 1329 if (next) 1330 _cache_drop(next); 1331 goto done; 1332 } 1333 _cache_inval(ncp, 0); 1334 _cache_put(ncp); /* also releases reference */ 1335 ncp = next; 1336 spin_lock(&vp->v_spinlock); 1337 if (ncp && ncp->nc_vp != vp) { 1338 spin_unlock(&vp->v_spinlock); 1339 kprintf("Warning: cache_inval_vp: race-B detected on " 1340 "%s\n", ncp->nc_name); 1341 _cache_drop(ncp); 1342 goto done; 1343 } 1344 } 1345 spin_unlock(&vp->v_spinlock); 1346 done: 1347 return(TAILQ_FIRST(&vp->v_namecache) != NULL); 1348 } 1349 1350 /* 1351 * The source ncp has been renamed to the target ncp. Both fncp and tncp 1352 * must be locked. The target ncp is destroyed (as a normal rename-over 1353 * would destroy the target file or directory). 1354 * 1355 * Because there may be references to the source ncp we cannot copy its 1356 * contents to the target. Instead the source ncp is relinked as the target 1357 * and the target ncp is removed from the namecache topology. 1358 * 1359 * MPSAFE 1360 */ 1361 void 1362 cache_rename(struct nchandle *fnch, struct nchandle *tnch) 1363 { 1364 struct namecache *fncp = fnch->ncp; 1365 struct namecache *tncp = tnch->ncp; 1366 struct namecache *tncp_par; 1367 struct nchash_head *nchpp; 1368 u_int32_t hash; 1369 char *oname; 1370 1371 /* 1372 * Rename fncp (unlink) 1373 */ 1374 _cache_unlink_parent(fncp); 1375 oname = fncp->nc_name; 1376 fncp->nc_name = tncp->nc_name; 1377 fncp->nc_nlen = tncp->nc_nlen; 1378 tncp_par = tncp->nc_parent; 1379 _cache_hold(tncp_par); 1380 _cache_lock(tncp_par); 1381 1382 /* 1383 * Rename fncp (relink) 1384 */ 1385 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT); 1386 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash); 1387 nchpp = NCHHASH(hash); 1388 1389 spin_lock(&nchpp->spin); 1390 _cache_link_parent(fncp, tncp_par, nchpp); 1391 spin_unlock(&nchpp->spin); 1392 1393 _cache_put(tncp_par); 1394 1395 /* 1396 * Get rid of the overwritten tncp (unlink) 1397 */ 1398 _cache_setunresolved(tncp); 1399 _cache_unlink_parent(tncp); 1400 tncp->nc_name = NULL; 1401 tncp->nc_nlen = 0; 1402 1403 if (oname) 1404 kfree(oname, M_VFSCACHE); 1405 } 1406 1407 /* 1408 * vget the vnode associated with the namecache entry. Resolve the namecache 1409 * entry if necessary. The passed ncp must be referenced and locked. 1410 * 1411 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked 1412 * (depending on the passed lk_type) will be returned in *vpp with an error 1413 * of 0, or NULL will be returned in *vpp with a non-0 error code. The 1414 * most typical error is ENOENT, meaning that the ncp represents a negative 1415 * cache hit and there is no vnode to retrieve, but other errors can occur 1416 * too. 1417 * 1418 * The vget() can race a reclaim. If this occurs we re-resolve the 1419 * namecache entry. 1420 * 1421 * There are numerous places in the kernel where vget() is called on a 1422 * vnode while one or more of its namecache entries is locked. Releasing 1423 * a vnode never deadlocks against locked namecache entries (the vnode 1424 * will not get recycled while referenced ncp's exist). This means we 1425 * can safely acquire the vnode. In fact, we MUST NOT release the ncp 1426 * lock when acquiring the vp lock or we might cause a deadlock. 1427 * 1428 * MPSAFE 1429 */ 1430 int 1431 cache_vget(struct nchandle *nch, struct ucred *cred, 1432 int lk_type, struct vnode **vpp) 1433 { 1434 struct namecache *ncp; 1435 struct vnode *vp; 1436 int error; 1437 1438 ncp = nch->ncp; 1439 KKASSERT(ncp->nc_locktd == curthread); 1440 again: 1441 vp = NULL; 1442 if (ncp->nc_flag & NCF_UNRESOLVED) 1443 error = cache_resolve(nch, cred); 1444 else 1445 error = 0; 1446 1447 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 1448 error = vget(vp, lk_type); 1449 if (error) { 1450 /* 1451 * VRECLAIM race 1452 */ 1453 if (error == ENOENT) { 1454 kprintf("Warning: vnode reclaim race detected " 1455 "in cache_vget on %p (%s)\n", 1456 vp, ncp->nc_name); 1457 _cache_setunresolved(ncp); 1458 goto again; 1459 } 1460 1461 /* 1462 * Not a reclaim race, some other error. 1463 */ 1464 KKASSERT(ncp->nc_vp == vp); 1465 vp = NULL; 1466 } else { 1467 KKASSERT(ncp->nc_vp == vp); 1468 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 1469 } 1470 } 1471 if (error == 0 && vp == NULL) 1472 error = ENOENT; 1473 *vpp = vp; 1474 return(error); 1475 } 1476 1477 int 1478 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp) 1479 { 1480 struct namecache *ncp; 1481 struct vnode *vp; 1482 int error; 1483 1484 ncp = nch->ncp; 1485 KKASSERT(ncp->nc_locktd == curthread); 1486 again: 1487 vp = NULL; 1488 if (ncp->nc_flag & NCF_UNRESOLVED) 1489 error = cache_resolve(nch, cred); 1490 else 1491 error = 0; 1492 1493 if (error == 0 && (vp = ncp->nc_vp) != NULL) { 1494 error = vget(vp, LK_SHARED); 1495 if (error) { 1496 /* 1497 * VRECLAIM race 1498 */ 1499 if (error == ENOENT) { 1500 kprintf("Warning: vnode reclaim race detected " 1501 "in cache_vget on %p (%s)\n", 1502 vp, ncp->nc_name); 1503 _cache_setunresolved(ncp); 1504 goto again; 1505 } 1506 1507 /* 1508 * Not a reclaim race, some other error. 1509 */ 1510 KKASSERT(ncp->nc_vp == vp); 1511 vp = NULL; 1512 } else { 1513 KKASSERT(ncp->nc_vp == vp); 1514 KKASSERT((vp->v_flag & VRECLAIMED) == 0); 1515 /* caller does not want a lock */ 1516 vn_unlock(vp); 1517 } 1518 } 1519 if (error == 0 && vp == NULL) 1520 error = ENOENT; 1521 *vpp = vp; 1522 return(error); 1523 } 1524 1525 /* 1526 * Return a referenced vnode representing the parent directory of 1527 * ncp. 1528 * 1529 * Because the caller has locked the ncp it should not be possible for 1530 * the parent ncp to go away. However, the parent can unresolve its 1531 * dvp at any time so we must be able to acquire a lock on the parent 1532 * to safely access nc_vp. 1533 * 1534 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock, 1535 * so use vhold()/vdrop() while holding the lock to prevent dvp from 1536 * getting destroyed. 1537 * 1538 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a 1539 * lock on the ncp in question.. 1540 */ 1541 static struct vnode * 1542 cache_dvpref(struct namecache *ncp) 1543 { 1544 struct namecache *par; 1545 struct vnode *dvp; 1546 1547 dvp = NULL; 1548 if ((par = ncp->nc_parent) != NULL) { 1549 _cache_hold(par); 1550 _cache_lock(par); 1551 if ((par->nc_flag & NCF_UNRESOLVED) == 0) { 1552 if ((dvp = par->nc_vp) != NULL) 1553 vhold(dvp); 1554 } 1555 _cache_unlock(par); 1556 if (dvp) { 1557 if (vget(dvp, LK_SHARED) == 0) { 1558 vn_unlock(dvp); 1559 vdrop(dvp); 1560 /* return refd, unlocked dvp */ 1561 } else { 1562 vdrop(dvp); 1563 dvp = NULL; 1564 } 1565 } 1566 _cache_drop(par); 1567 } 1568 return(dvp); 1569 } 1570 1571 /* 1572 * Convert a directory vnode to a namecache record without any other 1573 * knowledge of the topology. This ONLY works with directory vnodes and 1574 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the 1575 * returned ncp (if not NULL) will be held and unlocked. 1576 * 1577 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned. 1578 * If 'makeit' is 1 we attempt to track-down and create the namecache topology 1579 * for dvp. This will fail only if the directory has been deleted out from 1580 * under the caller. 1581 * 1582 * Callers must always check for a NULL return no matter the value of 'makeit'. 1583 * 1584 * To avoid underflowing the kernel stack each recursive call increments 1585 * the makeit variable. 1586 */ 1587 1588 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 1589 struct vnode *dvp, char *fakename); 1590 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 1591 struct vnode **saved_dvp); 1592 1593 int 1594 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit, 1595 struct nchandle *nch) 1596 { 1597 struct vnode *saved_dvp; 1598 struct vnode *pvp; 1599 char *fakename; 1600 int error; 1601 1602 nch->ncp = NULL; 1603 nch->mount = dvp->v_mount; 1604 saved_dvp = NULL; 1605 fakename = NULL; 1606 1607 /* 1608 * Handle the makeit == 0 degenerate case 1609 */ 1610 if (makeit == 0) { 1611 spin_lock(&dvp->v_spinlock); 1612 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 1613 if (nch->ncp) 1614 cache_hold(nch); 1615 spin_unlock(&dvp->v_spinlock); 1616 } 1617 1618 /* 1619 * Loop until resolution, inside code will break out on error. 1620 */ 1621 while (makeit) { 1622 /* 1623 * Break out if we successfully acquire a working ncp. 1624 */ 1625 spin_lock(&dvp->v_spinlock); 1626 nch->ncp = TAILQ_FIRST(&dvp->v_namecache); 1627 if (nch->ncp) { 1628 cache_hold(nch); 1629 spin_unlock(&dvp->v_spinlock); 1630 break; 1631 } 1632 spin_unlock(&dvp->v_spinlock); 1633 1634 /* 1635 * If dvp is the root of its filesystem it should already 1636 * have a namecache pointer associated with it as a side 1637 * effect of the mount, but it may have been disassociated. 1638 */ 1639 if (dvp->v_flag & VROOT) { 1640 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp); 1641 error = cache_resolve_mp(nch->mount); 1642 _cache_put(nch->ncp); 1643 if (ncvp_debug) { 1644 kprintf("cache_fromdvp: resolve root of mount %p error %d", 1645 dvp->v_mount, error); 1646 } 1647 if (error) { 1648 if (ncvp_debug) 1649 kprintf(" failed\n"); 1650 nch->ncp = NULL; 1651 break; 1652 } 1653 if (ncvp_debug) 1654 kprintf(" succeeded\n"); 1655 continue; 1656 } 1657 1658 /* 1659 * If we are recursed too deeply resort to an O(n^2) 1660 * algorithm to resolve the namecache topology. The 1661 * resolved pvp is left referenced in saved_dvp to 1662 * prevent the tree from being destroyed while we loop. 1663 */ 1664 if (makeit > 20) { 1665 error = cache_fromdvp_try(dvp, cred, &saved_dvp); 1666 if (error) { 1667 kprintf("lookupdotdot(longpath) failed %d " 1668 "dvp %p\n", error, dvp); 1669 nch->ncp = NULL; 1670 break; 1671 } 1672 continue; 1673 } 1674 1675 /* 1676 * Get the parent directory and resolve its ncp. 1677 */ 1678 if (fakename) { 1679 kfree(fakename, M_TEMP); 1680 fakename = NULL; 1681 } 1682 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 1683 &fakename); 1684 if (error) { 1685 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp); 1686 break; 1687 } 1688 vn_unlock(pvp); 1689 1690 /* 1691 * Reuse makeit as a recursion depth counter. On success 1692 * nch will be fully referenced. 1693 */ 1694 cache_fromdvp(pvp, cred, makeit + 1, nch); 1695 vrele(pvp); 1696 if (nch->ncp == NULL) 1697 break; 1698 1699 /* 1700 * Do an inefficient scan of pvp (embodied by ncp) to look 1701 * for dvp. This will create a namecache record for dvp on 1702 * success. We loop up to recheck on success. 1703 * 1704 * ncp and dvp are both held but not locked. 1705 */ 1706 error = cache_inefficient_scan(nch, cred, dvp, fakename); 1707 if (error) { 1708 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 1709 pvp, nch->ncp->nc_name, dvp); 1710 cache_drop(nch); 1711 /* nch was NULLed out, reload mount */ 1712 nch->mount = dvp->v_mount; 1713 break; 1714 } 1715 if (ncvp_debug) { 1716 kprintf("cache_fromdvp: scan %p (%s) succeeded\n", 1717 pvp, nch->ncp->nc_name); 1718 } 1719 cache_drop(nch); 1720 /* nch was NULLed out, reload mount */ 1721 nch->mount = dvp->v_mount; 1722 } 1723 1724 /* 1725 * If nch->ncp is non-NULL it will have been held already. 1726 */ 1727 if (fakename) 1728 kfree(fakename, M_TEMP); 1729 if (saved_dvp) 1730 vrele(saved_dvp); 1731 if (nch->ncp) 1732 return (0); 1733 return (EINVAL); 1734 } 1735 1736 /* 1737 * Go up the chain of parent directories until we find something 1738 * we can resolve into the namecache. This is very inefficient. 1739 */ 1740 static 1741 int 1742 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 1743 struct vnode **saved_dvp) 1744 { 1745 struct nchandle nch; 1746 struct vnode *pvp; 1747 int error; 1748 static time_t last_fromdvp_report; 1749 char *fakename; 1750 1751 /* 1752 * Loop getting the parent directory vnode until we get something we 1753 * can resolve in the namecache. 1754 */ 1755 vref(dvp); 1756 nch.mount = dvp->v_mount; 1757 nch.ncp = NULL; 1758 fakename = NULL; 1759 1760 for (;;) { 1761 if (fakename) { 1762 kfree(fakename, M_TEMP); 1763 fakename = NULL; 1764 } 1765 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 1766 &fakename); 1767 if (error) { 1768 vrele(dvp); 1769 break; 1770 } 1771 vn_unlock(pvp); 1772 spin_lock(&pvp->v_spinlock); 1773 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 1774 _cache_hold(nch.ncp); 1775 spin_unlock(&pvp->v_spinlock); 1776 vrele(pvp); 1777 break; 1778 } 1779 spin_unlock(&pvp->v_spinlock); 1780 if (pvp->v_flag & VROOT) { 1781 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp); 1782 error = cache_resolve_mp(nch.mount); 1783 _cache_unlock(nch.ncp); 1784 vrele(pvp); 1785 if (error) { 1786 _cache_drop(nch.ncp); 1787 nch.ncp = NULL; 1788 vrele(dvp); 1789 } 1790 break; 1791 } 1792 vrele(dvp); 1793 dvp = pvp; 1794 } 1795 if (error == 0) { 1796 if (last_fromdvp_report != time_second) { 1797 last_fromdvp_report = time_second; 1798 kprintf("Warning: extremely inefficient path " 1799 "resolution on %s\n", 1800 nch.ncp->nc_name); 1801 } 1802 error = cache_inefficient_scan(&nch, cred, dvp, fakename); 1803 1804 /* 1805 * Hopefully dvp now has a namecache record associated with 1806 * it. Leave it referenced to prevent the kernel from 1807 * recycling the vnode. Otherwise extremely long directory 1808 * paths could result in endless recycling. 1809 */ 1810 if (*saved_dvp) 1811 vrele(*saved_dvp); 1812 *saved_dvp = dvp; 1813 _cache_drop(nch.ncp); 1814 } 1815 if (fakename) 1816 kfree(fakename, M_TEMP); 1817 return (error); 1818 } 1819 1820 /* 1821 * Do an inefficient scan of the directory represented by ncp looking for 1822 * the directory vnode dvp. ncp must be held but not locked on entry and 1823 * will be held on return. dvp must be refd but not locked on entry and 1824 * will remain refd on return. 1825 * 1826 * Why do this at all? Well, due to its stateless nature the NFS server 1827 * converts file handles directly to vnodes without necessarily going through 1828 * the namecache ops that would otherwise create the namecache topology 1829 * leading to the vnode. We could either (1) Change the namecache algorithms 1830 * to allow disconnect namecache records that are re-merged opportunistically, 1831 * or (2) Make the NFS server backtrack and scan to recover a connected 1832 * namecache topology in order to then be able to issue new API lookups. 1833 * 1834 * It turns out that (1) is a huge mess. It takes a nice clean set of 1835 * namecache algorithms and introduces a lot of complication in every subsystem 1836 * that calls into the namecache to deal with the re-merge case, especially 1837 * since we are using the namecache to placehold negative lookups and the 1838 * vnode might not be immediately assigned. (2) is certainly far less 1839 * efficient then (1), but since we are only talking about directories here 1840 * (which are likely to remain cached), the case does not actually run all 1841 * that often and has the supreme advantage of not polluting the namecache 1842 * algorithms. 1843 * 1844 * If a fakename is supplied just construct a namecache entry using the 1845 * fake name. 1846 */ 1847 static int 1848 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred, 1849 struct vnode *dvp, char *fakename) 1850 { 1851 struct nlcomponent nlc; 1852 struct nchandle rncp; 1853 struct dirent *den; 1854 struct vnode *pvp; 1855 struct vattr vat; 1856 struct iovec iov; 1857 struct uio uio; 1858 int blksize; 1859 int eofflag; 1860 int bytes; 1861 char *rbuf; 1862 int error; 1863 1864 vat.va_blocksize = 0; 1865 if ((error = VOP_GETATTR(dvp, &vat)) != 0) 1866 return (error); 1867 cache_lock(nch); 1868 error = cache_vref(nch, cred, &pvp); 1869 cache_unlock(nch); 1870 if (error) 1871 return (error); 1872 if (ncvp_debug) { 1873 kprintf("inefficient_scan: directory iosize %ld " 1874 "vattr fileid = %lld\n", 1875 vat.va_blocksize, 1876 (long long)vat.va_fileid); 1877 } 1878 1879 /* 1880 * Use the supplied fakename if not NULL. Fake names are typically 1881 * not in the actual filesystem hierarchy. This is used by HAMMER 1882 * to glue @@timestamp recursions together. 1883 */ 1884 if (fakename) { 1885 nlc.nlc_nameptr = fakename; 1886 nlc.nlc_namelen = strlen(fakename); 1887 rncp = cache_nlookup(nch, &nlc); 1888 goto done; 1889 } 1890 1891 if ((blksize = vat.va_blocksize) == 0) 1892 blksize = DEV_BSIZE; 1893 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK); 1894 rncp.ncp = NULL; 1895 1896 eofflag = 0; 1897 uio.uio_offset = 0; 1898 again: 1899 iov.iov_base = rbuf; 1900 iov.iov_len = blksize; 1901 uio.uio_iov = &iov; 1902 uio.uio_iovcnt = 1; 1903 uio.uio_resid = blksize; 1904 uio.uio_segflg = UIO_SYSSPACE; 1905 uio.uio_rw = UIO_READ; 1906 uio.uio_td = curthread; 1907 1908 if (ncvp_debug >= 2) 1909 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset); 1910 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL); 1911 if (error == 0) { 1912 den = (struct dirent *)rbuf; 1913 bytes = blksize - uio.uio_resid; 1914 1915 while (bytes > 0) { 1916 if (ncvp_debug >= 2) { 1917 kprintf("cache_inefficient_scan: %*.*s\n", 1918 den->d_namlen, den->d_namlen, 1919 den->d_name); 1920 } 1921 if (den->d_type != DT_WHT && 1922 den->d_ino == vat.va_fileid) { 1923 if (ncvp_debug) { 1924 kprintf("cache_inefficient_scan: " 1925 "MATCHED inode %lld path %s/%*.*s\n", 1926 (long long)vat.va_fileid, 1927 nch->ncp->nc_name, 1928 den->d_namlen, den->d_namlen, 1929 den->d_name); 1930 } 1931 nlc.nlc_nameptr = den->d_name; 1932 nlc.nlc_namelen = den->d_namlen; 1933 rncp = cache_nlookup(nch, &nlc); 1934 KKASSERT(rncp.ncp != NULL); 1935 break; 1936 } 1937 bytes -= _DIRENT_DIRSIZ(den); 1938 den = _DIRENT_NEXT(den); 1939 } 1940 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize) 1941 goto again; 1942 } 1943 kfree(rbuf, M_TEMP); 1944 done: 1945 vrele(pvp); 1946 if (rncp.ncp) { 1947 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) { 1948 _cache_setvp(rncp.mount, rncp.ncp, dvp); 1949 if (ncvp_debug >= 2) { 1950 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n", 1951 nch->ncp->nc_name, rncp.ncp->nc_name, dvp); 1952 } 1953 } else { 1954 if (ncvp_debug >= 2) { 1955 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n", 1956 nch->ncp->nc_name, rncp.ncp->nc_name, dvp, 1957 rncp.ncp->nc_vp); 1958 } 1959 } 1960 if (rncp.ncp->nc_vp == NULL) 1961 error = rncp.ncp->nc_error; 1962 /* 1963 * Release rncp after a successful nlookup. rncp was fully 1964 * referenced. 1965 */ 1966 cache_put(&rncp); 1967 } else { 1968 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n", 1969 dvp, nch->ncp->nc_name); 1970 error = ENOENT; 1971 } 1972 return (error); 1973 } 1974 1975 /* 1976 * Zap a namecache entry. The ncp is unconditionally set to an unresolved 1977 * state, which disassociates it from its vnode or ncneglist. 1978 * 1979 * Then, if there are no additional references to the ncp and no children, 1980 * the ncp is removed from the topology and destroyed. 1981 * 1982 * References and/or children may exist if the ncp is in the middle of the 1983 * topology, preventing the ncp from being destroyed. 1984 * 1985 * This function must be called with the ncp held and locked and will unlock 1986 * and drop it during zapping. 1987 * 1988 * If nonblock is non-zero and the parent ncp cannot be locked we give up. 1989 * This case can occur in the cache_drop() path. 1990 * 1991 * This function may returned a held (but NOT locked) parent node which the 1992 * caller must drop. We do this so _cache_drop() can loop, to avoid 1993 * blowing out the kernel stack. 1994 * 1995 * WARNING! For MPSAFE operation this routine must acquire up to three 1996 * spin locks to be able to safely test nc_refs. Lock order is 1997 * very important. 1998 * 1999 * hash spinlock if on hash list 2000 * parent spinlock if child of parent 2001 * (the ncp is unresolved so there is no vnode association) 2002 */ 2003 static struct namecache * 2004 cache_zap(struct namecache *ncp, int nonblock) 2005 { 2006 struct namecache *par; 2007 struct vnode *dropvp; 2008 int refs; 2009 2010 /* 2011 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED. 2012 */ 2013 _cache_setunresolved(ncp); 2014 2015 /* 2016 * Try to scrap the entry and possibly tail-recurse on its parent. 2017 * We only scrap unref'd (other then our ref) unresolved entries, 2018 * we do not scrap 'live' entries. 2019 * 2020 * Note that once the spinlocks are acquired if nc_refs == 1 no 2021 * other references are possible. If it isn't, however, we have 2022 * to decrement but also be sure to avoid a 1->0 transition. 2023 */ 2024 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 2025 KKASSERT(ncp->nc_refs > 0); 2026 2027 /* 2028 * Acquire locks. Note that the parent can't go away while we hold 2029 * a child locked. 2030 */ 2031 if ((par = ncp->nc_parent) != NULL) { 2032 if (nonblock) { 2033 for (;;) { 2034 if (_cache_lock_nonblock(par) == 0) 2035 break; 2036 refs = ncp->nc_refs; 2037 ncp->nc_flag |= NCF_DEFEREDZAP; 2038 ++numdefered; /* MP race ok */ 2039 if (atomic_cmpset_int(&ncp->nc_refs, 2040 refs, refs - 1)) { 2041 _cache_unlock(ncp); 2042 return(NULL); 2043 } 2044 cpu_pause(); 2045 } 2046 _cache_hold(par); 2047 } else { 2048 _cache_hold(par); 2049 _cache_lock(par); 2050 } 2051 spin_lock(&ncp->nc_head->spin); 2052 } 2053 2054 /* 2055 * If someone other then us has a ref or we have children 2056 * we cannot zap the entry. The 1->0 transition and any 2057 * further list operation is protected by the spinlocks 2058 * we have acquired but other transitions are not. 2059 */ 2060 for (;;) { 2061 refs = ncp->nc_refs; 2062 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list)) 2063 break; 2064 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) { 2065 if (par) { 2066 spin_unlock(&ncp->nc_head->spin); 2067 _cache_put(par); 2068 } 2069 _cache_unlock(ncp); 2070 return(NULL); 2071 } 2072 cpu_pause(); 2073 } 2074 2075 /* 2076 * We are the only ref and with the spinlocks held no further 2077 * refs can be acquired by others. 2078 * 2079 * Remove us from the hash list and parent list. We have to 2080 * drop a ref on the parent's vp if the parent's list becomes 2081 * empty. 2082 */ 2083 dropvp = NULL; 2084 if (par) { 2085 struct nchash_head *nchpp = ncp->nc_head; 2086 2087 KKASSERT(nchpp != NULL); 2088 LIST_REMOVE(ncp, nc_hash); 2089 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 2090 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 2091 dropvp = par->nc_vp; 2092 ncp->nc_head = NULL; 2093 ncp->nc_parent = NULL; 2094 spin_unlock(&nchpp->spin); 2095 _cache_unlock(par); 2096 } else { 2097 KKASSERT(ncp->nc_head == NULL); 2098 } 2099 2100 /* 2101 * ncp should not have picked up any refs. Physically 2102 * destroy the ncp. 2103 */ 2104 KKASSERT(ncp->nc_refs == 1); 2105 /* _cache_unlock(ncp) not required */ 2106 ncp->nc_refs = -1; /* safety */ 2107 if (ncp->nc_name) 2108 kfree(ncp->nc_name, M_VFSCACHE); 2109 kfree(ncp, M_VFSCACHE); 2110 2111 /* 2112 * Delayed drop (we had to release our spinlocks) 2113 * 2114 * The refed parent (if not NULL) must be dropped. The 2115 * caller is responsible for looping. 2116 */ 2117 if (dropvp) 2118 vdrop(dropvp); 2119 return(par); 2120 } 2121 2122 /* 2123 * Clean up dangling negative cache and defered-drop entries in the 2124 * namecache. 2125 */ 2126 typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t; 2127 2128 static cache_hs_t neg_cache_hysteresis_state = CHI_LOW; 2129 static cache_hs_t pos_cache_hysteresis_state = CHI_LOW; 2130 2131 void 2132 cache_hysteresis(void) 2133 { 2134 int poslimit; 2135 2136 /* 2137 * Don't cache too many negative hits. We use hysteresis to reduce 2138 * the impact on the critical path. 2139 */ 2140 switch(neg_cache_hysteresis_state) { 2141 case CHI_LOW: 2142 if (numneg > MINNEG && numneg * ncnegfactor > numcache) { 2143 _cache_cleanneg(10); 2144 neg_cache_hysteresis_state = CHI_HIGH; 2145 } 2146 break; 2147 case CHI_HIGH: 2148 if (numneg > MINNEG * 9 / 10 && 2149 numneg * ncnegfactor * 9 / 10 > numcache 2150 ) { 2151 _cache_cleanneg(10); 2152 } else { 2153 neg_cache_hysteresis_state = CHI_LOW; 2154 } 2155 break; 2156 } 2157 2158 /* 2159 * Don't cache too many positive hits. We use hysteresis to reduce 2160 * the impact on the critical path. 2161 * 2162 * Excessive positive hits can accumulate due to large numbers of 2163 * hardlinks (the vnode cache will not prevent hl ncps from growing 2164 * into infinity). 2165 */ 2166 if ((poslimit = ncposlimit) == 0) 2167 poslimit = desiredvnodes * 2; 2168 2169 switch(pos_cache_hysteresis_state) { 2170 case CHI_LOW: 2171 if (numcache > poslimit && numcache > MINPOS) { 2172 _cache_cleanpos(10); 2173 pos_cache_hysteresis_state = CHI_HIGH; 2174 } 2175 break; 2176 case CHI_HIGH: 2177 if (numcache > poslimit * 5 / 6 && numcache > MINPOS) { 2178 _cache_cleanpos(10); 2179 } else { 2180 pos_cache_hysteresis_state = CHI_LOW; 2181 } 2182 break; 2183 } 2184 2185 /* 2186 * Clean out dangling defered-zap ncps which could not 2187 * be cleanly dropped if too many build up. Note 2188 * that numdefered is not an exact number as such ncps 2189 * can be reused and the counter is not handled in a MP 2190 * safe manner by design. 2191 */ 2192 if (numdefered * ncnegfactor > numcache) { 2193 _cache_cleandefered(); 2194 } 2195 } 2196 2197 /* 2198 * NEW NAMECACHE LOOKUP API 2199 * 2200 * Lookup an entry in the namecache. The passed par_nch must be referenced 2201 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp 2202 * is ALWAYS returned, eve if the supplied component is illegal. 2203 * 2204 * The resulting namecache entry should be returned to the system with 2205 * cache_put() or cache_unlock() + cache_drop(). 2206 * 2207 * namecache locks are recursive but care must be taken to avoid lock order 2208 * reversals (hence why the passed par_nch must be unlocked). Locking 2209 * rules are to order for parent traversals, not for child traversals. 2210 * 2211 * Nobody else will be able to manipulate the associated namespace (e.g. 2212 * create, delete, rename, rename-target) until the caller unlocks the 2213 * entry. 2214 * 2215 * The returned entry will be in one of three states: positive hit (non-null 2216 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set). 2217 * Unresolved entries must be resolved through the filesystem to associate the 2218 * vnode and/or determine whether a positive or negative hit has occured. 2219 * 2220 * It is not necessary to lock a directory in order to lock namespace under 2221 * that directory. In fact, it is explicitly not allowed to do that. A 2222 * directory is typically only locked when being created, renamed, or 2223 * destroyed. 2224 * 2225 * The directory (par) may be unresolved, in which case any returned child 2226 * will likely also be marked unresolved. Likely but not guarenteed. Since 2227 * the filesystem lookup requires a resolved directory vnode the caller is 2228 * responsible for resolving the namecache chain top-down. This API 2229 * specifically allows whole chains to be created in an unresolved state. 2230 */ 2231 struct nchandle 2232 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc) 2233 { 2234 struct nchandle nch; 2235 struct namecache *ncp; 2236 struct namecache *new_ncp; 2237 struct nchash_head *nchpp; 2238 struct mount *mp; 2239 u_int32_t hash; 2240 globaldata_t gd; 2241 int par_locked; 2242 2243 numcalls++; 2244 gd = mycpu; 2245 mp = par_nch->mount; 2246 par_locked = 0; 2247 2248 /* 2249 * This is a good time to call it, no ncp's are locked by 2250 * the caller or us. 2251 */ 2252 cache_hysteresis(); 2253 2254 /* 2255 * Try to locate an existing entry 2256 */ 2257 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 2258 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 2259 new_ncp = NULL; 2260 nchpp = NCHHASH(hash); 2261 restart: 2262 spin_lock(&nchpp->spin); 2263 LIST_FOREACH(ncp, &nchpp->list, nc_hash) { 2264 numchecks++; 2265 2266 /* 2267 * Break out if we find a matching entry. Note that 2268 * UNRESOLVED entries may match, but DESTROYED entries 2269 * do not. 2270 */ 2271 if (ncp->nc_parent == par_nch->ncp && 2272 ncp->nc_nlen == nlc->nlc_namelen && 2273 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 2274 (ncp->nc_flag & NCF_DESTROYED) == 0 2275 ) { 2276 _cache_hold(ncp); 2277 spin_unlock(&nchpp->spin); 2278 if (par_locked) { 2279 _cache_unlock(par_nch->ncp); 2280 par_locked = 0; 2281 } 2282 if (_cache_lock_special(ncp) == 0) { 2283 _cache_auto_unresolve(mp, ncp); 2284 if (new_ncp) 2285 _cache_free(new_ncp); 2286 goto found; 2287 } 2288 _cache_get(ncp); 2289 _cache_put(ncp); 2290 _cache_drop(ncp); 2291 goto restart; 2292 } 2293 } 2294 2295 /* 2296 * We failed to locate an entry, create a new entry and add it to 2297 * the cache. The parent ncp must also be locked so we 2298 * can link into it. 2299 * 2300 * We have to relookup after possibly blocking in kmalloc or 2301 * when locking par_nch. 2302 * 2303 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 2304 * mount case, in which case nc_name will be NULL. 2305 */ 2306 if (new_ncp == NULL) { 2307 spin_unlock(&nchpp->spin); 2308 new_ncp = cache_alloc(nlc->nlc_namelen); 2309 if (nlc->nlc_namelen) { 2310 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 2311 nlc->nlc_namelen); 2312 new_ncp->nc_name[nlc->nlc_namelen] = 0; 2313 } 2314 goto restart; 2315 } 2316 if (par_locked == 0) { 2317 spin_unlock(&nchpp->spin); 2318 _cache_lock(par_nch->ncp); 2319 par_locked = 1; 2320 goto restart; 2321 } 2322 2323 /* 2324 * WARNING! We still hold the spinlock. We have to set the hash 2325 * table entry atomically. 2326 */ 2327 ncp = new_ncp; 2328 _cache_link_parent(ncp, par_nch->ncp, nchpp); 2329 spin_unlock(&nchpp->spin); 2330 _cache_unlock(par_nch->ncp); 2331 /* par_locked = 0 - not used */ 2332 found: 2333 /* 2334 * stats and namecache size management 2335 */ 2336 if (ncp->nc_flag & NCF_UNRESOLVED) 2337 ++gd->gd_nchstats->ncs_miss; 2338 else if (ncp->nc_vp) 2339 ++gd->gd_nchstats->ncs_goodhits; 2340 else 2341 ++gd->gd_nchstats->ncs_neghits; 2342 nch.mount = mp; 2343 nch.ncp = ncp; 2344 atomic_add_int(&nch.mount->mnt_refs, 1); 2345 return(nch); 2346 } 2347 2348 /* 2349 * This is a non-blocking verison of cache_nlookup() used by 2350 * nfs_readdirplusrpc_uio(). It can fail for any reason and 2351 * will return nch.ncp == NULL in that case. 2352 */ 2353 struct nchandle 2354 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc) 2355 { 2356 struct nchandle nch; 2357 struct namecache *ncp; 2358 struct namecache *new_ncp; 2359 struct nchash_head *nchpp; 2360 struct mount *mp; 2361 u_int32_t hash; 2362 globaldata_t gd; 2363 int par_locked; 2364 2365 numcalls++; 2366 gd = mycpu; 2367 mp = par_nch->mount; 2368 par_locked = 0; 2369 2370 /* 2371 * Try to locate an existing entry 2372 */ 2373 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 2374 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 2375 new_ncp = NULL; 2376 nchpp = NCHHASH(hash); 2377 restart: 2378 spin_lock(&nchpp->spin); 2379 LIST_FOREACH(ncp, &nchpp->list, nc_hash) { 2380 numchecks++; 2381 2382 /* 2383 * Break out if we find a matching entry. Note that 2384 * UNRESOLVED entries may match, but DESTROYED entries 2385 * do not. 2386 */ 2387 if (ncp->nc_parent == par_nch->ncp && 2388 ncp->nc_nlen == nlc->nlc_namelen && 2389 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 2390 (ncp->nc_flag & NCF_DESTROYED) == 0 2391 ) { 2392 _cache_hold(ncp); 2393 spin_unlock(&nchpp->spin); 2394 if (par_locked) { 2395 _cache_unlock(par_nch->ncp); 2396 par_locked = 0; 2397 } 2398 if (_cache_lock_special(ncp) == 0) { 2399 _cache_auto_unresolve(mp, ncp); 2400 if (new_ncp) { 2401 _cache_free(new_ncp); 2402 new_ncp = NULL; 2403 } 2404 goto found; 2405 } 2406 _cache_drop(ncp); 2407 goto failed; 2408 } 2409 } 2410 2411 /* 2412 * We failed to locate an entry, create a new entry and add it to 2413 * the cache. The parent ncp must also be locked so we 2414 * can link into it. 2415 * 2416 * We have to relookup after possibly blocking in kmalloc or 2417 * when locking par_nch. 2418 * 2419 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special 2420 * mount case, in which case nc_name will be NULL. 2421 */ 2422 if (new_ncp == NULL) { 2423 spin_unlock(&nchpp->spin); 2424 new_ncp = cache_alloc(nlc->nlc_namelen); 2425 if (nlc->nlc_namelen) { 2426 bcopy(nlc->nlc_nameptr, new_ncp->nc_name, 2427 nlc->nlc_namelen); 2428 new_ncp->nc_name[nlc->nlc_namelen] = 0; 2429 } 2430 goto restart; 2431 } 2432 if (par_locked == 0) { 2433 spin_unlock(&nchpp->spin); 2434 if (_cache_lock_nonblock(par_nch->ncp) == 0) { 2435 par_locked = 1; 2436 goto restart; 2437 } 2438 goto failed; 2439 } 2440 2441 /* 2442 * WARNING! We still hold the spinlock. We have to set the hash 2443 * table entry atomically. 2444 */ 2445 ncp = new_ncp; 2446 _cache_link_parent(ncp, par_nch->ncp, nchpp); 2447 spin_unlock(&nchpp->spin); 2448 _cache_unlock(par_nch->ncp); 2449 /* par_locked = 0 - not used */ 2450 found: 2451 /* 2452 * stats and namecache size management 2453 */ 2454 if (ncp->nc_flag & NCF_UNRESOLVED) 2455 ++gd->gd_nchstats->ncs_miss; 2456 else if (ncp->nc_vp) 2457 ++gd->gd_nchstats->ncs_goodhits; 2458 else 2459 ++gd->gd_nchstats->ncs_neghits; 2460 nch.mount = mp; 2461 nch.ncp = ncp; 2462 atomic_add_int(&nch.mount->mnt_refs, 1); 2463 return(nch); 2464 failed: 2465 if (new_ncp) { 2466 _cache_free(new_ncp); 2467 new_ncp = NULL; 2468 } 2469 nch.mount = NULL; 2470 nch.ncp = NULL; 2471 return(nch); 2472 } 2473 2474 /* 2475 * The namecache entry is marked as being used as a mount point. 2476 * Locate the mount if it is visible to the caller. 2477 */ 2478 struct findmount_info { 2479 struct mount *result; 2480 struct mount *nch_mount; 2481 struct namecache *nch_ncp; 2482 }; 2483 2484 static 2485 int 2486 cache_findmount_callback(struct mount *mp, void *data) 2487 { 2488 struct findmount_info *info = data; 2489 2490 /* 2491 * Check the mount's mounted-on point against the passed nch. 2492 */ 2493 if (mp->mnt_ncmounton.mount == info->nch_mount && 2494 mp->mnt_ncmounton.ncp == info->nch_ncp 2495 ) { 2496 info->result = mp; 2497 return(-1); 2498 } 2499 return(0); 2500 } 2501 2502 struct mount * 2503 cache_findmount(struct nchandle *nch) 2504 { 2505 struct findmount_info info; 2506 2507 info.result = NULL; 2508 info.nch_mount = nch->mount; 2509 info.nch_ncp = nch->ncp; 2510 mountlist_scan(cache_findmount_callback, &info, 2511 MNTSCAN_FORWARD|MNTSCAN_NOBUSY); 2512 return(info.result); 2513 } 2514 2515 /* 2516 * Resolve an unresolved namecache entry, generally by looking it up. 2517 * The passed ncp must be locked and refd. 2518 * 2519 * Theoretically since a vnode cannot be recycled while held, and since 2520 * the nc_parent chain holds its vnode as long as children exist, the 2521 * direct parent of the cache entry we are trying to resolve should 2522 * have a valid vnode. If not then generate an error that we can 2523 * determine is related to a resolver bug. 2524 * 2525 * However, if a vnode was in the middle of a recyclement when the NCP 2526 * got locked, ncp->nc_vp might point to a vnode that is about to become 2527 * invalid. cache_resolve() handles this case by unresolving the entry 2528 * and then re-resolving it. 2529 * 2530 * Note that successful resolution does not necessarily return an error 2531 * code of 0. If the ncp resolves to a negative cache hit then ENOENT 2532 * will be returned. 2533 * 2534 * MPSAFE 2535 */ 2536 int 2537 cache_resolve(struct nchandle *nch, struct ucred *cred) 2538 { 2539 struct namecache *par_tmp; 2540 struct namecache *par; 2541 struct namecache *ncp; 2542 struct nchandle nctmp; 2543 struct mount *mp; 2544 struct vnode *dvp; 2545 int error; 2546 2547 ncp = nch->ncp; 2548 mp = nch->mount; 2549 restart: 2550 /* 2551 * If the ncp is already resolved we have nothing to do. However, 2552 * we do want to guarentee that a usable vnode is returned when 2553 * a vnode is present, so make sure it hasn't been reclaimed. 2554 */ 2555 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 2556 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 2557 _cache_setunresolved(ncp); 2558 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) 2559 return (ncp->nc_error); 2560 } 2561 2562 /* 2563 * Mount points need special handling because the parent does not 2564 * belong to the same filesystem as the ncp. 2565 */ 2566 if (ncp == mp->mnt_ncmountpt.ncp) 2567 return (cache_resolve_mp(mp)); 2568 2569 /* 2570 * We expect an unbroken chain of ncps to at least the mount point, 2571 * and even all the way to root (but this code doesn't have to go 2572 * past the mount point). 2573 */ 2574 if (ncp->nc_parent == NULL) { 2575 kprintf("EXDEV case 1 %p %*.*s\n", ncp, 2576 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 2577 ncp->nc_error = EXDEV; 2578 return(ncp->nc_error); 2579 } 2580 2581 /* 2582 * The vp's of the parent directories in the chain are held via vhold() 2583 * due to the existance of the child, and should not disappear. 2584 * However, there are cases where they can disappear: 2585 * 2586 * - due to filesystem I/O errors. 2587 * - due to NFS being stupid about tracking the namespace and 2588 * destroys the namespace for entire directories quite often. 2589 * - due to forced unmounts. 2590 * - due to an rmdir (parent will be marked DESTROYED) 2591 * 2592 * When this occurs we have to track the chain backwards and resolve 2593 * it, looping until the resolver catches up to the current node. We 2594 * could recurse here but we might run ourselves out of kernel stack 2595 * so we do it in a more painful manner. This situation really should 2596 * not occur all that often, or if it does not have to go back too 2597 * many nodes to resolve the ncp. 2598 */ 2599 while ((dvp = cache_dvpref(ncp)) == NULL) { 2600 /* 2601 * This case can occur if a process is CD'd into a 2602 * directory which is then rmdir'd. If the parent is marked 2603 * destroyed there is no point trying to resolve it. 2604 */ 2605 if (ncp->nc_parent->nc_flag & NCF_DESTROYED) 2606 return(ENOENT); 2607 par = ncp->nc_parent; 2608 _cache_hold(par); 2609 _cache_lock(par); 2610 while ((par_tmp = par->nc_parent) != NULL && 2611 par_tmp->nc_vp == NULL) { 2612 _cache_hold(par_tmp); 2613 _cache_lock(par_tmp); 2614 _cache_put(par); 2615 par = par_tmp; 2616 } 2617 if (par->nc_parent == NULL) { 2618 kprintf("EXDEV case 2 %*.*s\n", 2619 par->nc_nlen, par->nc_nlen, par->nc_name); 2620 _cache_put(par); 2621 return (EXDEV); 2622 } 2623 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n", 2624 par->nc_nlen, par->nc_nlen, par->nc_name); 2625 /* 2626 * The parent is not set in stone, ref and lock it to prevent 2627 * it from disappearing. Also note that due to renames it 2628 * is possible for our ncp to move and for par to no longer 2629 * be one of its parents. We resolve it anyway, the loop 2630 * will handle any moves. 2631 */ 2632 _cache_get(par); /* additional hold/lock */ 2633 _cache_put(par); /* from earlier hold/lock */ 2634 if (par == nch->mount->mnt_ncmountpt.ncp) { 2635 cache_resolve_mp(nch->mount); 2636 } else if ((dvp = cache_dvpref(par)) == NULL) { 2637 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name); 2638 _cache_put(par); 2639 continue; 2640 } else { 2641 if (par->nc_flag & NCF_UNRESOLVED) { 2642 nctmp.mount = mp; 2643 nctmp.ncp = par; 2644 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 2645 } 2646 vrele(dvp); 2647 } 2648 if ((error = par->nc_error) != 0) { 2649 if (par->nc_error != EAGAIN) { 2650 kprintf("EXDEV case 3 %*.*s error %d\n", 2651 par->nc_nlen, par->nc_nlen, par->nc_name, 2652 par->nc_error); 2653 _cache_put(par); 2654 return(error); 2655 } 2656 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n", 2657 par, par->nc_nlen, par->nc_nlen, par->nc_name); 2658 } 2659 _cache_put(par); 2660 /* loop */ 2661 } 2662 2663 /* 2664 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected 2665 * ncp's and reattach them. If this occurs the original ncp is marked 2666 * EAGAIN to force a relookup. 2667 * 2668 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed 2669 * ncp must already be resolved. 2670 */ 2671 if (dvp) { 2672 nctmp.mount = mp; 2673 nctmp.ncp = ncp; 2674 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred); 2675 vrele(dvp); 2676 } else { 2677 ncp->nc_error = EPERM; 2678 } 2679 if (ncp->nc_error == EAGAIN) { 2680 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n", 2681 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 2682 goto restart; 2683 } 2684 return(ncp->nc_error); 2685 } 2686 2687 /* 2688 * Resolve the ncp associated with a mount point. Such ncp's almost always 2689 * remain resolved and this routine is rarely called. NFS MPs tends to force 2690 * re-resolution more often due to its mac-truck-smash-the-namecache 2691 * method of tracking namespace changes. 2692 * 2693 * The semantics for this call is that the passed ncp must be locked on 2694 * entry and will be locked on return. However, if we actually have to 2695 * resolve the mount point we temporarily unlock the entry in order to 2696 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of 2697 * the unlock we have to recheck the flags after we relock. 2698 */ 2699 static int 2700 cache_resolve_mp(struct mount *mp) 2701 { 2702 struct namecache *ncp = mp->mnt_ncmountpt.ncp; 2703 struct vnode *vp; 2704 int error; 2705 2706 KKASSERT(mp != NULL); 2707 2708 /* 2709 * If the ncp is already resolved we have nothing to do. However, 2710 * we do want to guarentee that a usable vnode is returned when 2711 * a vnode is present, so make sure it hasn't been reclaimed. 2712 */ 2713 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) { 2714 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 2715 _cache_setunresolved(ncp); 2716 } 2717 2718 if (ncp->nc_flag & NCF_UNRESOLVED) { 2719 _cache_unlock(ncp); 2720 while (vfs_busy(mp, 0)) 2721 ; 2722 error = VFS_ROOT(mp, &vp); 2723 _cache_lock(ncp); 2724 2725 /* 2726 * recheck the ncp state after relocking. 2727 */ 2728 if (ncp->nc_flag & NCF_UNRESOLVED) { 2729 ncp->nc_error = error; 2730 if (error == 0) { 2731 _cache_setvp(mp, ncp, vp); 2732 vput(vp); 2733 } else { 2734 kprintf("[diagnostic] cache_resolve_mp: failed" 2735 " to resolve mount %p err=%d ncp=%p\n", 2736 mp, error, ncp); 2737 _cache_setvp(mp, ncp, NULL); 2738 } 2739 } else if (error == 0) { 2740 vput(vp); 2741 } 2742 vfs_unbusy(mp); 2743 } 2744 return(ncp->nc_error); 2745 } 2746 2747 /* 2748 * Clean out negative cache entries when too many have accumulated. 2749 * 2750 * MPSAFE 2751 */ 2752 static void 2753 _cache_cleanneg(int count) 2754 { 2755 struct namecache *ncp; 2756 2757 /* 2758 * Attempt to clean out the specified number of negative cache 2759 * entries. 2760 */ 2761 while (count) { 2762 spin_lock(&ncspin); 2763 ncp = TAILQ_FIRST(&ncneglist); 2764 if (ncp == NULL) { 2765 spin_unlock(&ncspin); 2766 break; 2767 } 2768 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode); 2769 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode); 2770 _cache_hold(ncp); 2771 spin_unlock(&ncspin); 2772 if (_cache_lock_special(ncp) == 0) { 2773 ncp = cache_zap(ncp, 1); 2774 if (ncp) 2775 _cache_drop(ncp); 2776 } else { 2777 _cache_drop(ncp); 2778 } 2779 --count; 2780 } 2781 } 2782 2783 /* 2784 * Clean out positive cache entries when too many have accumulated. 2785 * 2786 * MPSAFE 2787 */ 2788 static void 2789 _cache_cleanpos(int count) 2790 { 2791 static volatile int rover; 2792 struct nchash_head *nchpp; 2793 struct namecache *ncp; 2794 int rover_copy; 2795 2796 /* 2797 * Attempt to clean out the specified number of negative cache 2798 * entries. 2799 */ 2800 while (count) { 2801 rover_copy = ++rover; /* MPSAFEENOUGH */ 2802 nchpp = NCHHASH(rover_copy); 2803 2804 spin_lock(&nchpp->spin); 2805 ncp = LIST_FIRST(&nchpp->list); 2806 if (ncp) 2807 _cache_hold(ncp); 2808 spin_unlock(&nchpp->spin); 2809 2810 if (ncp) { 2811 if (_cache_lock_special(ncp) == 0) { 2812 ncp = cache_zap(ncp, 1); 2813 if (ncp) 2814 _cache_drop(ncp); 2815 } else { 2816 _cache_drop(ncp); 2817 } 2818 } 2819 --count; 2820 } 2821 } 2822 2823 /* 2824 * This is a kitchen sink function to clean out ncps which we 2825 * tried to zap from cache_drop() but failed because we were 2826 * unable to acquire the parent lock. 2827 * 2828 * Such entries can also be removed via cache_inval_vp(), such 2829 * as when unmounting. 2830 * 2831 * MPSAFE 2832 */ 2833 static void 2834 _cache_cleandefered(void) 2835 { 2836 struct nchash_head *nchpp; 2837 struct namecache *ncp; 2838 struct namecache dummy; 2839 int i; 2840 2841 numdefered = 0; 2842 bzero(&dummy, sizeof(dummy)); 2843 dummy.nc_flag = NCF_DESTROYED; 2844 2845 for (i = 0; i <= nchash; ++i) { 2846 nchpp = &nchashtbl[i]; 2847 2848 spin_lock(&nchpp->spin); 2849 LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash); 2850 ncp = &dummy; 2851 while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) { 2852 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0) 2853 continue; 2854 LIST_REMOVE(&dummy, nc_hash); 2855 LIST_INSERT_AFTER(ncp, &dummy, nc_hash); 2856 _cache_hold(ncp); 2857 spin_unlock(&nchpp->spin); 2858 if (_cache_lock_nonblock(ncp) == 0) { 2859 ncp->nc_flag &= ~NCF_DEFEREDZAP; 2860 _cache_unlock(ncp); 2861 } 2862 _cache_drop(ncp); 2863 spin_lock(&nchpp->spin); 2864 ncp = &dummy; 2865 } 2866 LIST_REMOVE(&dummy, nc_hash); 2867 spin_unlock(&nchpp->spin); 2868 } 2869 } 2870 2871 /* 2872 * Name cache initialization, from vfsinit() when we are booting 2873 */ 2874 void 2875 nchinit(void) 2876 { 2877 int i; 2878 globaldata_t gd; 2879 2880 /* initialise per-cpu namecache effectiveness statistics. */ 2881 for (i = 0; i < ncpus; ++i) { 2882 gd = globaldata_find(i); 2883 gd->gd_nchstats = &nchstats[i]; 2884 } 2885 TAILQ_INIT(&ncneglist); 2886 spin_init(&ncspin); 2887 nchashtbl = hashinit_ext(desiredvnodes / 2, 2888 sizeof(struct nchash_head), 2889 M_VFSCACHE, &nchash); 2890 for (i = 0; i <= (int)nchash; ++i) { 2891 LIST_INIT(&nchashtbl[i].list); 2892 spin_init(&nchashtbl[i].spin); 2893 } 2894 nclockwarn = 5 * hz; 2895 } 2896 2897 /* 2898 * Called from start_init() to bootstrap the root filesystem. Returns 2899 * a referenced, unlocked namecache record. 2900 */ 2901 void 2902 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp) 2903 { 2904 nch->ncp = cache_alloc(0); 2905 nch->mount = mp; 2906 atomic_add_int(&mp->mnt_refs, 1); 2907 if (vp) 2908 _cache_setvp(nch->mount, nch->ncp, vp); 2909 } 2910 2911 /* 2912 * vfs_cache_setroot() 2913 * 2914 * Create an association between the root of our namecache and 2915 * the root vnode. This routine may be called several times during 2916 * booting. 2917 * 2918 * If the caller intends to save the returned namecache pointer somewhere 2919 * it must cache_hold() it. 2920 */ 2921 void 2922 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch) 2923 { 2924 struct vnode *ovp; 2925 struct nchandle onch; 2926 2927 ovp = rootvnode; 2928 onch = rootnch; 2929 rootvnode = nvp; 2930 if (nch) 2931 rootnch = *nch; 2932 else 2933 cache_zero(&rootnch); 2934 if (ovp) 2935 vrele(ovp); 2936 if (onch.ncp) 2937 cache_drop(&onch); 2938 } 2939 2940 /* 2941 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache 2942 * topology and is being removed as quickly as possible. The new VOP_N*() 2943 * API calls are required to make specific adjustments using the supplied 2944 * ncp pointers rather then just bogusly purging random vnodes. 2945 * 2946 * Invalidate all namecache entries to a particular vnode as well as 2947 * any direct children of that vnode in the namecache. This is a 2948 * 'catch all' purge used by filesystems that do not know any better. 2949 * 2950 * Note that the linkage between the vnode and its namecache entries will 2951 * be removed, but the namecache entries themselves might stay put due to 2952 * active references from elsewhere in the system or due to the existance of 2953 * the children. The namecache topology is left intact even if we do not 2954 * know what the vnode association is. Such entries will be marked 2955 * NCF_UNRESOLVED. 2956 */ 2957 void 2958 cache_purge(struct vnode *vp) 2959 { 2960 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN); 2961 } 2962 2963 /* 2964 * Flush all entries referencing a particular filesystem. 2965 * 2966 * Since we need to check it anyway, we will flush all the invalid 2967 * entries at the same time. 2968 */ 2969 #if 0 2970 2971 void 2972 cache_purgevfs(struct mount *mp) 2973 { 2974 struct nchash_head *nchpp; 2975 struct namecache *ncp, *nnp; 2976 2977 /* 2978 * Scan hash tables for applicable entries. 2979 */ 2980 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) { 2981 spin_lock_wr(&nchpp->spin); XXX 2982 ncp = LIST_FIRST(&nchpp->list); 2983 if (ncp) 2984 _cache_hold(ncp); 2985 while (ncp) { 2986 nnp = LIST_NEXT(ncp, nc_hash); 2987 if (nnp) 2988 _cache_hold(nnp); 2989 if (ncp->nc_mount == mp) { 2990 _cache_lock(ncp); 2991 ncp = cache_zap(ncp, 0); 2992 if (ncp) 2993 _cache_drop(ncp); 2994 } else { 2995 _cache_drop(ncp); 2996 } 2997 ncp = nnp; 2998 } 2999 spin_unlock_wr(&nchpp->spin); XXX 3000 } 3001 } 3002 3003 #endif 3004 3005 static int disablecwd; 3006 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, ""); 3007 3008 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls); 3009 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1); 3010 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2); 3011 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3); 3012 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4); 3013 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound); 3014 3015 /* 3016 * MPALMOSTSAFE 3017 */ 3018 int 3019 sys___getcwd(struct __getcwd_args *uap) 3020 { 3021 u_int buflen; 3022 int error; 3023 char *buf; 3024 char *bp; 3025 3026 if (disablecwd) 3027 return (ENODEV); 3028 3029 buflen = uap->buflen; 3030 if (buflen == 0) 3031 return (EINVAL); 3032 if (buflen > MAXPATHLEN) 3033 buflen = MAXPATHLEN; 3034 3035 buf = kmalloc(buflen, M_TEMP, M_WAITOK); 3036 get_mplock(); 3037 bp = kern_getcwd(buf, buflen, &error); 3038 rel_mplock(); 3039 if (error == 0) 3040 error = copyout(bp, uap->buf, strlen(bp) + 1); 3041 kfree(buf, M_TEMP); 3042 return (error); 3043 } 3044 3045 char * 3046 kern_getcwd(char *buf, size_t buflen, int *error) 3047 { 3048 struct proc *p = curproc; 3049 char *bp; 3050 int i, slash_prefixed; 3051 struct filedesc *fdp; 3052 struct nchandle nch; 3053 struct namecache *ncp; 3054 3055 numcwdcalls++; 3056 bp = buf; 3057 bp += buflen - 1; 3058 *bp = '\0'; 3059 fdp = p->p_fd; 3060 slash_prefixed = 0; 3061 3062 nch = fdp->fd_ncdir; 3063 ncp = nch.ncp; 3064 if (ncp) 3065 _cache_hold(ncp); 3066 3067 while (ncp && (ncp != fdp->fd_nrdir.ncp || 3068 nch.mount != fdp->fd_nrdir.mount) 3069 ) { 3070 /* 3071 * While traversing upwards if we encounter the root 3072 * of the current mount we have to skip to the mount point 3073 * in the underlying filesystem. 3074 */ 3075 if (ncp == nch.mount->mnt_ncmountpt.ncp) { 3076 nch = nch.mount->mnt_ncmounton; 3077 _cache_drop(ncp); 3078 ncp = nch.ncp; 3079 if (ncp) 3080 _cache_hold(ncp); 3081 continue; 3082 } 3083 3084 /* 3085 * Prepend the path segment 3086 */ 3087 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 3088 if (bp == buf) { 3089 numcwdfail4++; 3090 *error = ERANGE; 3091 bp = NULL; 3092 goto done; 3093 } 3094 *--bp = ncp->nc_name[i]; 3095 } 3096 if (bp == buf) { 3097 numcwdfail4++; 3098 *error = ERANGE; 3099 bp = NULL; 3100 goto done; 3101 } 3102 *--bp = '/'; 3103 slash_prefixed = 1; 3104 3105 /* 3106 * Go up a directory. This isn't a mount point so we don't 3107 * have to check again. 3108 */ 3109 while ((nch.ncp = ncp->nc_parent) != NULL) { 3110 _cache_lock(ncp); 3111 if (nch.ncp != ncp->nc_parent) { 3112 _cache_unlock(ncp); 3113 continue; 3114 } 3115 _cache_hold(nch.ncp); 3116 _cache_unlock(ncp); 3117 break; 3118 } 3119 _cache_drop(ncp); 3120 ncp = nch.ncp; 3121 } 3122 if (ncp == NULL) { 3123 numcwdfail2++; 3124 *error = ENOENT; 3125 bp = NULL; 3126 goto done; 3127 } 3128 if (!slash_prefixed) { 3129 if (bp == buf) { 3130 numcwdfail4++; 3131 *error = ERANGE; 3132 bp = NULL; 3133 goto done; 3134 } 3135 *--bp = '/'; 3136 } 3137 numcwdfound++; 3138 *error = 0; 3139 done: 3140 if (ncp) 3141 _cache_drop(ncp); 3142 return (bp); 3143 } 3144 3145 /* 3146 * Thus begins the fullpath magic. 3147 * 3148 * The passed nchp is referenced but not locked. 3149 */ 3150 #undef STATNODE 3151 #define STATNODE(name) \ 3152 static u_int name; \ 3153 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "") 3154 3155 static int disablefullpath; 3156 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, 3157 &disablefullpath, 0, ""); 3158 3159 STATNODE(numfullpathcalls); 3160 STATNODE(numfullpathfail1); 3161 STATNODE(numfullpathfail2); 3162 STATNODE(numfullpathfail3); 3163 STATNODE(numfullpathfail4); 3164 STATNODE(numfullpathfound); 3165 3166 int 3167 cache_fullpath(struct proc *p, struct nchandle *nchp, 3168 char **retbuf, char **freebuf, int guess) 3169 { 3170 struct nchandle fd_nrdir; 3171 struct nchandle nch; 3172 struct namecache *ncp; 3173 struct mount *mp, *new_mp; 3174 char *bp, *buf; 3175 int slash_prefixed; 3176 int error = 0; 3177 int i; 3178 3179 atomic_add_int(&numfullpathcalls, -1); 3180 3181 *retbuf = NULL; 3182 *freebuf = NULL; 3183 3184 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK); 3185 bp = buf + MAXPATHLEN - 1; 3186 *bp = '\0'; 3187 if (p != NULL) 3188 fd_nrdir = p->p_fd->fd_nrdir; 3189 else 3190 fd_nrdir = rootnch; 3191 slash_prefixed = 0; 3192 nch = *nchp; 3193 ncp = nch.ncp; 3194 if (ncp) 3195 _cache_hold(ncp); 3196 mp = nch.mount; 3197 3198 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) { 3199 new_mp = NULL; 3200 3201 /* 3202 * If we are asked to guess the upwards path, we do so whenever 3203 * we encounter an ncp marked as a mountpoint. We try to find 3204 * the actual mountpoint by finding the mountpoint with this ncp. 3205 */ 3206 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) { 3207 new_mp = mount_get_by_nc(ncp); 3208 } 3209 /* 3210 * While traversing upwards if we encounter the root 3211 * of the current mount we have to skip to the mount point. 3212 */ 3213 if (ncp == mp->mnt_ncmountpt.ncp) { 3214 new_mp = mp; 3215 } 3216 if (new_mp) { 3217 nch = new_mp->mnt_ncmounton; 3218 _cache_drop(ncp); 3219 ncp = nch.ncp; 3220 if (ncp) 3221 _cache_hold(ncp); 3222 mp = nch.mount; 3223 continue; 3224 } 3225 3226 /* 3227 * Prepend the path segment 3228 */ 3229 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 3230 if (bp == buf) { 3231 numfullpathfail4++; 3232 kfree(buf, M_TEMP); 3233 error = ENOMEM; 3234 goto done; 3235 } 3236 *--bp = ncp->nc_name[i]; 3237 } 3238 if (bp == buf) { 3239 numfullpathfail4++; 3240 kfree(buf, M_TEMP); 3241 error = ENOMEM; 3242 goto done; 3243 } 3244 *--bp = '/'; 3245 slash_prefixed = 1; 3246 3247 /* 3248 * Go up a directory. This isn't a mount point so we don't 3249 * have to check again. 3250 * 3251 * We can only safely access nc_parent with ncp held locked. 3252 */ 3253 while ((nch.ncp = ncp->nc_parent) != NULL) { 3254 _cache_lock(ncp); 3255 if (nch.ncp != ncp->nc_parent) { 3256 _cache_unlock(ncp); 3257 continue; 3258 } 3259 _cache_hold(nch.ncp); 3260 _cache_unlock(ncp); 3261 break; 3262 } 3263 _cache_drop(ncp); 3264 ncp = nch.ncp; 3265 } 3266 if (ncp == NULL) { 3267 numfullpathfail2++; 3268 kfree(buf, M_TEMP); 3269 error = ENOENT; 3270 goto done; 3271 } 3272 3273 if (!slash_prefixed) { 3274 if (bp == buf) { 3275 numfullpathfail4++; 3276 kfree(buf, M_TEMP); 3277 error = ENOMEM; 3278 goto done; 3279 } 3280 *--bp = '/'; 3281 } 3282 numfullpathfound++; 3283 *retbuf = bp; 3284 *freebuf = buf; 3285 error = 0; 3286 done: 3287 if (ncp) 3288 _cache_drop(ncp); 3289 return(error); 3290 } 3291 3292 int 3293 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf, int guess) 3294 { 3295 struct namecache *ncp; 3296 struct nchandle nch; 3297 int error; 3298 3299 atomic_add_int(&numfullpathcalls, 1); 3300 if (disablefullpath) 3301 return (ENODEV); 3302 3303 if (p == NULL) 3304 return (EINVAL); 3305 3306 /* vn is NULL, client wants us to use p->p_textvp */ 3307 if (vn == NULL) { 3308 if ((vn = p->p_textvp) == NULL) 3309 return (EINVAL); 3310 } 3311 spin_lock(&vn->v_spinlock); 3312 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) { 3313 if (ncp->nc_nlen) 3314 break; 3315 } 3316 if (ncp == NULL) { 3317 spin_unlock(&vn->v_spinlock); 3318 return (EINVAL); 3319 } 3320 _cache_hold(ncp); 3321 spin_unlock(&vn->v_spinlock); 3322 3323 atomic_add_int(&numfullpathcalls, -1); 3324 nch.ncp = ncp;; 3325 nch.mount = vn->v_mount; 3326 error = cache_fullpath(p, &nch, retbuf, freebuf, guess); 3327 _cache_drop(ncp); 3328 return (error); 3329 } 3330