/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
#define MINPOS			1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list list;
	struct spinlock	spin;
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;

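/*
 * Illustrative sketch (not compiled): how a (parent ncp, name) key maps
 * to a hash chain.  The FNV hash over the name and then the parent
 * pointer matches the usage in cache_rename() further below; the helper
 * name and the unlocked-return caveat are for illustration only and are
 * not part of this file's API.
 */
#if 0
static struct namecache *
example_hash_walk(struct namecache *par, const char *name, int nlen)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	u_int32_t hash;

	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		if (ncp->nc_parent == par && ncp->nc_nlen == nlen &&
		    bcmp(ncp->nc_name, name, nlen) == 0) {
			break;
		}
	}
	spin_unlock(&nchpp->spin);
	return (ncp);	/* NULL if not found; NOT refd, unsafe as shown */
}
#endif
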
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of namecache negative entries");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");

static int	numdefered;		/* number of deferred-zap entries */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred-zap namecache entries");

static int	ncposlimit;		/* limit on positive entries */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Limit on the number of positive namecache entries");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleanpos(int count);
static void _cache_cleandefered(void);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
#define STATNODE_INT(mode, name, var) \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
static int numneg; STATNODE_INT(CTLFLAG_RD, numneg, &numneg);
static int numcache; STATNODE_INT(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats",
	"VFS cache effectiveness statistics");

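/*
 * Illustrative sketch (userland, not compiled as part of this file):
 * one way the per-cpu statistics exported above could be consumed.
 * The handler emits one struct nchstats per cpu back-to-back, so the
 * returned byte count tells the reader how many cpus reported.  The
 * header providing struct nchstats and the ncs_goodhits field are
 * assumptions of this example, not definitions made by this file.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/namei.h>		/* assumed location of struct nchstats */
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct nchstats *stats;
	size_t len;
	int i, n;

	if (sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0) < 0)
		err(1, "sysctlbyname");
	if ((stats = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) < 0)
		err(1, "sysctlbyname");
	n = (int)(len / sizeof(*stats));
	for (i = 0; i < n; ++i)
		printf("cpu%d: goodhits %ld\n", i, (long)stats[i].ncs_goodhits);
	free(stats);
	return (0);
}
#endif
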
static struct namecache *cache_zap(struct namecache *ncp, int nonblock);

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 *
 * MPSAFE
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;
	int error;
	u_int count;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
				      count | NC_EXLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: blocked "
					"on %p",
					ncp);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}

/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 *
 * MPSAFE
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}

/*
 * Helper function
 *
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 *
 * MPSAFE
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);

	count = ncp->nc_exlocks;
	if ((count & ~NC_EXLOCK_REQ) == 1) {
		ncp->nc_locktd = NULL;
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
	}
	for (;;) {
		if ((count & ~NC_EXLOCK_REQ) == 1) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(ncp);
				break;
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count - 1)) {
				break;
			}
		}
		count = ncp->nc_exlocks;
	}
}

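/*
 * Illustrative sketch (not compiled): the reference/lock discipline the
 * routines above implement.  Per the MPSAFE rules, a ref must be
 * acquired before the lock and the lock released before the last ref is
 * dropped.  The function name is hypothetical; real callers normally
 * use _cache_get()/_cache_put() (defined further below), which bundle
 * these steps.
 */
#if 0
static void
example_lock_discipline(struct namecache *ncp)
{
	_cache_hold(ncp);		/* rule (1): ref before lock */
	_cache_lock(ncp);		/* rule (2): lock before modify */
	/*
	 * The ncp may be modified here; its nc_vp, if any, is held by
	 * the lock and cannot be recycled out from under us.
	 */
	_cache_unlock(ncp);
	_cache_drop(ncp);		/* may zap if last ref + unresolved */
}
#endif
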
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use a spinlock ourselves.
 *
 * MPSAFE
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 *
 * MPSAFE
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}

/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 *
 * MPSAFE
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 *
 * MPSAFE
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 *
 * MPSAFE
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 *
 * MPSAFE
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

/*
 * MPSAFE
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 *
 * MPSAFE if nch is stable
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 *
 * MPSAFE if nch is stable
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	if (target->ncp)
		_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE if nch is stable
 */
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * MPSAFE
 */
void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

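/*
 * Illustrative sketch (not compiled): duplicating and releasing an
 * nchandle.  cache_copy() bumps both the ncp ref and the mount ref so
 * the copy can be retained past the lifetime of the original handle.
 * The function and variable names are hypothetical.
 */
#if 0
static void
example_handle_copy(struct nchandle *nch)
{
	struct nchandle copy;

	cache_copy(nch, &copy);		/* copy is now independently refd */
	/* ... use copy, possibly long after nch itself is dropped ... */
	cache_drop(&copy);		/* zeroes copy.ncp / copy.mount */
}
#endif
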
/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}

/*
 * MPSAFE
 */
int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

/*
 * MPSAFE
 */
void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 *
 * MPSAFE
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 *
 * MPSAFE
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 *
 * MPSAFE
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * MPSAFE
 */
void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

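/*
 * Illustrative sketch (not compiled): the ref-and-lock usage pattern.
 * cache_get() returns with the target refd and locked and guarantees
 * the entry is either definitively usable or definitively unresolved;
 * cache_put() undoes both.  The function name is hypothetical.
 */
#if 0
static void
example_get_put(struct nchandle *nch)
{
	struct nchandle locked;

	cache_get(nch, &locked);	/* refd + locked on return */
	if (locked.ncp->nc_flag & NCF_UNRESOLVED) {
		/* resolve or fail here, e.g. via cache_resolve() */
	}
	cache_put(&locked);		/* unlock + drop, zeroes handle */
}
#endif
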
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 *
 * MPSAFE
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spinlock);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock(&vp->v_spinlock);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}

/*
 * MPSAFE
 */
void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * MPSAFE
 */
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

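/*
 * Illustrative sketch (not compiled): a filesystem that cannot rely on
 * explicit invalidation (e.g. a network filesystem) can bound how long
 * a resolved entry is trusted.  The 30-second figure is an arbitrary
 * example value, not a recommendation made by this file; the function
 * name is hypothetical.
 */
#if 0
static void
example_resolve_with_timeout(struct nchandle *nch, struct vnode *vp)
{
	cache_setvp(nch, vp);		/* resolve; ncp locked by caller */
	cache_settimeout(nch, 30 * hz);	/* distrust after ~30 seconds */
}
#endif
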
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * MPSAFE
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock(&vp->v_spinlock);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock(&vp->v_spinlock);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			spin_lock(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 *
 * MPSAFE
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leaves may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
		return;
	}
}

/*
 * MPSAFE
 */
void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 *
 * MPSAFE
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

/*
 * MPSAFE
 */
void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state; additional handling
 * depends on the flags described below.
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT NOT* have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 *
 * MPSAFE
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

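/*
 * Illustrative sketch (not compiled): invalidating a directory that has
 * just been removed.  CINV_DESTROY marks the passed entry's underlying
 * node as destroyed; CINV_CHILDREN additionally unresolves the subtree.
 * The handle is assumed to be refd and locked, as required above; the
 * function name is hypothetical.
 */
#if 0
static void
example_inval_rmdir(struct nchandle *nch)
{
	/*
	 * A non-zero return means a racing re-resolve was detected; the
	 * entry went through an invalidation cycle but might not have
	 * stayed invalidated.
	 */
	if (cache_inval(nch, CINV_DESTROY | CINV_CHILDREN))
		; /* caller may recheck or simply proceed */
}
#endif
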
/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 *
 * MPSAFE
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spinlock);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spinlock);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 *
 * MPSAFE
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spinlock);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spinlock);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 *
 * MPSAFE
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_setunresolved(tncp);
	_cache_unlink_parent(tncp);
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;

	if (oname)
		kfree(oname, M_VFSCACHE);
}

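/*
 * Illustrative sketch (not compiled): the calling convention for
 * cache_rename() as a rename-style VOP might use it.  Both handles must
 * be refd and locked; afterwards the source carries the target's name
 * and the target has been unresolved and unlinked from the topology.
 * The function name is hypothetical.
 */
#if 0
static void
example_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	/* fnch and tnch are both refd + locked here */
	cache_rename(fnch, tnch);
	/* fnch is now linked under tnch's old parent; tnch is an orphan */
}
#endif
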
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * MPSAFE
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

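/*
 * Illustrative sketch (not compiled): converting a refd + locked
 * nchandle into a usable vnode.  ENOENT is the normal "negative hit"
 * result.  The error handling shown is a minimal assumption, not taken
 * from a real caller; the function name is hypothetical.
 */
#if 0
static int
example_vget(struct nchandle *nch, struct ucred *cred)
{
	struct vnode *vp;
	int error;

	error = cache_vget(nch, cred, LK_SHARED, &vp);
	if (error)
		return (error);		/* e.g. ENOENT for a negative hit */
	/* ... operate on the locked, refd vnode ... */
	vput(vp);			/* release lock + ref */
	return (0);
}
#endif
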
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid exhausting the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
				  struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock(&dvp->v_spinlock);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock(&dvp->v_spinlock);
			break;
		}
		spin_unlock(&dvp->v_spinlock);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}

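/*
 * Illustrative sketch (not compiled): how an NFS-server-style caller
 * might recover an nchandle from a directory vnode obtained via a file
 * handle.  dvp must be refd but unlocked; on success the returned ncp
 * is held (not locked) and must eventually be dropped.  The function
 * name is hypothetical.
 */
#if 0
static int
example_fh_to_nch(struct vnode *dvp, struct ucred *cred,
		  struct nchandle *nch)
{
	int error;

	error = cache_fromdvp(dvp, cred, 1, nch);	/* makeit = 1 */
	if (error == 0) {
		/* ... use nch ... */
		cache_drop(nch);
	}
	return (error);
}
#endif
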
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock(&pvp->v_spinlock);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock(&pvp->v_spinlock);
			vrele(pvp);
			break;
		}
		spin_unlock(&pvp->v_spinlock);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned. (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
	struct namecache *par;
	struct vnode *dropvp;
	int refs;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * Note that once the spinlocks are acquired, if nc_refs == 1 no
	 * other references are possible.  If it isn't, however, we have
	 * to decrement but also be sure to avoid a 1->0 transition.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.
	 */
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			for (;;) {
				if (_cache_lock_nonblock(par) == 0)
					break;
				refs = ncp->nc_refs;
				ncp->nc_flag |= NCF_DEFEREDZAP;
				++numdefered;	/* MP race ok */
				if (atomic_cmpset_int(&ncp->nc_refs,
						      refs, refs - 1)) {
					_cache_unlock(ncp);
					return(NULL);
				}
				cpu_pause();
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		spin_lock(&ncp->nc_head->spin);
	}

	/*
	 * If someone other than us has a ref or we have children
	 * we cannot zap the entry.  The 1->0 transition and any
	 * further list operation is protected by the spinlocks
	 * we have acquired but other transitions are not.
	 */
	for (;;) {
		refs = ncp->nc_refs;
		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
			break;
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
			if (par) {
				spin_unlock(&ncp->nc_head->spin);
				_cache_put(par);
			}
			_cache_unlock(ncp);
			return(NULL);
		}
		cpu_pause();
	}

	/*
	 * We are the only ref and with the spinlocks held no further
	 * refs can be acquired by others.
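	 * (nc_refs == 1 is now stable: any new reference would have to be
	 * found via the hash chain or the parent's nc_list, both of which
	 * are protected by the spinlocks we hold.)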
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
	 */
	dropvp = NULL;
	if (par) {
		struct nchash_head *nchpp = ncp->nc_head;

		KKASSERT(nchpp != NULL);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		ncp->nc_head = NULL;
		ncp->nc_parent = NULL;
		spin_unlock(&nchpp->spin);
		_cache_unlock(par);
	} else {
		KKASSERT(ncp->nc_head == NULL);
	}

	/*
	 * ncp should not have picked up any refs.  Physically
	 * destroy the ncp.
	 */
	KKASSERT(ncp->nc_refs == 1);
	/* _cache_unlock(ncp) not required */
	ncp->nc_refs = -1;	/* safety */
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);

	/*
	 * Delayed drop (we had to release our spinlocks)
	 *
	 * The refed parent (if not NULL) must be dropped.  The
	 * caller is responsible for looping.
	 */
	if (dropvp)
		vdrop(dropvp);
	return(par);
}

/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * namecache.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;

void
cache_hysteresis(void)
{
	int poslimit;

	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(neg_cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			_cache_cleanneg(10);
			neg_cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			_cache_cleanneg(10);
		} else {
			neg_cache_hysteresis_state = CHI_LOW;
		}
		break;
	}

	/*
	 * Don't cache too many positive hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 *
	 * Excessive positive hits can accumulate due to large numbers of
	 * hardlinks (the vnode cache will not prevent hl ncps from growing
	 * into infinity).
	 */
	if ((poslimit = ncposlimit) == 0)
		poslimit = desiredvnodes * 2;

	switch(pos_cache_hysteresis_state) {
	case CHI_LOW:
		if (numcache > poslimit && numcache > MINPOS) {
			_cache_cleanpos(10);
			pos_cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
			_cache_cleanpos(10);
		} else {
			pos_cache_hysteresis_state = CHI_LOW;
		}
		break;
	}

	/*
	 * Clean out dangling deferred-zap ncps, which could not
	 * be cleanly dropped, if too many build up.  Note
	 * that numdefered is not an exact number as such ncps
	 * can be reused and the counter is not handled in an MP
	 * safe manner by design.
	 */
	if (numdefered * ncnegfactor > numcache) {
		_cache_cleandefered();
	}
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
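 *
 * For example (an illustrative sketch only, with error handling elided),
 * a typical caller holding a referenced, unlocked par_nch might do:
 *
 *	nch = cache_nlookup(&par_nch, &nlc);
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	... nch.ncp->nc_vp is the resolved vnode (NULL on a negative hit) ...
 *	cache_put(&nch);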
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  The locking
 * rules are ordered for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
	 */
	cache_hysteresis();

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			_cache_drop(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
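	 *
	 * The restart loop below normally executes only a couple of times:
	 * once to allocate new_ncp and once to lock par_nch.  Each restart
	 * rescans the hash chain in case another thread raced us and
	 * created the same entry first.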
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		_cache_lock(par_nch->ncp);
		par_locked = 1;
		goto restart;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
}

/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
struct nchandle
cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;
				}
				goto found;
			}
			_cache_drop(ncp);
			goto failed;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
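	 *
	 * Unlike cache_nlookup(), any lock contention here (on the ncp or
	 * on the parent) aborts to the failed path instead of blocking.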
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
			par_locked = 1;
			goto restart;
		}
		goto failed;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
failed:
	if (new_ncp) {
		_cache_free(new_ncp);
		new_ncp = NULL;
	}
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
}

/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
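 *
 * Callers therefore treat ENOENT from a resolved entry as a valid
 * negative hit rather than a hard failure, along the lines of
 * (illustrative sketch only):
 *
 *	error = cache_resolve(&nch, cred);
 *	if (error == 0)
 *		... positive hit, nch.ncp->nc_vp is valid ...
 *	else if (error == ENOENT)
 *		... negative hit, nch.ncp->nc_vp is NULL ...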
 *
 * MPSAFE
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and if it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
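		 * (Namecache locks are recursive, so the _cache_get() below
		 * can be made while we still hold our earlier lock on par;
		 * the following _cache_put() then releases the earlier
		 * hold/lock, leaving par held and locked exactly once.)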
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
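		 * Another thread may have resolved the ncp while we were
		 * unlocked inside vfs_busy()/VFS_ROOT(); in that case we
		 * only need to release the vnode reference acquired above.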
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

/*
 * Clean out negative cache entries when too many have accumulated.
 *
 * MPSAFE
 */
static void
_cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		spin_lock(&ncspin);
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			spin_unlock(&ncspin);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock(&ncspin);
		if (_cache_lock_special(ncp) == 0) {
			ncp = cache_zap(ncp, 1);
			if (ncp)
				_cache_drop(ncp);
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}

/*
 * Clean out positive cache entries when too many have accumulated.
 *
 * MPSAFE
 */
static void
_cache_cleanpos(int count)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	int rover_copy;

	/*
	 * Attempt to clean out the specified number of positive cache
	 * entries.
	 */
	while (count) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		nchpp = NCHHASH(rover_copy);

		spin_lock(&nchpp->spin);
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
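 *
 * The scan below keeps its place in each hash chain with a dummy marker
 * ncp (flagged NCF_DESTROYED so that lookups never match it), allowing
 * the chain spinlock to be dropped and reacquired without losing our
 * position.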
 *
 * MPSAFE
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			LIST_REMOVE(&dummy, nc_hash);
			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		LIST_REMOVE(&dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	spin_init(&ncspin);
	nchashtbl = hashinit_ext(desiredvnodes / 2,
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		LIST_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin);
	}
	nclockwarn = 5 * hz;
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record in *nch.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.
 * The namecache topology is left intact even if we do not know what
 * the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
	struct nchash_head *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		spin_lock_wr(&nchpp->spin); XXX
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				ncp = cache_zap(ncp, 0);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
		spin_unlock_wr(&nchpp->spin); XXX
	}
}

#endif

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

/*
 * MPALMOSTSAFE
 */
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	get_mplock();
	bp = kern_getcwd(buf, buflen, &error);
	rel_mplock();
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards, if we encounter the root
		 * of the current mount, we have to skip to the mount point
		 * in the underlying filesystem.
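		 * That is, we hop from the mount's mnt_ncmountpt ncp to
		 * its mnt_ncmounton ncp in the underlying filesystem and
		 * continue from there.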
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}

/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
#undef STATNODE
#define STATNODE(name)						\
	static u_int name;					\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0,
    "Disable fullpath lookups");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct nchandle *nchp,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp, *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	atomic_add_int(&numfullpathcalls, -1);

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so
		 * whenever we encounter an ncp marked as a mountpoint.
		 * We try to find the actual mountpoint by locating the
		 * mount associated with this ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards, if we encounter the root
		 * of the current mount, we have to skip to the mount point.
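		 * (When new_mp is set below, its mnt_ncmounton is the
		 * point we continue the upwards traversal from.)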
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held and
		 * locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf, int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	atomic_add_int(&numfullpathcalls, 1);
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* if vn is NULL the client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock(&vn->v_spinlock);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock(&vn->v_spinlock);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock(&vn->v_spinlock);

	atomic_add_int(&numfullpathcalls, -1);
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}
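
/*
 * Example usage of vn_fullpath() (an illustrative sketch only; the names
 * p and vp stand for any process and referenced vnode).  The path is
 * constructed backwards inside a larger temporary buffer, so the caller
 * receives two pointers: retbuf points at the start of the path within
 * the allocation and freebuf is the allocation itself, which must be
 * released with kfree(..., M_TEMP):
 *
 *	char *retbuf;
 *	char *freebuf;
 *
 *	if (vn_fullpath(p, vp, &retbuf, &freebuf, 0) == 0) {
 *		kprintf("vnode path: %s\n", retbuf);
 *		kfree(freebuf, M_TEMP);
 *	}
 */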