/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.88 2008/02/06 08:53:15 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_parent, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
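 *
 * The level can be changed at runtime through the debug.ncvp_debug sysctl
 * declared below.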
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;			/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;		/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct mount *mp);
static void _cache_rehash(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
214 * 215 * This is a rare case where callers are allowed to hold a spinlock, 216 * so we can't ourselves. 217 */ 218 static __inline 219 struct namecache * 220 _cache_hold(struct namecache *ncp) 221 { 222 atomic_add_int(&ncp->nc_refs, 1); 223 return(ncp); 224 } 225 226 /* 227 * When dropping an entry, if only one ref remains and the entry has not 228 * been resolved, zap it. Since the one reference is being dropped the 229 * entry had better not be locked. 230 */ 231 static __inline 232 void 233 _cache_drop(struct namecache *ncp) 234 { 235 KKASSERT(ncp->nc_refs > 0); 236 if (ncp->nc_refs == 1 && 237 (ncp->nc_flag & NCF_UNRESOLVED) && 238 TAILQ_EMPTY(&ncp->nc_list) 239 ) { 240 KKASSERT(ncp->nc_exlocks == 0); 241 _cache_lock(ncp); 242 cache_zap(ncp); 243 } else { 244 atomic_subtract_int(&ncp->nc_refs, 1); 245 } 246 } 247 248 /* 249 * Link a new namecache entry to its parent. Be careful to avoid races 250 * if vhold() blocks in the future. 251 */ 252 static void 253 cache_link_parent(struct namecache *ncp, struct namecache *par) 254 { 255 KKASSERT(ncp->nc_parent == NULL); 256 ncp->nc_parent = par; 257 if (TAILQ_EMPTY(&par->nc_list)) { 258 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 259 /* 260 * Any vp associated with an ncp which has children must 261 * be held to prevent it from being recycled. 262 */ 263 if (par->nc_vp) 264 vhold(par->nc_vp); 265 } else { 266 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry); 267 } 268 } 269 270 /* 271 * Remove the parent association from a namecache structure. If this is 272 * the last child of the parent the cache_drop(par) will attempt to 273 * recursively zap the parent. 274 */ 275 static void 276 cache_unlink_parent(struct namecache *ncp) 277 { 278 struct namecache *par; 279 280 if ((par = ncp->nc_parent) != NULL) { 281 ncp->nc_parent = NULL; 282 par = _cache_hold(par); 283 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 284 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 285 vdrop(par->nc_vp); 286 _cache_drop(par); 287 } 288 } 289 290 /* 291 * Allocate a new namecache structure. Most of the code does not require 292 * zero-termination of the string but it makes vop_compat_ncreate() easier. 293 */ 294 static struct namecache * 295 cache_alloc(int nlen) 296 { 297 struct namecache *ncp; 298 299 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO); 300 if (nlen) 301 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK); 302 ncp->nc_nlen = nlen; 303 ncp->nc_flag = NCF_UNRESOLVED; 304 ncp->nc_error = ENOTCONN; /* needs to be resolved */ 305 ncp->nc_refs = 1; 306 307 /* 308 * Construct a fake FSMID based on the time of day and a 32 bit 309 * roller for uniqueness. This is used to generate a useful 310 * FSMID for filesystems which do not support it. 311 */ 312 ncp->nc_fsmid = cache_getnewfsmid(); 313 TAILQ_INIT(&ncp->nc_list); 314 _cache_lock(ncp); 315 return(ncp); 316 } 317 318 static void 319 _cache_free(struct namecache *ncp) 320 { 321 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1); 322 if (ncp->nc_name) 323 kfree(ncp->nc_name, M_VFSCACHE); 324 kfree(ncp, M_VFSCACHE); 325 } 326 327 void 328 cache_zero(struct nchandle *nch) 329 { 330 nch->ncp = NULL; 331 nch->mount = NULL; 332 } 333 334 /* 335 * Ref and deref a namecache structure. 336 * 337 * Warning: caller may hold an unrelated read spinlock, which means we can't 338 * use read spinlocks here. 
339 */ 340 struct nchandle * 341 cache_hold(struct nchandle *nch) 342 { 343 _cache_hold(nch->ncp); 344 ++nch->mount->mnt_refs; 345 return(nch); 346 } 347 348 void 349 cache_copy(struct nchandle *nch, struct nchandle *target) 350 { 351 *target = *nch; 352 _cache_hold(target->ncp); 353 ++nch->mount->mnt_refs; 354 } 355 356 void 357 cache_changemount(struct nchandle *nch, struct mount *mp) 358 { 359 --nch->mount->mnt_refs; 360 nch->mount = mp; 361 ++nch->mount->mnt_refs; 362 } 363 364 void 365 cache_drop(struct nchandle *nch) 366 { 367 --nch->mount->mnt_refs; 368 _cache_drop(nch->ncp); 369 nch->ncp = NULL; 370 nch->mount = NULL; 371 } 372 373 /* 374 * Namespace locking. The caller must already hold a reference to the 375 * namecache structure in order to lock/unlock it. This function prevents 376 * the namespace from being created or destroyed by accessors other then 377 * the lock holder. 378 * 379 * Note that holding a locked namecache structure prevents other threads 380 * from making namespace changes (e.g. deleting or creating), prevents 381 * vnode association state changes by other threads, and prevents the 382 * namecache entry from being resolved or unresolved by other threads. 383 * 384 * The lock owner has full authority to associate/disassociate vnodes 385 * and resolve/unresolve the locked ncp. 386 * 387 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed 388 * or recycled, but it does NOT help you if the vnode had already initiated 389 * a recyclement. If this is important, use cache_get() rather then 390 * cache_lock() (and deal with the differences in the way the refs counter 391 * is handled). Or, alternatively, make an unconditional call to 392 * cache_validate() or cache_resolve() after cache_lock() returns. 393 */ 394 static 395 void 396 _cache_lock(struct namecache *ncp) 397 { 398 thread_t td; 399 int didwarn; 400 401 KKASSERT(ncp->nc_refs != 0); 402 didwarn = 0; 403 td = curthread; 404 405 for (;;) { 406 if (ncp->nc_exlocks == 0) { 407 ncp->nc_exlocks = 1; 408 ncp->nc_locktd = td; 409 /* 410 * The vp associated with a locked ncp must be held 411 * to prevent it from being recycled (which would 412 * cause the ncp to become unresolved). 413 * 414 * WARNING! If VRECLAIMED is set the vnode could 415 * already be in the middle of a recycle. Callers 416 * should not assume that nc_vp is usable when 417 * not NULL. cache_vref() or cache_vget() must be 418 * called. 419 * 420 * XXX loop on race for later MPSAFE work. 421 */ 422 if (ncp->nc_vp) 423 vhold(ncp->nc_vp); 424 break; 425 } 426 if (ncp->nc_locktd == td) { 427 ++ncp->nc_exlocks; 428 break; 429 } 430 ncp->nc_flag |= NCF_LOCKREQ; 431 if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) { 432 if (didwarn) 433 continue; 434 didwarn = 1; 435 kprintf("[diagnostic] cache_lock: blocked on %p", ncp); 436 kprintf(" \"%*.*s\"\n", 437 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 438 } 439 } 440 441 if (didwarn == 1) { 442 kprintf("[diagnostic] cache_lock: unblocked %*.*s\n", 443 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name); 444 } 445 } 446 447 void 448 cache_lock(struct nchandle *nch) 449 { 450 _cache_lock(nch->ncp); 451 } 452 453 static 454 int 455 _cache_lock_nonblock(struct namecache *ncp) 456 { 457 thread_t td; 458 459 KKASSERT(ncp->nc_refs != 0); 460 td = curthread; 461 if (ncp->nc_exlocks == 0) { 462 ncp->nc_exlocks = 1; 463 ncp->nc_locktd = td; 464 /* 465 * The vp associated with a locked ncp must be held 466 * to prevent it from being recycled (which would 467 * cause the ncp to become unresolved). 
468 * 469 * WARNING! If VRECLAIMED is set the vnode could 470 * already be in the middle of a recycle. Callers 471 * should not assume that nc_vp is usable when 472 * not NULL. cache_vref() or cache_vget() must be 473 * called. 474 * 475 * XXX loop on race for later MPSAFE work. 476 */ 477 if (ncp->nc_vp) 478 vhold(ncp->nc_vp); 479 return(0); 480 } else { 481 return(EWOULDBLOCK); 482 } 483 } 484 485 int 486 cache_lock_nonblock(struct nchandle *nch) 487 { 488 return(_cache_lock_nonblock(nch->ncp)); 489 } 490 491 static 492 void 493 _cache_unlock(struct namecache *ncp) 494 { 495 thread_t td = curthread; 496 497 KKASSERT(ncp->nc_refs > 0); 498 KKASSERT(ncp->nc_exlocks > 0); 499 KKASSERT(ncp->nc_locktd == td); 500 if (--ncp->nc_exlocks == 0) { 501 if (ncp->nc_vp) 502 vdrop(ncp->nc_vp); 503 ncp->nc_locktd = NULL; 504 if (ncp->nc_flag & NCF_LOCKREQ) { 505 ncp->nc_flag &= ~NCF_LOCKREQ; 506 wakeup(ncp); 507 } 508 } 509 } 510 511 void 512 cache_unlock(struct nchandle *nch) 513 { 514 _cache_unlock(nch->ncp); 515 } 516 517 /* 518 * ref-and-lock, unlock-and-deref functions. 519 * 520 * This function is primarily used by nlookup. Even though cache_lock 521 * holds the vnode, it is possible that the vnode may have already 522 * initiated a recyclement. We want cache_get() to return a definitively 523 * usable vnode or a definitively unresolved ncp. 524 */ 525 static 526 struct namecache * 527 _cache_get(struct namecache *ncp) 528 { 529 _cache_hold(ncp); 530 _cache_lock(ncp); 531 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 532 _cache_setunresolved(ncp); 533 return(ncp); 534 } 535 536 /* 537 * note: the same nchandle can be passed for both arguments. 538 */ 539 void 540 cache_get(struct nchandle *nch, struct nchandle *target) 541 { 542 target->mount = nch->mount; 543 target->ncp = _cache_get(nch->ncp); 544 ++target->mount->mnt_refs; 545 } 546 547 static int 548 _cache_get_nonblock(struct namecache *ncp) 549 { 550 /* XXX MP */ 551 if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) { 552 _cache_hold(ncp); 553 _cache_lock(ncp); 554 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) 555 _cache_setunresolved(ncp); 556 return(0); 557 } 558 return(EWOULDBLOCK); 559 } 560 561 int 562 cache_get_nonblock(struct nchandle *nch) 563 { 564 int error; 565 566 if ((error = _cache_get_nonblock(nch->ncp)) == 0) 567 ++nch->mount->mnt_refs; 568 return (error); 569 } 570 571 static __inline 572 void 573 _cache_put(struct namecache *ncp) 574 { 575 _cache_unlock(ncp); 576 _cache_drop(ncp); 577 } 578 579 void 580 cache_put(struct nchandle *nch) 581 { 582 --nch->mount->mnt_refs; 583 _cache_put(nch->ncp); 584 nch->ncp = NULL; 585 nch->mount = NULL; 586 } 587 588 /* 589 * Resolve an unresolved ncp by associating a vnode with it. If the 590 * vnode is NULL, a negative cache entry is created. 591 * 592 * The ncp should be locked on entry and will remain locked on return. 593 */ 594 static 595 void 596 _cache_setvp(struct namecache *ncp, struct vnode *vp) 597 { 598 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED); 599 ncp->nc_vp = vp; 600 if (vp != NULL) { 601 /* 602 * Any vp associated with an ncp which has children must 603 * be held. Any vp associated with a locked ncp must be held. 
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
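 *
 * (The callback below returns 1 for a match; the flag is only cleared
 * when the scan reports no matches at all.)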
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state.  The passed ncp must be
 * locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp; if
 *			  CINV_CHILDREN is specified the children are not
 *			  flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
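 *
 * An illustrative usage sketch (hypothetical caller, e.g. a directory
 * removal invalidating its subtree; not an excerpt from a real caller):
 *
 *	cache_lock(&nch);
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 *	cache_unlock(&nch);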
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_hold(kid);
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}

/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology, which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
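 *
 * (This is why the loops below re-check ncp->nc_vp against vp after each
 * hold/lock and restart or bail out when a race is detected.)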
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			break;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	char *oname;

	_cache_setunresolved(tncp);
	cache_unlink_parent(fncp);
	cache_link_parent(fncp, tncp->nc_parent);
	cache_unlink_parent(tncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;
	if (fncp->nc_flag & NCF_HASHED)
		_cache_rehash(fncp);
	if (tncp->nc_flag & NCF_HASHED)
		_cache_rehash(tncp);
	if (oname)
		kfree(oname, M_VFSCACHE);
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.
 * The most typical error is ENOENT, meaning that the ncp represents a
 * negative cache hit and there is no vnode to retrieve, but other errors
 * can occur too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
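/*
 * An illustrative calling sketch for cache_vget() (hypothetical caller,
 * error paths elided; not an excerpt from real code):
 *
 *	struct vnode *vp;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);
 *	}
 */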
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected "
				"in cache_vget on %p (%s)\n",
				vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if ((error = vget(vp, LK_SHARED)) != 0) {
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"on cache_vref %p (%s)\n",
					vp, ncp->nc_name);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				_cache_unlock(ncp);
				goto again;
			}
			/* fatal error */
		} else {
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
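/*
 * (Illustration, hypothetical caller: a write path that wants the
 * hierarchy flagged would call cache_update_fsmid(&nch) on the file's
 * nchandle, or cache_update_fsmid_vp(vp) when only the vnode is at hand.)
 */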
void
cache_update_fsmid(struct nchandle *nch)
{
	struct namecache *ncp;
	struct namecache *scan;
	struct vnode *vp;

	ncp = nch->ncp;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}

/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is
 * returned.  If 'makeit' is 1 we attempt to track down and create the
 * namecache topology for dvp.  This will fail only if the directory has
 * been deleted out from under the caller.
 *
 * Callers must always check for a NULL return no matter the value
 * of 'makeit'.
 *
 * To avoid blowing out the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
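		 *
		 * (cache_inefficient_scan() below performs the O(n)
		 * directory walk; on success it creates and resolves the
		 * namecache record for dvp.)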
1359 */ 1360 error = cache_inefficient_scan(nch, cred, dvp, fakename); 1361 if (error) { 1362 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n", 1363 pvp, nch->ncp->nc_name, dvp); 1364 cache_drop(nch); 1365 /* nch was NULLed out, reload mount */ 1366 nch->mount = dvp->v_mount; 1367 break; 1368 } 1369 if (ncvp_debug) { 1370 kprintf("cache_fromdvp: scan %p (%s) succeeded\n", 1371 pvp, nch->ncp->nc_name); 1372 } 1373 cache_drop(nch); 1374 /* nch was NULLed out, reload mount */ 1375 nch->mount = dvp->v_mount; 1376 } 1377 1378 if (fakename) 1379 kfree(fakename, M_TEMP); 1380 1381 /* 1382 * hold it for real so the mount gets a ref 1383 */ 1384 if (nch->ncp) 1385 cache_hold(nch); 1386 if (saved_dvp) 1387 vrele(saved_dvp); 1388 if (nch->ncp) 1389 return (0); 1390 return (EINVAL); 1391 } 1392 1393 /* 1394 * Go up the chain of parent directories until we find something 1395 * we can resolve into the namecache. This is very inefficient. 1396 */ 1397 static 1398 int 1399 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred, 1400 struct vnode **saved_dvp) 1401 { 1402 struct nchandle nch; 1403 struct vnode *pvp; 1404 int error; 1405 static time_t last_fromdvp_report; 1406 char *fakename; 1407 1408 /* 1409 * Loop getting the parent directory vnode until we get something we 1410 * can resolve in the namecache. 1411 */ 1412 vref(dvp); 1413 nch.mount = dvp->v_mount; 1414 nch.ncp = NULL; 1415 fakename = NULL; 1416 1417 for (;;) { 1418 if (fakename) { 1419 kfree(fakename, M_TEMP); 1420 fakename = NULL; 1421 } 1422 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred, 1423 &fakename); 1424 if (error) { 1425 vrele(dvp); 1426 break; 1427 } 1428 vn_unlock(pvp); 1429 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) { 1430 _cache_hold(nch.ncp); 1431 vrele(pvp); 1432 break; 1433 } 1434 if (pvp->v_flag & VROOT) { 1435 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp); 1436 error = cache_resolve_mp(nch.mount); 1437 _cache_unlock(nch.ncp); 1438 vrele(pvp); 1439 if (error) { 1440 _cache_drop(nch.ncp); 1441 nch.ncp = NULL; 1442 vrele(dvp); 1443 } 1444 break; 1445 } 1446 vrele(dvp); 1447 dvp = pvp; 1448 } 1449 if (error == 0) { 1450 if (last_fromdvp_report != time_second) { 1451 last_fromdvp_report = time_second; 1452 kprintf("Warning: extremely inefficient path " 1453 "resolution on %s\n", 1454 nch.ncp->nc_name); 1455 } 1456 error = cache_inefficient_scan(&nch, cred, dvp, fakename); 1457 1458 /* 1459 * Hopefully dvp now has a namecache record associated with 1460 * it. Leave it referenced to prevent the kernel from 1461 * recycling the vnode. Otherwise extremely long directory 1462 * paths could result in endless recycling. 1463 */ 1464 if (*saved_dvp) 1465 vrele(*saved_dvp); 1466 *saved_dvp = dvp; 1467 _cache_drop(nch.ncp); 1468 } 1469 if (fakename) 1470 kfree(fakename, M_TEMP); 1471 return (error); 1472 } 1473 1474 /* 1475 * Do an inefficient scan of the directory represented by ncp looking for 1476 * the directory vnode dvp. ncp must be held but not locked on entry and 1477 * will be held on return. dvp must be refd but not locked on entry and 1478 * will remain refd on return. 1479 * 1480 * Why do this at all? Well, due to its stateless nature the NFS server 1481 * converts file handles directly to vnodes without necessarily going through 1482 * the namecache ops that would otherwise create the namecache topology 1483 * leading to the vnode. 
 * We could either (1) change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to
 * issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to hold placeholders for
 * negative lookups and the vnode might not be immediately assigned.
 * (2) is certainly far less efficient than (1), but since we are only
 * talking about directories here (which are likely to remain cached), the
 * case does not actually run all that often and has the supreme advantage
 * of not polluting the namecache algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(nch, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize, vat.va_fileid);

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
1530 */ 1531 if (fakename) { 1532 nlc.nlc_nameptr = fakename; 1533 nlc.nlc_namelen = strlen(fakename); 1534 rncp = cache_nlookup(nch, &nlc); 1535 goto done; 1536 } 1537 1538 if ((blksize = vat.va_blocksize) == 0) 1539 blksize = DEV_BSIZE; 1540 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK); 1541 rncp.ncp = NULL; 1542 1543 eofflag = 0; 1544 uio.uio_offset = 0; 1545 again: 1546 iov.iov_base = rbuf; 1547 iov.iov_len = blksize; 1548 uio.uio_iov = &iov; 1549 uio.uio_iovcnt = 1; 1550 uio.uio_resid = blksize; 1551 uio.uio_segflg = UIO_SYSSPACE; 1552 uio.uio_rw = UIO_READ; 1553 uio.uio_td = curthread; 1554 1555 if (ncvp_debug >= 2) 1556 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset); 1557 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL); 1558 if (error == 0) { 1559 den = (struct dirent *)rbuf; 1560 bytes = blksize - uio.uio_resid; 1561 1562 while (bytes > 0) { 1563 if (ncvp_debug >= 2) { 1564 kprintf("cache_inefficient_scan: %*.*s\n", 1565 den->d_namlen, den->d_namlen, 1566 den->d_name); 1567 } 1568 if (den->d_type != DT_WHT && 1569 den->d_ino == vat.va_fileid) { 1570 if (ncvp_debug) { 1571 kprintf("cache_inefficient_scan: " 1572 "MATCHED inode %lld path %s/%*.*s\n", 1573 vat.va_fileid, nch->ncp->nc_name, 1574 den->d_namlen, den->d_namlen, 1575 den->d_name); 1576 } 1577 nlc.nlc_nameptr = den->d_name; 1578 nlc.nlc_namelen = den->d_namlen; 1579 rncp = cache_nlookup(nch, &nlc); 1580 KKASSERT(rncp.ncp != NULL); 1581 break; 1582 } 1583 bytes -= _DIRENT_DIRSIZ(den); 1584 den = _DIRENT_NEXT(den); 1585 } 1586 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize) 1587 goto again; 1588 } 1589 kfree(rbuf, M_TEMP); 1590 done: 1591 vrele(pvp); 1592 if (rncp.ncp) { 1593 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) { 1594 _cache_setvp(rncp.ncp, dvp); 1595 if (ncvp_debug >= 2) { 1596 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n", 1597 nch->ncp->nc_name, rncp.ncp->nc_name, dvp); 1598 } 1599 } else { 1600 if (ncvp_debug >= 2) { 1601 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n", 1602 nch->ncp->nc_name, rncp.ncp->nc_name, dvp, 1603 rncp.ncp->nc_vp); 1604 } 1605 } 1606 if (rncp.ncp->nc_vp == NULL) 1607 error = rncp.ncp->nc_error; 1608 /* 1609 * Release rncp after a successful nlookup. rncp was fully 1610 * referenced. 1611 */ 1612 cache_put(&rncp); 1613 } else { 1614 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n", 1615 dvp, nch->ncp->nc_name); 1616 error = ENOENT; 1617 } 1618 return (error); 1619 } 1620 1621 /* 1622 * Zap a namecache entry. The ncp is unconditionally set to an unresolved 1623 * state, which disassociates it from its vnode or ncneglist. 1624 * 1625 * Then, if there are no additional references to the ncp and no children, 1626 * the ncp is removed from the topology and destroyed. This function will 1627 * also run through the nc_parent chain and destroy parent ncps if possible. 1628 * As a side benefit, it turns out the only conditions that allow running 1629 * up the chain are also the conditions to ensure no deadlock will occur. 1630 * 1631 * References and/or children may exist if the ncp is in the middle of the 1632 * topology, preventing the ncp from being destroyed. 1633 * 1634 * This function must be called with the ncp held and locked and will unlock 1635 * and drop it during zapping. 1636 */ 1637 static void 1638 cache_zap(struct namecache *ncp) 1639 { 1640 struct namecache *par; 1641 1642 /* 1643 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED. 
1644 */ 1645 _cache_setunresolved(ncp); 1646 1647 /* 1648 * Try to scrap the entry and possibly tail-recurse on its parent. 1649 * We only scrap unref'd (other then our ref) unresolved entries, 1650 * we do not scrap 'live' entries. 1651 */ 1652 while (ncp->nc_flag & NCF_UNRESOLVED) { 1653 /* 1654 * Someone other then us has a ref, stop. 1655 */ 1656 if (ncp->nc_refs > 1) 1657 goto done; 1658 1659 /* 1660 * We have children, stop. 1661 */ 1662 if (!TAILQ_EMPTY(&ncp->nc_list)) 1663 goto done; 1664 1665 /* 1666 * Remove ncp from the topology: hash table and parent linkage. 1667 */ 1668 if (ncp->nc_flag & NCF_HASHED) { 1669 ncp->nc_flag &= ~NCF_HASHED; 1670 LIST_REMOVE(ncp, nc_hash); 1671 } 1672 if ((par = ncp->nc_parent) != NULL) { 1673 par = _cache_hold(par); 1674 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry); 1675 ncp->nc_parent = NULL; 1676 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list)) 1677 vdrop(par->nc_vp); 1678 } 1679 1680 /* 1681 * ncp should not have picked up any refs. Physically 1682 * destroy the ncp. 1683 */ 1684 KKASSERT(ncp->nc_refs == 1); 1685 --numunres; 1686 /* _cache_unlock(ncp) not required */ 1687 ncp->nc_refs = -1; /* safety */ 1688 if (ncp->nc_name) 1689 kfree(ncp->nc_name, M_VFSCACHE); 1690 kfree(ncp, M_VFSCACHE); 1691 1692 /* 1693 * Loop on the parent (it may be NULL). Only bother looping 1694 * if the parent has a single ref (ours), which also means 1695 * we can lock it trivially. 1696 */ 1697 ncp = par; 1698 if (ncp == NULL) 1699 return; 1700 if (ncp->nc_refs != 1) { 1701 _cache_drop(ncp); 1702 return; 1703 } 1704 KKASSERT(par->nc_exlocks == 0); 1705 _cache_lock(ncp); 1706 } 1707 done: 1708 _cache_unlock(ncp); 1709 atomic_subtract_int(&ncp->nc_refs, 1); 1710 } 1711 1712 static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW; 1713 1714 static __inline 1715 void 1716 cache_hysteresis(void) 1717 { 1718 /* 1719 * Don't cache too many negative hits. We use hysteresis to reduce 1720 * the impact on the critical path. 1721 */ 1722 switch(cache_hysteresis_state) { 1723 case CHI_LOW: 1724 if (numneg > MINNEG && numneg * ncnegfactor > numcache) { 1725 cache_cleanneg(10); 1726 cache_hysteresis_state = CHI_HIGH; 1727 } 1728 break; 1729 case CHI_HIGH: 1730 if (numneg > MINNEG * 9 / 10 && 1731 numneg * ncnegfactor * 9 / 10 > numcache 1732 ) { 1733 cache_cleanneg(10); 1734 } else { 1735 cache_hysteresis_state = CHI_LOW; 1736 } 1737 break; 1738 } 1739 } 1740 1741 /* 1742 * NEW NAMECACHE LOOKUP API 1743 * 1744 * Lookup an entry in the cache. A locked, referenced, non-NULL 1745 * entry is *always* returned, even if the supplied component is illegal. 1746 * The resulting namecache entry should be returned to the system with 1747 * cache_put() or _cache_unlock() + cache_drop(). 1748 * 1749 * namecache locks are recursive but care must be taken to avoid lock order 1750 * reversals. 1751 * 1752 * Nobody else will be able to manipulate the associated namespace (e.g. 1753 * create, delete, rename, rename-target) until the caller unlocks the 1754 * entry. 1755 * 1756 * The returned entry will be in one of three states: positive hit (non-null 1757 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set). 1758 * Unresolved entries must be resolved through the filesystem to associate the 1759 * vnode and/or determine whether a positive or negative hit has occured. 1760 * 1761 * It is not necessary to lock a directory in order to lock namespace under 1762 * that directory. In fact, it is explicitly not allowed to do that. 
A 1763 * directory is typically only locked when being created, renamed, or 1764 * destroyed. 1765 * 1766 * The directory (par) may be unresolved, in which case any returned child 1767 * will likely also be marked unresolved. Likely but not guarenteed. Since 1768 * the filesystem lookup requires a resolved directory vnode the caller is 1769 * responsible for resolving the namecache chain top-down. This API 1770 * specifically allows whole chains to be created in an unresolved state. 1771 */ 1772 struct nchandle 1773 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc) 1774 { 1775 struct nchandle nch; 1776 struct namecache *ncp; 1777 struct namecache *new_ncp; 1778 struct nchashhead *nchpp; 1779 u_int32_t hash; 1780 globaldata_t gd; 1781 1782 numcalls++; 1783 gd = mycpu; 1784 1785 /* 1786 * Try to locate an existing entry 1787 */ 1788 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT); 1789 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash); 1790 new_ncp = NULL; 1791 restart: 1792 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1793 numchecks++; 1794 1795 /* 1796 * Try to zap entries that have timed out. We have 1797 * to be careful here because locked leafs may depend 1798 * on the vnode remaining intact in a parent, so only 1799 * do this under very specific conditions. 1800 */ 1801 if (ncp->nc_timeout && 1802 (int)(ncp->nc_timeout - ticks) < 0 && 1803 (ncp->nc_flag & NCF_UNRESOLVED) == 0 && 1804 ncp->nc_exlocks == 0 && 1805 TAILQ_EMPTY(&ncp->nc_list) 1806 ) { 1807 cache_zap(_cache_get(ncp)); 1808 goto restart; 1809 } 1810 1811 /* 1812 * Break out if we find a matching entry. Note that 1813 * UNRESOLVED entries may match, but DESTROYED entries 1814 * do not. 1815 */ 1816 if (ncp->nc_parent == par_nch->ncp && 1817 ncp->nc_nlen == nlc->nlc_namelen && 1818 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 && 1819 (ncp->nc_flag & NCF_DESTROYED) == 0 1820 ) { 1821 if (_cache_get_nonblock(ncp) == 0) { 1822 if (new_ncp) 1823 _cache_free(new_ncp); 1824 goto found; 1825 } 1826 _cache_get(ncp); 1827 _cache_put(ncp); 1828 goto restart; 1829 } 1830 } 1831 1832 /* 1833 * We failed to locate an entry, create a new entry and add it to 1834 * the cache. We have to relookup after possibly blocking in 1835 * malloc. 1836 */ 1837 if (new_ncp == NULL) { 1838 new_ncp = cache_alloc(nlc->nlc_namelen); 1839 goto restart; 1840 } 1841 1842 ncp = new_ncp; 1843 1844 /* 1845 * Initialize as a new UNRESOLVED entry, lock (non-blocking), 1846 * and link to the parent. The mount point is usually inherited 1847 * from the parent unless this is a special case such as a mount 1848 * point where nlc_namelen is 0. If nlc_namelen is 0 nc_name will 1849 * be NULL. 1850 */ 1851 if (nlc->nlc_namelen) { 1852 bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen); 1853 ncp->nc_name[nlc->nlc_namelen] = 0; 1854 } 1855 nchpp = NCHHASH(hash); 1856 LIST_INSERT_HEAD(nchpp, ncp, nc_hash); 1857 ncp->nc_flag |= NCF_HASHED; 1858 cache_link_parent(ncp, par_nch->ncp); 1859 found: 1860 /* 1861 * stats and namecache size management 1862 */ 1863 if (ncp->nc_flag & NCF_UNRESOLVED) 1864 ++gd->gd_nchstats->ncs_miss; 1865 else if (ncp->nc_vp) 1866 ++gd->gd_nchstats->ncs_goodhits; 1867 else 1868 ++gd->gd_nchstats->ncs_neghits; 1869 cache_hysteresis(); 1870 nch.mount = par_nch->mount; 1871 nch.ncp = ncp; 1872 ++nch.mount->mnt_refs; 1873 return(nch); 1874 } 1875 1876 /* 1877 * The namecache entry is marked as being used as a mount point. 1878 * Locate the mount if it is visible to the caller. 
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
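/*
 * Illustrative sketch, not part of the original file: mountlist_scan()
 * drives its callback over every mount in the list; judging from
 * cache_findmount_callback() above, a negative return value terminates
 * the scan early and 0 continues it.  A hypothetical callback that just
 * counts mounts:
 */
#if 0
static int
example_count_callback(struct mount *mp, void *data)
{
	int *countp = data;

	++*countp;
	return(0);		/* keep scanning */
}

static int
example_count_mounts(void)
{
	int count = 0;

	mountlist_scan(example_count_callback, &count,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(count);
}
#endif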
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and when it does it should not have to
	 * back up over very many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = par->nc_parent->nc_vp) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			/* vhold(dvp); - DVP can't go away */
			nctmp.mount = mp;
			nctmp.ncp = par;
			par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			/* vdrop(dvp); */
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par "
				"%p %*.*s\n", par, par->nc_nlen,
				par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	dvp = ncp->nc_parent->nc_vp;
	/* vhold(dvp); - dvp can't go away */
	nctmp.mount = mp;
	nctmp.ncp = ncp;
	ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
	/* vdrop(dvp); */
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
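/*
 * Illustrative sketch, not part of the original file: how a caller
 * distinguishes the three outcomes of cache_resolve().  Per the comment
 * above, ENOENT is a successful resolution to a negative hit rather than
 * a resolver failure.  The helper name is hypothetical; nch must be
 * locked and referenced as cache_resolve() requires.
 */
#if 0
static int
example_classify_resolve(struct nchandle *nch, struct ucred *cred)
{
	int error;

	error = cache_resolve(nch, cred);
	if (error == 0) {
		/* positive hit: nch->ncp->nc_vp is a usable vnode */
	} else if (error == ENOENT) {
		/* negative hit: the name is known not to exist */
	} else {
		/* hard error, e.g. EXDEV for a broken parent chain */
	}
	return (error);
}
#endif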
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to their mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p\n", mp);
				_cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (_cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
_cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 1 * hz;
}
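/*
 * Illustrative sketch, not part of the original file: cache_nlookup()
 * and _cache_rehash() must derive the identical FNV-1 hash from the
 * (name, parent pointer) pair or rehashed entries would become invisible
 * to subsequent lookups.  A hypothetical shared helper would look like
 * this:
 */
#if 0
static __inline u_int32_t
example_nchash(const char *name, int nlen, struct namecache *par)
{
	u_int32_t hash;

	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);	/* pointer bytes */
	return (hash);
}
#endif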
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record in *nch.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	++mp->mnt_refs;
	if (vp)
		_cache_setvp(nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
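/*
 * Illustrative sketch, not part of the original file: the usual consumer
 * of cache_purge() is a filesystem's reclaim path, which severs the
 * namecache linkage before tearing down its private vnode state.  The
 * filesystem name is hypothetical and the fs-private teardown is elided.
 */
#if 0
static int
examplefs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;

	cache_purge(vp);	/* disassociate all namecache entries */
	/* ... release fs-private data attached to vp ... */
	return (0);
}
#endif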
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				cache_zap(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

#endif

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}


static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
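/*
 * Illustrative sketch, not part of the original file: kern_getcwd() above
 * and cache_fullpath() below both build the path right-to-left, starting
 * bp at the end of the buffer and prepending one component at a time,
 * which avoids having to know the total length in advance.  The helper
 * name is hypothetical; it returns the new bp or NULL on overflow (the
 * callers' ENOMEM case).
 */
#if 0
static char *
example_prepend_segment(char *buf, char *bp, const char *name, int nlen)
{
	while (--nlen >= 0) {
		if (bp == buf)
			return (NULL);	/* buffer exhausted */
		*--bp = name[nlen];
	}
	if (bp == buf)
		return (NULL);
	*--bp = '/';			/* separator for this component */
	return (bp);
}
#endif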
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf,
	       char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct nchandle fd_nrdir;
	struct nchandle nch;

	numfullpathcalls--;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;

	while (nch.ncp &&
	       (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		return(ENOENT);
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;
	struct nchandle nch;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	return(cache_fullpath(p, &nch, retbuf, freebuf));
}
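/*
 * Illustrative sketch, not part of the original file: a caller of
 * vn_fullpath() receives retbuf pointing into the freebuf allocation,
 * so only freebuf is passed to kfree().  The helper name is hypothetical.
 */
#if 0
static void
example_print_vnode_path(struct proc *p, struct vnode *vp)
{
	char *path, *freeme;

	if (vn_fullpath(p, vp, &path, &freeme) == 0) {
		kprintf("vnode %p path %s\n", vp, path);
		kfree(freeme, M_TEMP);	/* frees path's backing store */
	}
}
#endif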