/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.91 2008/06/14 05:34:06 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (parent ncp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromdvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;		/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;	/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;	/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_rehash(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
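/*
 * Illustrative sketch (hypothetical user-land consumer, not part of this
 * file): since the handler above emits one struct nchstats per cpu, a
 * user-land reader of "vfs.cache.nchstats" is expected to aggregate the
 * per-cpu array itself, e.g.:
 *
 *	size_t len;
 *	struct nchstats *stats;
 *	int ncpu;
 *
 *	sysctlbyname("vfs.cache.nchstats", NULL, &len, NULL, 0);
 *	stats = malloc(len);
 *	sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0);
 *	ncpu = len / sizeof(*stats);
 *	... sum stats[0..ncpu-1].ncs_goodhits, etc ...
 *	free(stats);
 */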
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		_cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = _cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		_cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	++nch->mount->mnt_refs;
	return(nch);
}

void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	_cache_hold(target->ncp);
	++nch->mount->mnt_refs;
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	--nch->mount->mnt_refs;
	nch->mount = mp;
	++nch->mount->mnt_refs;
}

void
cache_drop(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
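/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * typical hold/drop bracket around temporary use of an nchandle.  The
 * hold only prevents deletion of the structure, it does not lock out
 * namespace operations:
 *
 *	struct nchandle nch;		// obtained from a prior lookup
 *
 *	cache_hold(&nch);
 *	... inspect nch.ncp->nc_flag, nch.ncp->nc_vp, etc ...
 *	cache_drop(&nch);		// also NULLs out nch
 */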
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
			kprintf(" \"%*.*s\"\n",
				ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * note: the same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	++target->mount->mnt_refs;
}

static int
_cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		_cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

int
cache_get_nonblock(struct nchandle *nch)
{
	int error;

	if ((error = _cache_get_nonblock(nch->ncp)) == 0)
		++nch->mount->mnt_refs;
	return (error);
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
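/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * usual ref-and-lock bracket.  cache_get() re-checks VRECLAIMED under the
 * lock, so on return the handle is either definitively usable or
 * definitively unresolved:
 *
 *	struct nchandle locked_nch;
 *
 *	cache_get(&nch, &locked_nch);	// ref + lock (args may be the same)
 *	if (locked_nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		... resolve via cache_resolve(&locked_nch, cred) ...
 *	cache_put(&locked_nch);		// unlock + drop, NULLs out handle
 */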
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}
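/*
 * Illustrative sketch (hypothetical filesystem resolver, not part of this
 * file): a VFS's nresolve VOP typically performs a directory lookup and
 * then terminates the ncp's unresolved state with cache_setvp(), passing
 * NULL to record a negative hit.  fs_dirlookup() is a made-up helper:
 *
 *	error = fs_dirlookup(dvp, nch->ncp->nc_name,
 *			     nch->ncp->nc_nlen, &vp);
 *	if (error == 0) {
 *		cache_setvp(nch, vp);		// positive entry
 *		vrele(vp);
 *	} else if (error == ENOENT) {
 *		cache_setvp(nch, NULL);		// negative entry
 *	}
 *	return (error);
 */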
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
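/*
 * Illustrative sketch (hypothetical unmount path, not part of this file):
 * after a filesystem is detached, the covered directory's NCF_ISMOUNTPT
 * flag is only cleared if no remaining mount still references it:
 *
 *	... mp has been removed from the mountlist ...
 *	cache_clrmountpt(&mp->mnt_ncmounton);
 */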
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state.
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_hold(kid);
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
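/*
 * Illustrative sketch (hypothetical rmdir path, not part of this file):
 * removing a directory destroys the node itself and unresolves any
 * children.  nch must be held and locked:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 */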
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			break;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	char *oname;

	_cache_setunresolved(tncp);
	cache_unlink_parent(fncp);
	cache_link_parent(fncp, tncp->nc_parent);
	cache_unlink_parent(tncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;
	if (fncp->nc_flag & NCF_HASHED)
		_cache_rehash(fncp);
	if (tncp->nc_flag & NCF_HASHED)
		_cache_rehash(tncp);
	if (oname)
		kfree(oname, M_VFSCACHE);
}
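/*
 * Illustrative sketch (hypothetical rename path, not part of this file):
 * the caller locks both handles, performs the filesystem-level rename,
 * then fixes up the topology:
 *
 *	... fnch and tnch are locked, the VOP rename succeeded ...
 *	cache_rename(&fnch, &tnch);	// fncp takes over tncp's name/place
 */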
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected "
				"in cache_vget on %p (%s)\n",
				vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if ((error = vget(vp, LK_SHARED)) != 0) {
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"on cache_vref %p (%s)\n",
					vp, ncp->nc_name);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				_cache_unlock(ncp);
				goto again;
			}
			/* fatal error */
		} else {
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
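/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * obtaining a locked vnode from an nchandle, treating a negative hit
 * like any other lookup failure:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_EXCLUSIVE, &vp);
 *	if (error)		// ENOENT on a negative cache hit
 *		return (error);
 *	... use vp ...
 *	vput(vp);		// releases both the lock and the ref
 */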
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.  Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.
 *
 * However, we might race against the parent dvp and not be able to
 * reference it.  If we race, return NULL.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL) {
				if (vget(dvp, LK_SHARED) == 0) {
					vn_unlock(dvp);
					/* return referenced, unlocked dvp */
				} else {
					dvp = NULL;
				}
			}
		}
	}
	return(dvp);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct nchandle *nch)
{
	struct namecache *ncp;
	struct namecache *scan;
	struct vnode *vp;

	ncp = nch->ncp;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}
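/*
 * Illustrative sketch (hypothetical filesystem getattr, not part of this
 * file): lazily folding the hierarchical change flag into the fsmid
 * reported by stat().  fs_fsmid is a made-up per-filesystem counter:
 *
 *	static int64_t fs_fsmid;
 *
 *	cache_check_fsmid_vp(vp, &fs_fsmid);	// bumps fs_fsmid if changed
 *	vap->va_fsmid = fs_fsmid;
 */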
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid overflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we have recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	if (fakename)
		kfree(fakename, M_TEMP);

	/*
	 * hold it for real so the mount gets a ref
	 */
	if (nch->ncp)
		cache_hold(nch);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(nch, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize, vat.va_fileid);

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						vat.va_fileid, nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = _cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* _cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHE);
		kfree(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			_cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		_cache_lock(ncp);
	}
done:
	_cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}
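/*
 * Worked example of the thresholds above, using the defaults MINNEG = 1024
 * and ncnegfactor = 16, with numcache = 100000:
 *
 *	CHI_LOW begins a cleaning batch when numneg > 1024 and
 *	numneg * 16 > 100000, i.e. once numneg exceeds ~6250.
 *
 *	CHI_HIGH keeps issuing batches while numneg > 921 and
 *	numneg * 16 * 9 / 10 > 100000, i.e. while numneg exceeds ~6944;
 *	otherwise it falls back to CHI_LOW.
 *
 * Each trigger only cleans a batch of 10 negative entries, bounding the
 * work added to any single pass through the lookup critical path.
 */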
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Try to zap entries that have timed out.  We have
		 * to be careful here because locked leafs may depend
		 * on the vnode remaining intact in a parent, so only
		 * do this under very specific conditions.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0 &&
		    TAILQ_EMPTY(&ncp->nc_list)
		) {
			cache_zap(_cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (_cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par_nch->ncp);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	nch.mount = par_nch->mount;
	nch.ncp = ncp;
	++nch.mount->mnt_refs;
	return(nch);
}
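/*
 * Illustrative sketch (hypothetical path walker, not part of this file):
 * looking up one component under a held directory handle and resolving
 * it if the cache could not:
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = name;
 *	nlc.nlc_namelen = namelen;
 *	nch = cache_nlookup(&par_nch, &nlc);	// always locked + refd
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, cred);
 *	else
 *		error = nch.ncp->nc_error;	// ENOENT on a negative hit
 *	...
 *	cache_put(&nch);
 */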
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
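
/*
 * Illustrative sketch (not compiled): a traversal loop can use
 * cache_findmount() to detect that a directory entry has a filesystem
 * mounted on it and hop onto that filesystem's root.  Reference and
 * lock management is elided here; see kern_getcwd() below for how the
 * reverse hop (mnt_ncmounton) is taken when walking upwards.
 */
#if 0
static void
example_cross_mount(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = cache_findmount(nch)) != NULL) {
		/* descend onto the root ncp of the mounted filesystem */
		*nch = mp->mnt_ncmountpt;
	}
}
#endif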
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and when it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone; ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
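
/*
 * Illustrative sketch (not compiled): the general shape of a
 * filesystem's VOP_NRESOLVE method under the contract above -- attach
 * a vnode for a positive hit or a NULL vnode to record a negative
 * hit.  examplefs_dirlookup(), the argument field names, and the
 * public cache_setvp() (this file only shows the internal
 * _cache_setvp() helper) are all assumptions.
 */
#if 0
static int
examplefs_nresolve(struct vop_nresolve_args *ap)
{
	struct namecache *ncp = ap->a_nch->ncp;
	struct vnode *vp;
	int error;

	error = examplefs_dirlookup(ap->a_dvp, ncp->nc_name,
				    ncp->nc_nlen, &vp);
	if (error == 0) {
		cache_setvp(ap->a_nch, vp);	/* positive hit */
		vrele(vp);
	} else if (error == ENOENT) {
		cache_setvp(ap->a_nch, NULL);	/* record the negative hit */
	}
	return (error);
}
#endif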
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics of this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
				_cache_setvp(ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (_cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
_cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 5 * hz;
}
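
/*
 * Illustrative sketch (not compiled): how a (parent, name) pair maps to
 * a hash chain, mirroring the fnv_32_buf() usage in cache_nlookup()
 * and _cache_rehash() above.  Note that the parent is hashed by
 * pointer value, and nchash is the power-of-2 mask filled in by
 * hashinit() in nchinit().  example_hash_bucket() is hypothetical.
 */
#if 0
static struct nchashhead *
example_hash_bucket(struct namecache *par, const char *name, int nlen)
{
	u_int32_t hash;

	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	return (NCHHASH(hash));
}
#endif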
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	++mp->mnt_refs;
	if (vp)
		_cache_setvp(nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
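
/*
 * Illustrative sketch (not compiled): a filesystem without precise
 * namespace knowledge typically issues the catch-all purge from its
 * reclaim path so no stale name cache entries keep pointing at the
 * dying vnode.  examplefs_reclaim() is hypothetical.
 */
#if 0
static int
examplefs_reclaim(struct vop_reclaim_args *ap)
{
	cache_purge(ap->a_vp);	/* sever all namecache associations */
	/* ... release filesystem-private state here ... */
	return (0);
}
#endif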
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				cache_zap(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

#endif

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}


static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards, if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
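
/*
 * Illustrative sketch (not compiled): the backwards-fill technique used
 * by kern_getcwd() above and cache_fullpath() below -- build the path
 * right-to-left and hand back a pointer into the middle of the buffer.
 * Overflow checking is elided; the real code fails with ENOMEM when bp
 * backs into the start of the buffer.  example_prepend() is
 * hypothetical.
 */
#if 0
static char *
example_prepend(char *buf, size_t buflen, const char *name, int nlen)
{
	char *bp = buf + buflen - 1;
	int i;

	*bp = '\0';			/* terminate first */
	for (i = nlen - 1; i >= 0; i--)
		*--bp = name[i];	/* copy the component backwards */
	*--bp = '/';			/* then its leading slash */
	return (bp);			/* the string now starts at bp */
}
#endif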
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct nchandle fd_nrdir;
	struct nchandle nch;

	numfullpathcalls--;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;

	while (nch.ncp &&
	       (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards, if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		return(ENOENT);
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;
	struct nchandle nch;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* If vn is NULL, the caller wants us to use p->p_textvp. */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	return(cache_fullpath(p, &nch, retbuf, freebuf));
}
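
/*
 * Illustrative sketch (not compiled): correct consumption of the
 * retbuf/freebuf pair.  retbuf points into the allocation returned via
 * freebuf, so only freebuf may be passed to kfree().
 * example_print_path() is hypothetical.
 */
#if 0
static void
example_print_path(struct proc *p, struct vnode *vp)
{
	char *path;
	char *freebuf;

	if (vn_fullpath(p, vp, &path, &freebuf) == 0) {
		kprintf("path: %s\n", path);
		kfree(freebuf, M_TEMP);
	}
}
#endif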