/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.91 2008/06/14 05:34:06 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>

#define MAX_RECURSION_DEPTH	64

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did
 *	not have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static u_long	numneg;		/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;	/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;	/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_rehash(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
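/*
 * Illustrative userland sketch (not part of the original source): the
 * opaque vfs.cache.nchstats export above is an array with one struct
 * nchstats element per cpu and can be aggregated via sysctlbyname(3):
 *
 *	struct nchstats stats[SMP_MAXCPU];
 *	size_t len = sizeof(stats);
 *
 *	if (sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0) {
 *		int n = len / sizeof(stats[0]);
 *		// sum the fields of stats[0..n-1] for system-wide totals
 *	}
 */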
static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock, so
 * we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		_cache_lock(ncp);
		cache_zap(ncp);
	} else {
		atomic_subtract_int(&ncp->nc_refs, 1);
	}
}
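/*
 * Illustrative sketch (not from the original source): holds are pure
 * reference counts and are always paired.  A typical pattern keeps an
 * ncp alive across a temporary unlock:
 *
 *	_cache_hold(ncp);	// ncp cannot be freed out from under us
 *	_cache_unlock(ncp);
 *	... block or do other work ...
 *	_cache_lock(ncp);
 *	_cache_drop(ncp);	// may zap if last ref, unresolved, no kids
 */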
/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = _cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		_cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	/*
	 * Construct a fake FSMID based on the time of day and a 32 bit
	 * roller for uniqueness.  This is used to generate a useful
	 * FSMID for filesystems which do not support it.
	 */
	ncp->nc_fsmid = cache_getnewfsmid();
	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}

void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * Warning: caller may hold an unrelated read spinlock, which means we can't
 * use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	++nch->mount->mnt_refs;
	return(nch);
}

void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	_cache_hold(target->ncp);
	++nch->mount->mnt_refs;
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	--nch->mount->mnt_refs;
	nch->mount = mp;
	++nch->mount->mnt_refs;
}

void
cache_drop(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 * or recycled, but it does NOT help you if the vnode had already initiated
 * a recyclement.  If this is important, use cache_get() rather than
 * cache_lock() (and deal with the differences in the way the refs counter
 * is handled).  Or, alternatively, make an unconditional call to
 * cache_validate() or cache_resolve() after cache_lock() returns.
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * WARNING!  If VRECLAIMED is set the vnode could
			 * already be in the middle of a recycle.  Callers
			 * should not assume that nc_vp is usable when
			 * not NULL.  cache_vref() or cache_vget() must be
			 * called.
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", nclockwarn) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			kprintf("[diagnostic] cache_lock: blocked on %p", ncp);
			kprintf(" \"%*.*s\"\n",
				ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}
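/*
 * Illustrative sketch (not from the original source): ncp locks are
 * exclusive and recursive within the owning thread, and a ref must be
 * held across the lock:
 *
 *	_cache_hold(ncp);
 *	_cache_lock(ncp);
 *	_cache_lock(ncp);	// recursion ok, nc_exlocks == 2
 *	_cache_unlock(ncp);
 *	_cache_unlock(ncp);	// final unlock wakes NCF_LOCKREQ waiters
 *	_cache_drop(ncp);
 */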
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * WARNING!  If VRECLAIMED is set the vnode could
		 * already be in the middle of a recycle.  Callers
		 * should not assume that nc_vp is usable when
		 * not NULL.  cache_vref() or cache_vget() must be
		 * called.
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup(ncp);
		}
	}
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.  We want cache_get() to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * note: the same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	++target->mount->mnt_refs;
}

static int
_cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		_cache_lock(ncp);
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

int
cache_get_nonblock(struct nchandle *nch)
{
	int error;

	if ((error = _cache_get_nonblock(nch->ncp)) == 0)
		++nch->mount->mnt_refs;
	return (error);
}

static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	--nch->mount->mnt_refs;
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
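/*
 * Illustrative sketch (not from the original source): cache_get() and
 * cache_put() bracket a critical section on an nchandle.  'src' below
 * is a hypothetical, already-valid nchandle:
 *
 *	struct nchandle nch;
 *
 *	cache_get(&src, &nch);	// ref + lock, revalidates a reclaimed vp
 *	... operate on nch.ncp ...
 *	cache_put(&nch);	// unlock + drop, NULLs out nch
 */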
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
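/*
 * Illustrative sketch (not from the original source): a filesystem's
 * resolver typically finishes by calling cache_setvp() on the locked,
 * unresolved entry with either a vnode (positive hit) or NULL
 * (negative hit):
 *
 *	if (lookup_error == 0)
 *		cache_setvp(nch, vp);	// nc_error becomes 0
 *	else if (lookup_error == ENOENT)
 *		cache_setvp(nch, NULL);	// negative entry, nc_error ENOENT
 */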
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * NOTE: NCF_FSMID must be cleared so a refurbishment of the ncp, such as
 * in a create, properly propagates the flag up the chain.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (ncp->nc_flag & NCF_FSMID)
				vupdatefsmid(vp);
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK|
				  NCF_FSMID);
	}
}

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leaves may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
		return;
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.
 * We do this by saving the deep namecache node and aborting the recursion,
 * then re-recursing at that node using a depth-first algorithm in order to
 * allow multiple deep recursions to chain through each other, then we
 * restart the invalidation from scratch.
 */

struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_hold(kid);
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
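/*
 * Illustrative sketch (not from the original source): a delete-style
 * operation might invalidate an entry and its subtree with:
 *
 *	cache_inval(nch, CINV_DESTROY | CINV_CHILDREN);
 *
 * while a caller that only wants the entry itself forced through an
 * invalidation cycle would pass 0 or CINV_DESTROY alone.
 */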
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: unlike the namecache topology which guarantees that ncp's will not
 * be ripped out of the topology while held, the vnode's v_namecache list
 * has no such restriction.  NCP's can be ripped out of the list at virtually
 * any time if not locked, even if held.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			break;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		if (ncp && ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			break;
		}
	}
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	char *oname;

	_cache_setunresolved(tncp);
	cache_unlink_parent(fncp);
	cache_link_parent(fncp, tncp->nc_parent);
	cache_unlink_parent(tncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;
	if (fncp->nc_flag & NCF_HASHED)
		_cache_rehash(fncp);
	if (tncp->nc_flag & NCF_HASHED)
		_cache_rehash(tncp);
	if (oname)
		kfree(oname, M_VFSCACHE);
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED or LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.
 * The most typical error is ENOENT, meaning that the ncp represents a
 * negative cache hit and there is no vnode to retrieve, but other errors
 * can occur too.
 *
 * The main race we have to deal with is a namecache zap.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Accessing the vnode from the namecache is a bit
		 * dangerous.  Because there are no refs on the vnode, it
		 * could be in the middle of a reclaim.
		 */
		if (vp->v_flag & VRECLAIMED) {
			kprintf("Warning: vnode reclaim race detected "
				"in cache_vget on %p (%s)\n",
				vp, ncp->nc_name);
			_cache_lock(ncp);
			_cache_setunresolved(ncp);
			_cache_unlock(ncp);
			goto again;
		}
		error = vget(vp, lk_type);
		if (error) {
			if (vp != ncp->nc_vp)
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {
			vput(vp);
			goto again;
		} else if (vp->v_flag & VRECLAIMED) {
			panic("vget succeeded on a VRECLAIMED node! vp %p", vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_lock(ncp);
		error = cache_resolve(nch, cred);
		_cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Since we did not obtain any locks, a cache zap
		 * race can occur here if the vnode is in the middle
		 * of being reclaimed and has not yet been able to
		 * clean out its cache node.  If that case occurs,
		 * we must lock and unresolve the cache, then loop
		 * to retry.
		 */
		if ((error = vget(vp, LK_SHARED)) != 0) {
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"on cache_vref %p (%s)\n",
					vp, ncp->nc_name);
				_cache_lock(ncp);
				_cache_setunresolved(ncp);
				_cache_unlock(ncp);
				goto again;
			}
			/* fatal error */
		} else {
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
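/*
 * Illustrative sketch (not from the original source): converting an
 * nchandle to a usable, locked vnode and releasing it:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		... use the locked, referenced vp ...
 *		vput(vp);	// unlock + release
 *	}
 */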
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.  Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.
 *
 * However, we might race against the parent dvp and not be able to
 * reference it.  If we race, return NULL.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL) {
				if (vget(dvp, LK_SHARED) == 0) {
					vn_unlock(dvp);
					/* return referenced, unlocked dvp */
				} else {
					dvp = NULL;
				}
			}
		}
	}
	return(dvp);
}

/*
 * Recursively set the FSMID update flag for namecache nodes leading
 * to root.  This will cause the next getattr or reclaim to increment the
 * fsmid and mark the inode for lazy updating.
 *
 * Stop recursing when we hit a node whose NCF_FSMID flag is already set.
 * This makes FSMIDs work in an Einsteinian fashion - where the observation
 * affects the result.  In this case a program monitoring a higher level
 * node will have detected some prior change and started its scan (clearing
 * NCF_FSMID in higher level nodes), but since it has not yet observed the
 * node where we find NCF_FSMID still set, we can safely make the related
 * modification without interfering with the theorized program.
 *
 * This also means that FSMIDs cannot represent time-domain quantities
 * in a hierarchical sense.  But the main reason for doing it this way
 * is to reduce the amount of recursion that occurs in the critical path
 * when e.g. a program is writing to a file that sits deep in a directory
 * hierarchy.
 */
void
cache_update_fsmid(struct nchandle *nch)
{
	struct namecache *ncp;
	struct namecache *scan;
	struct vnode *vp;

	ncp = nch->ncp;

	/*
	 * Warning: even if we get a non-NULL vp it could still be in the
	 * middle of a recyclement.  Don't do anything fancy, just set
	 * NCF_FSMID.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
			for (scan = ncp; scan; scan = scan->nc_parent) {
				if (scan->nc_flag & NCF_FSMID)
					break;
				scan->nc_flag |= NCF_FSMID;
			}
		}
	} else {
		while (ncp && (ncp->nc_flag & NCF_FSMID) == 0) {
			ncp->nc_flag |= NCF_FSMID;
			ncp = ncp->nc_parent;
		}
	}
}

void
cache_update_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *scan;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		for (scan = ncp; scan; scan = scan->nc_parent) {
			if (scan->nc_flag & NCF_FSMID)
				break;
			scan->nc_flag |= NCF_FSMID;
		}
	}
}

/*
 * If getattr is called on a vnode (e.g. a stat call), the filesystem
 * may call this routine to determine if the namecache has the hierarchical
 * change flag set, requiring the fsmid to be updated.
 *
 * Since 0 indicates no support, make sure the filesystem fsmid is at least
 * 1.
 */
int
cache_check_fsmid_vp(struct vnode *vp, int64_t *fsmid)
{
	struct namecache *ncp;
	int changed = 0;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			changed = 1;
		}
	}
	if (*fsmid == 0)
		++*fsmid;
	if (changed)
		++*fsmid;
	return(changed);
}

/*
 * Obtain the FSMID for a vnode for filesystems which do not support
 * a built-in FSMID.
 */
int64_t
cache_sync_fsmid_vp(struct vnode *vp)
{
	struct namecache *ncp;

	if ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		if (ncp->nc_flag & NCF_FSMID) {
			ncp->nc_flag &= ~NCF_FSMID;
			++ncp->nc_fsmid;
		}
		return(ncp->nc_fsmid);
	}
	return(VNOVAL);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid overflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		kprintf("cache_fromdvp: forcing %s\n", nch->ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((nch->ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	if (fakename)
		kfree(fakename, M_TEMP);

	/*
	 * hold it for real so the mount gets a ref
	 */
	if (nch->ncp)
		cache_hold(nch);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}

/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			vrele(pvp);
			break;
		}
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to
 * recover a connected namecache topology in order to then be able to issue
 * new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every
 * subsystem that calls into the namecache to deal with the re-merge case,
 * especially since we are using the namecache to placehold negative lookups
 * and the vnode might not be immediately assigned.  (2) is certainly far
 * less efficient than (1), but since we are only talking about directories
 * here (which are likely to remain cached), the case does not actually run
 * all that often and has the supreme advantage of not polluting the
 * namecache algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	if ((error = cache_vref(nch, cred, &pvp)) != 0)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = _cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* _cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHE);
		kfree(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			_cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		_cache_lock(ncp);
	}
done:
	_cache_unlock(ncp);
	atomic_subtract_int(&ncp->nc_refs, 1);
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or _cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.
 * A directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (_cache_get_nonblock(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par_nch->ncp);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	nch.mount = mp;
	nch.ncp = ncp;
	++nch.mount->mnt_refs;
	return(nch);
}
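/*
 * Illustrative sketch (not from the original source): looking up a
 * component under a held directory handle 'par_nch' and resolving it:
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = "name";
 *	nlc.nlc_namelen = 4;
 *	nch = cache_nlookup(&par_nch, &nlc);	// locked + referenced
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);
 *	cache_put(&nch);
 */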
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
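/*
 * Hypothetical usage sketch (illustrative only): a path traversal that
 * lands on a mounted-on ncp can hop into the mounted filesystem roughly
 * as follows; cache_copy() is assumed here from the wider nchandle API
 * and is not defined in this file:
 *
 *	struct mount *mp;
 *
 *	if ((mp = cache_findmount(&nch)) != NULL) {
 *		cache_drop(&nch);
 *		cache_copy(&mp->mnt_ncmountpt, &nch);
 *	}
 */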
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation should not
	 * occur very often and, when it does, should not have to walk back
	 * very many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
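/*
 * Illustrative sketch: because a negative hit resolves "successfully"
 * but returns ENOENT, a hypothetical caller distinguishes the three
 * outcomes roughly like this:
 *
 *	error = cache_resolve(&nch, cred);
 *	if (error == 0)
 *		vp = nch.ncp->nc_vp;		(positive hit, vp valid)
 *	else if (error == ENOENT)
 *		...negative hit, name known not to exist...
 *	else
 *		...hard failure, e.g. EXDEV from a broken chain...
 */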
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (_cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
_cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialize per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
	nclockwarn = 5 * hz;
}
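/*
 * For reference, the hash-chain selection used by cache_nlookup() and
 * _cache_rehash() above: the component name is FNV-hashed first and the
 * parent ncp pointer is folded in afterwards, so identical names under
 * different directories land on different chains.  Sketch:
 *
 *	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&parent_ncp, sizeof(parent_ncp), hash);
 *	nchpp = NCHHASH(hash);		(i.e. &nchashtbl[hash & nchash])
 */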
/*
 * Called from start_init() to bootstrap the root filesystem.  Fills in
 * *nch with a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	++mp->mnt_refs;
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * vfs_cache_setroot()
 *
 * Create an association between the root of our namecache and
 * the root vnode.  This routine may be called several times during
 * booting.
 *
 * If the caller intends to save the returned namecache pointer somewhere
 * it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
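/*
 * Hypothetical example (illustrative only): a filesystem that cannot
 * make the precise VOP_N*() adjustments simply blows the whole vnode
 * association away from its reclaim path:
 *
 *	cache_purge(vp);	(entries become NCF_UNRESOLVED,
 *				 the topology stays intact)
 */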
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
#if 0

void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				cache_zap(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

#endif

/*
 * Create a new (theoretically) unique fsmid
 */
int64_t
cache_getnewfsmid(void)
{
	static int fsmid_roller;
	int64_t fsmid;

	++fsmid_roller;
	fsmid = ((int64_t)time_second << 32) |
		(fsmid_roller & 0x7FFFFFFF);
	return (fsmid);
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
sys___getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	while (nch.ncp && (nch.ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ERANGE;
				return(NULL);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}
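/*
 * Worked example (illustrative): for a cwd of /usr/src the loop above
 * builds the result back to front,
 *
 *	"src"  ->  "/src"  ->  "usr/src"  ->  "/usr/src"
 *
 * and kern_getcwd() returns bp pointing at the leading '/' inside the
 * caller's buffer, not at the start of the buffer itself.
 */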
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
cache_fullpath(struct proc *p, struct nchandle *nchp, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct nchandle fd_nrdir;
	struct nchandle nch;

	numfullpathcalls--;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;

	while (nch.ncp &&
	       (nch.ncp != fd_nrdir.ncp || nch.mount != fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (nch.ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = nch.ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				return(ENOMEM);
			}
			*--bp = nch.ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		nch.ncp = nch.ncp->nc_parent;
	}
	if (nch.ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		return(ENOENT);
	}

	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			return(ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;

	return(0);
}

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;
	struct nchandle nch;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	numfullpathcalls--;
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	return(cache_fullpath(p, &nch, retbuf, freebuf));
}
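/*
 * Illustrative caller sketch (hypothetical): the returned path borrows
 * storage from *freebuf, which the caller must release with kfree():
 *
 *	char *path, *freebuf;
 *
 *	if (vn_fullpath(p, NULL, &path, &freebuf) == 0) {
 *		...path names p->p_textvp...
 *		kfree(freebuf, M_TEMP);
 *	}
 */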