/*
 * Copyright (c) 2003,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.43 2004/11/18 20:04:24 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).
 *
 * Negative entries may exist and correspond to structures where nc_vp
 * is NULL.  In a negative entry, NCF_WHITEOUT will be set if the entry
 * corresponds to a whited-out directory entry (versus simply not finding
 * the entry at all).
 *
 * Upon reaching the last segment of a path, if the reference is for DELETE,
 * or NOCACHE is set (rewrite), and the name is located in the cache, it
 * will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)	(&nchashtbl[(hash) & nchash])
#define MINNEG		1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static struct namecache_list	ncneglist;		/* instead of vnode */

/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code to run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static u_long	numneg;		/* number of negative cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static u_long	numcache;	/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

static u_long	numunres;	/* number of unresolved entries */
SYSCTL_ULONG(_debug, OID_AUTO, numunres, CTLFLAG_RD, &numunres, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

static int cache_resolve_mp(struct namecache *ncp);
static void cache_rehash(struct namecache *ncp);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	++ncp->nc_refs;
	return(ncp);
}

/*
 * When dropping an entry, if only one ref remains and the entry has not
 * been resolved, zap it.  Since the one reference is being dropped the
 * entry had better not be locked.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	if (ncp->nc_refs == 1 &&
	    (ncp->nc_flag & NCF_UNRESOLVED) &&
	    TAILQ_EMPTY(&ncp->nc_list)
	) {
		KKASSERT(ncp->nc_exlocks == 0);
		cache_lock(ncp);
		cache_zap(ncp);
	} else {
		--ncp->nc_refs;
	}
}

/*
 * Link a new namecache entry to its parent.  Be careful to avoid races
 * if vhold() blocks in the future.
 *
 * If we are creating a child under an oldapi parent we must mark the
 * child as being an oldapi entry as well.
 */
static void
cache_link_parent(struct namecache *ncp, struct namecache *par)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent association from a namecache structure.  If this is
 * the last child of the parent the cache_drop(par) will attempt to
 * recursively zap the parent.
 */
static void
cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;

	if ((par = ncp->nc_parent) != NULL) {
		ncp->nc_parent = NULL;
		par = cache_hold(par);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			vdrop(par->nc_vp);
		cache_drop(par);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = malloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = malloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;
	TAILQ_INIT(&ncp->nc_list);
	cache_lock(ncp);
	return(ncp);
}

static void
cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		free(ncp->nc_name, M_VFSCACHE);
	free(ncp, M_VFSCACHE);
}

/*
 * Ref and deref a namecache structure.
 */
struct namecache *
cache_hold(struct namecache *ncp)
{
	return(_cache_hold(ncp));
}

void
cache_drop(struct namecache *ncp)
{
	_cache_drop(ncp);
}

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * In particular, if a vnode is associated with a locked cache entry
 * that vnode will *NOT* be recycled.  We accomplish this by vhold()ing the
 * vnode.  XXX we should find a more efficient way to prevent the vnode
 * from being recycled, but remember that any given vnode may have multiple
 * namecache associations (think hardlinks).
 */
void
cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		if (ncp->nc_exlocks == 0) {
			ncp->nc_exlocks = 1;
			ncp->nc_locktd = td;
			/*
			 * The vp associated with a locked ncp must be held
			 * to prevent it from being recycled (which would
			 * cause the ncp to become unresolved).
			 *
			 * XXX loop on race for later MPSAFE work.
			 */
			if (ncp->nc_vp)
				vhold(ncp->nc_vp);
			break;
		}
		if (ncp->nc_locktd == td) {
			++ncp->nc_exlocks;
			break;
		}
		ncp->nc_flag |= NCF_LOCKREQ;
		if (tsleep(ncp, 0, "clock", hz) == EWOULDBLOCK) {
			if (didwarn)
				continue;
			didwarn = 1;
			printf("[diagnostic] cache_lock: blocked on %p", ncp);
			if ((ncp->nc_flag & NCF_MOUNTPT) && ncp->nc_mount)
				printf(" [MOUNTPT %s]\n", ncp->nc_mount->mnt_stat.f_mntonname);
			else
				printf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
		}
	}

	if (didwarn == 1) {
		printf("[diagnostic] cache_lock: unblocked %*.*s\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
	}
}

int
cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;

	KKASSERT(ncp->nc_refs != 0);
	td = curthread;
	if (ncp->nc_exlocks == 0) {
		ncp->nc_exlocks = 1;
		ncp->nc_locktd = td;
		/*
		 * The vp associated with a locked ncp must be held
		 * to prevent it from being recycled (which would
		 * cause the ncp to become unresolved).
		 *
		 * XXX loop on race for later MPSAFE work.
		 */
		if (ncp->nc_vp)
			vhold(ncp->nc_vp);
		return(0);
	} else {
		return(EWOULDBLOCK);
	}
}

void
cache_unlock(struct namecache *ncp)
{
	thread_t td = curthread;

	KKASSERT(ncp->nc_refs > 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);
	if (--ncp->nc_exlocks == 0) {
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
		ncp->nc_locktd = NULL;
		if (ncp->nc_flag & NCF_LOCKREQ) {
			ncp->nc_flag &= ~NCF_LOCKREQ;
			wakeup_one(ncp);
		}
	}
}

/*
 * ref-and-lock, unlock-and-deref functions.
 */
struct namecache *
cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	cache_lock(ncp);
	return(ncp);
}

int
cache_get_nonblock(struct namecache *ncp)
{
	/* XXX MP */
	if (ncp->nc_exlocks == 0 || ncp->nc_locktd == curthread) {
		_cache_hold(ncp);
		cache_lock(ncp);
		return(0);
	}
	return(EWOULDBLOCK);
}

void
cache_put(struct namecache *ncp)
{
	cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
void
cache_setvp(struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	ncp->nc_vp = vp;
	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		++numcache;
		ncp->nc_error = 0;
	} else {
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		ncp->nc_error = ENOENT;
	}
	ncp->nc_flag &= ~NCF_UNRESOLVED;
}

void
cache_settimeout(struct namecache *ncp, int nticks)
{
	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
void
cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		++numunres;
		if ((vp = ncp->nc_vp) != NULL) {
			--numcache;
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
		}
	}
}

/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be locked.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact.
 */
void
cache_inval(struct namecache *ncp, int flags)
{
	struct namecache *kid;
	struct namecache *nextkid;

	KKASSERT(ncp->nc_exlocks);
again:
	cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;

	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		cache_hold(kid);
		cache_unlock(ncp);
		while (kid) {
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				cache_lock(kid);
				cache_inval(kid, flags & ~CINV_DESTROY);
				cache_unlock(kid);
			}
			cache_drop(kid);
			kid = nextkid;
		}
		cache_lock(ncp);

		/*
		 * Someone could have gotten in there while ncp was unlocked,
		 * retry if so.
		 */
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			goto again;
	}
}

/*
 * Invalidate a vnode's namecache associations.
 */
void
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;

	while ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		cache_get(ncp);
		cache_inval(ncp, flags);
		cache_put(ncp);
	}
}

/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  Both will be set to unresolved, any children of tncp
 * will be disconnected (the prior contents of the target is assumed to be
 * destroyed by the rename operation, e.g. renaming over an empty directory),
 * and all children of fncp will be moved to tncp.
 *
 * XXX the disconnection could pose a problem, check code paths to make
 * sure any code that blocks can handle the parent being changed out from
 * under it.  Maybe we should lock the children (watch out for deadlocks) ?
 *
 * After we return the caller has the option of calling cache_setvp() if
 * the vnode of the new target ncp is known.
 *
 * Any process CD'd into any of the children will no longer be able to ".."
 * back out.  An rm -rf can cause this situation to occur.
 */
void
cache_rename(struct namecache *fncp, struct namecache *tncp)
{
	struct namecache *scan;

	cache_setunresolved(fncp);
	cache_setunresolved(tncp);
	cache_inval(tncp, CINV_CHILDREN);
	while ((scan = TAILQ_FIRST(&fncp->nc_list)) != NULL) {
		cache_hold(scan);
		cache_unlink_parent(scan);
		cache_link_parent(scan, tncp);
		if (scan->nc_flag & NCF_HASHED)
			cache_rehash(scan);
		cache_drop(scan);
	}
}

/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary and deal with namecache/vp races.  The passed ncp must
 * be referenced and may be locked.  The ncp's ref/locking state is not
 * affected by this call.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The main race we have to deal with are namecache zaps.  The ncp itself
 * will not disappear since it is referenced, and it turns out that the
 * validity of the vp pointer can be checked simply by rechecking the
 * contents of ncp->nc_vp.
 */
int
cache_vget(struct namecache *ncp, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type, curthread);
		if (error) {
			if (vp != ncp->nc_vp)	/* handle cache_zap race */
				goto again;
			vp = NULL;
		} else if (vp != ncp->nc_vp) {	/* handle cache_zap race */
			vput(vp);
			goto again;
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

int
cache_vref(struct namecache *ncp, struct ucred *cred, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		cache_lock(ncp);
		error = cache_resolve(ncp, cred);
		cache_unlock(ncp);
	} else {
		error = 0;
	}
	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		vref(vp);
		if (vp != ncp->nc_vp) {		/* handle cache_zap race */
			vrele(vp);
			goto again;
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}

/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 */

static int cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
				  struct vnode *dvp);

struct namecache *
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit)
{
	struct namecache *ncp;
	struct vnode *pvp;
	int error;

	/*
	 * Temporary debugging code to force the directory scanning code
	 * to be exercised.
	 */
	ncp = NULL;
	if (ncvp_debug >= 3 && makeit && TAILQ_FIRST(&dvp->v_namecache)) {
		ncp = TAILQ_FIRST(&dvp->v_namecache);
		printf("cache_fromdvp: forcing %s\n", ncp->nc_name);
		goto force;
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while ((ncp = TAILQ_FIRST(&dvp->v_namecache)) == NULL && makeit) {
force:
		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			ncp = cache_get(dvp->v_mount->mnt_ncp);
			error = cache_resolve_mp(ncp);
			cache_put(ncp);
			if (ncvp_debug) {
				printf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					printf(" failed\n");
				ncp = NULL;
				break;
			}
			if (ncvp_debug)
				printf(" succeeded\n");
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		error = vop_nlookupdotdot(dvp->v_ops, dvp, &pvp, cred);
		if (error) {
			printf("lookupdotdot failed %d %p\n", error, pvp);
			break;
		}
		VOP_UNLOCK(pvp, 0, curthread);

		/*
		 * XXX this recursion could run the kernel out of stack,
		 * change to a less efficient algorithm if we get too deep
		 * (use 'makeit' for a depth counter?)
		 */
		ncp = cache_fromdvp(pvp, cred, makeit);
		vrele(pvp);
		if (ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(ncp, cred, dvp);
		cache_drop(ncp);
		if (error) {
			printf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, ncp->nc_name, dvp);
			ncp = NULL;
			break;
		}
		if (ncvp_debug) {
			printf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, ncp->nc_name);
		}
	}
	if (ncp)
		cache_hold(ncp);
	return (ncp);
}

/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 */
static int
cache_inefficient_scan(struct namecache *ncp, struct ucred *cred,
		       struct vnode *dvp)
{
	struct nlcomponent nlc;
	struct namecache *rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	u_long *cookies;
	off_t baseoff;
	int ncookies;
	int blksize;
	int eofflag;
	char *rbuf;
	int error;
	int xoff;
	int i;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat, curthread)) != 0)
		return (error);
	if ((error = cache_vget(ncp, cred, LK_SHARED, &pvp)) != 0)
		return (error);
	if (ncvp_debug)
		printf("inefficient_scan: directory iosize %ld vattr fileid = %ld\n", vat.va_blocksize, (long)vat.va_fileid);
	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = malloc(blksize, M_TEMP, M_WAITOK);
	rncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
	cookies = NULL;
again:
	baseoff = uio.uio_offset;
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (cookies) {
		free(cookies, M_TEMP);
		cookies = NULL;
	}
	if (ncvp_debug >= 2)
		printf("cache_inefficient_scan: readdir @ %08x\n", (int)baseoff);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, &ncookies, &cookies);
	if (error == 0 && cookies == NULL)
		error = EPERM;
	if (error == 0) {
		for (i = 0; i < ncookies; ++i) {
			xoff = (int)(cookies[i] - (u_long)baseoff);
			/*
			 * UFS plays a little trick to skip the first entry
			 * in a directory ("."), by assigning the cookie to
			 * dpoff + dp->d_reclen in the loop.  This causes
			 * the last cookie to be assigned to the data-end of
			 * the directory.  XXX
			 */
			if (xoff == blksize)
				break;
			KKASSERT(xoff >= 0 && xoff <= blksize);
			den = (struct dirent *)(rbuf + xoff);
			if (ncvp_debug >= 2)
				printf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen, den->d_name);
			if (den->d_type != DT_WHT &&
			    den->d_fileno == vat.va_fileid) {
				if (ncvp_debug)
					printf("cache_inefficient_scan: MATCHED inode %ld path %s/%*.*s\n", vat.va_fileid, ncp->nc_name, den->d_namlen, den->d_namlen, den->d_name);
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				VOP_UNLOCK(pvp, 0, curthread);
				rncp = cache_nlookup(ncp, &nlc);
				KKASSERT(rncp != NULL);
				break;
			}
		}
		if (rncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	if (cookies) {
		free(cookies, M_TEMP);
		cookies = NULL;
	}
	if (rncp) {
		vrele(pvp);
		if (rncp->nc_flag & NCF_UNRESOLVED) {
			cache_setvp(rncp, dvp);
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s = %p\n",
					ncp->nc_name, rncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				printf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					ncp->nc_name, rncp->nc_name, dvp,
					rncp->nc_vp);
			}
		}
		if (rncp->nc_vp == NULL)
			error = rncp->nc_error;
		cache_put(rncp);
	} else {
		printf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, ncp->nc_name);
		vput(pvp);
		error = ENOENT;
	}
	free(rbuf, M_TEMP);
	return (error);
}

/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.  This function will
 * also run through the nc_parent chain and destroy parent ncps if possible.
 * As a side benefit, it turns out the only conditions that allow running
 * up the chain are also the conditions to ensure no deadlock will occur.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 */
static void
cache_zap(struct namecache *ncp)
{
	struct namecache *par;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 */
	while (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * Someone other than us has a ref, stop.
		 */
		if (ncp->nc_refs > 1)
			goto done;

		/*
		 * We have children, stop.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			goto done;

		/*
		 * Remove ncp from the topology: hash table and parent linkage.
		 */
		if (ncp->nc_flag & NCF_HASHED) {
			ncp->nc_flag &= ~NCF_HASHED;
			LIST_REMOVE(ncp, nc_hash);
		}
		if ((par = ncp->nc_parent) != NULL) {
			par = cache_hold(par);
			TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
			ncp->nc_parent = NULL;
			if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
				vdrop(par->nc_vp);
		}

		/*
		 * ncp should not have picked up any refs.  Physically
		 * destroy the ncp.
		 */
		KKASSERT(ncp->nc_refs == 1);
		--numunres;
		/* cache_unlock(ncp) not required */
		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			free(ncp->nc_name, M_VFSCACHE);
		free(ncp, M_VFSCACHE);

		/*
		 * Loop on the parent (it may be NULL).  Only bother looping
		 * if the parent has a single ref (ours), which also means
		 * we can lock it trivially.
		 */
		ncp = par;
		if (ncp == NULL)
			return;
		if (ncp->nc_refs != 1) {
			cache_drop(ncp);
			return;
		}
		KKASSERT(par->nc_exlocks == 0);
		cache_lock(ncp);
	}
done:
	cache_unlock(ncp);
	--ncp->nc_refs;
}

static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static __inline
void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}
}

/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the cache.  A locked, referenced, non-NULL
 * entry is *always* returned, even if the supplied component is illegal.
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct namecache *
cache_nlookup(struct namecache *par, struct nlcomponent *nlc)
{
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchashhead *nchpp;
	u_int32_t hash;
	globaldata_t gd;

	numcalls++;
	gd = mycpu;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	new_ncp = NULL;
restart:
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;

		/*
		 * Zap entries that have timed out.
		 */
		if (ncp->nc_timeout &&
		    (int)(ncp->nc_timeout - ticks) < 0 &&
		    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
		    ncp->nc_exlocks == 0
		) {
			cache_zap(cache_get(ncp));
			goto restart;
		}

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			if (cache_get_nonblock(ncp) == 0) {
				if (new_ncp)
					cache_free(new_ncp);
				goto found;
			}
			cache_get(ncp);
			cache_put(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  We have to relookup after possibly blocking in
	 * malloc.
	 */
	if (new_ncp == NULL) {
		new_ncp = cache_alloc(nlc->nlc_namelen);
		goto restart;
	}

	ncp = new_ncp;

	/*
	 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
	 * and link to the parent.  The mount point is usually inherited
	 * from the parent unless this is a special case such as a mount
	 * point where nlc_namelen is 0.  The caller is responsible for
	 * setting nc_mount in that case.  If nlc_namelen is 0 nc_name will
	 * be NULL.
	 */
	if (nlc->nlc_namelen) {
		bcopy(nlc->nlc_nameptr, ncp->nc_name, nlc->nlc_namelen);
		ncp->nc_name[nlc->nlc_namelen] = 0;
		ncp->nc_mount = par->nc_mount;
	}
	nchpp = NCHHASH(hash);
	LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
	ncp->nc_flag |= NCF_HASHED;
	cache_link_parent(ncp, par);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	cache_hysteresis();
	return(ncp);
}

/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct namecache *ncp, struct ucred *cred)
{
	struct namecache *par;
	int error;

restart:
	/*
	 * If the ncp is already resolved we have nothing to do.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		return (ncp->nc_error);

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp->nc_flag & NCF_MOUNTPT)
		return (cache_resolve_mp(ncp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		printf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, and if it does it should not have to go
	 * back too many nodes to resolve the ncp.
	 */
	while (ncp->nc_parent->nc_vp == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		while (par->nc_parent && par->nc_parent->nc_vp == NULL)
			par = par->nc_parent;
		if (par->nc_parent == NULL) {
			printf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			return (EXDEV);
		}
		printf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		cache_get(par);
		if (par->nc_flag & NCF_MOUNTPT) {
			cache_resolve_mp(par);
		} else if (par->nc_parent->nc_vp == NULL) {
			printf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
			cache_put(par);
			continue;
		} else if (par->nc_flag & NCF_UNRESOLVED) {
			par->nc_error = VOP_NRESOLVE(par, cred);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				printf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				cache_put(par);
				return(error);
			}
			printf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	KKASSERT((ncp->nc_flag & NCF_MOUNTPT) == 0);
	ncp->nc_error = VOP_NRESOLVE(ncp, cred);
	/*vop_nresolve(ncp->nc_parent->nc_vp->v_ops, ncp, cred);*/
	if (ncp->nc_error == EAGAIN) {
		printf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}

/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to their mac-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The passed ncp must be locked.
 */
static int
cache_resolve_mp(struct namecache *ncp)
{
	struct vnode *vp;
	struct mount *mp = ncp->nc_mount;

	KKASSERT(mp != NULL);
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		while (vfs_busy(mp, 0, NULL, curthread))
			;
		ncp->nc_error = VFS_ROOT(mp, &vp);
		if (ncp->nc_error == 0) {
			cache_setvp(ncp, vp);
			vput(vp);
		} else {
			printf("[diagnostic] cache_resolve_mp: failed to resolve mount %p\n", mp);
			cache_setvp(ncp, NULL);
		}
		vfs_unbusy(mp, curthread);
	}
	return(ncp->nc_error);
}

void
cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			KKASSERT(numneg == 0);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		if (cache_get_nonblock(ncp) == 0)
			cache_zap(ncp);
		--count;
	}
}

/*
 * Rehash a ncp.  Rehashing is typically required if the name changes (should
 * not generally occur) or the parent link changes.  This function will
 * unhash the ncp if the ncp is no longer hashable.
 */
static void
cache_rehash(struct namecache *ncp)
{
	struct nchashhead *nchpp;
	u_int32_t hash;

	if (ncp->nc_flag & NCF_HASHED) {
		ncp->nc_flag &= ~NCF_HASHED;
		LIST_REMOVE(ncp, nc_hash);
	}
	if (ncp->nc_nlen && ncp->nc_parent) {
		hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
		hash = fnv_32_buf(&ncp->nc_parent,
				  sizeof(ncp->nc_parent), hash);
		nchpp = NCHHASH(hash);
		LIST_INSERT_HEAD(nchpp, ncp, nc_hash);
		ncp->nc_flag |= NCF_HASHED;
	}
}

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}

	TAILQ_INIT(&ncneglist);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
}

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
struct namecache *
cache_allocroot(struct mount *mp, struct vnode *vp)
{
	struct namecache *ncp = cache_alloc(0);

	ncp->nc_flag |= NCF_MOUNTPT | NCF_ROOT;
	ncp->nc_mount = mp;
	cache_setvp(ncp, vp);
	return(ncp);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct namecache *ncp)
{
	struct vnode *ovp;
	struct namecache *oncp;

	ovp = rootvnode;
	oncp = rootncp;
	rootvnode = nvp;
	rootncp = ncp;

	if (ovp)
		vrele(ovp);
	if (oncp)
		cache_drop(oncp);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * A new vnode v_id is generated.  Note that no vnode will ever have a
 * v_id of 0.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 *
 * XXX: Only time and the size of v_id prevents this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnode's v_id individually instead of
 * XXX: using the global v_id.
 */
void
cache_purge(struct vnode *vp)
{
	static u_long nextid;

	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);

	/*
	 * Calculate a new unique id for ".." handling
	 */
	do {
		nextid++;
	} while (nextid == vp->v_id || nextid == 0);
	vp->v_id = nextid;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchashhead *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		ncp = LIST_FIRST(nchpp);
		if (ncp)
			cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				cache_lock(ncp);
				cache_zap(ncp);
			} else {
				cache_drop(ncp);
			}
			ncp = nnp;
		}
	}
}

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

int
__getcwd(struct __getcwd_args *uap)
{
	int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen < 2)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = malloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	ncp = fdp->fd_ncdir;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				*error = EBADF;		/* forced unmount? */
				return(NULL);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ENOMEM;
				return(NULL);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		return(NULL);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ENOMEM;
			return(NULL);
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
	return (bp);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
    &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL)
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
	while (ncp && ncp != fdp->fd_nrdir && (ncp->nc_flag & NCF_ROOT) == 0) {
		if (ncp->nc_flag & NCF_MOUNTPT) {
			if (ncp->nc_mount == NULL) {
				free(buf, M_TEMP);
				return(EBADF);
			}
			ncp = ncp->nc_parent;
			continue;
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		ncp = ncp->nc_parent;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		free(buf, M_TEMP);
		return (ENOENT);
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	return (0);
}
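
/*
 * Illustrative usage sketch (comment only, not compiled): the typical
 * consumer sequence for the new lookup API documented above cache_nlookup().
 * 'par', 'nlc', and 'cred' stand in for a held parent ncp, a filled-in
 * struct nlcomponent, and the caller's credentials; they are placeholders,
 * not identifiers defined in this file.
 *
 *	ncp = cache_nlookup(par, &nlc);		(returned locked + referenced)
 *	if (ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(ncp, cred);	(ENOENT = negative hit)
 *	...use ncp->nc_vp while the ncp is locked...
 *	cache_put(ncp);				(unlock + drop)
 */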