/*	$OpenBSD: vfs_cache.c,v 1.37 2014/09/13 16:06:37 doug Exp $	*/
/*	$NetBSD: vfs_cache.c,v 1.13 1996/02/04 02:18:09 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>

/*
 * TODO: namecache access should really be locked.
 */

/*
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NAMECACHE_MAXLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */

/*
 * Structures associated with name caching.
 *
 * An entry is indexed three ways:
 *  - in its parent directory's red-black tree (v_nc_tree), keyed
 *    by component name, for forward lookup;
 *  - on the target vnode's v_cache_dst list (nc_me), for reverse
 *    lookup ("." and ".." are deliberately kept out of this list);
 *  - on one of two global LRU chains, depending on whether the entry
 *    is a positive (nclruhead) or negative (nclruneghead) hit.
 *
 * Stale entries are detected by comparing the cached generation
 * numbers (nc_vpid/nc_dvpid) against the vnodes' current v_id.
 */
long numcache;	/* total number of positive cache entries allocated */
long numneg;	/* number of negative cache entries */

TAILQ_HEAD(, namecache) nclruhead;	/* Regular Entry LRU chain */
TAILQ_HEAD(, namecache) nclruneghead;	/* Negative Entry LRU chain */
struct nchstats nchstats;		/* cache effectiveness statistics */

int doingcache = 1;			/* 1 => enable the cache */

struct pool nch_pool;			/* backing storage for namecache entries */

void cache_zap(struct namecache *);
u_long nextvnodeid;			/* source of fresh v_id values, see cache_purge() */

/*
 * Ordering function for the per-directory red-black tree: compare
 * first by name length, then lexicographically by the name bytes.
 */
static int
namecache_compare(struct namecache *n1, struct namecache *n2)
{
	if (n1->nc_nlen == n2->nc_nlen)
		return (memcmp(n1->nc_name, n2->nc_name, n1->nc_nlen));
	else
		return (n1->nc_nlen - n2->nc_nlen);
}

RB_GENERATE(namecache_rb_cache, namecache, n_rbcache, namecache_compare);

/*
 * blow away a namecache entry
 *
 * Unlinks ncp from every index it sits on (LRU chain, the parent
 * directory's red-black tree, and the target vnode's reverse-lookup
 * list) and returns it to the pool.  If removing the entry empties
 * the parent directory's tree, the hold acquired by cache_enter()
 * is released with vdrop() -- after the entry has been freed, so the
 * entry is never reachable through a vnode we no longer hold.
 */
void
cache_zap(struct namecache *ncp)
{
	struct vnode *dvp = NULL;

	/* Positive and negative entries live on different LRU chains. */
	if (ncp->nc_vp != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		numcache--;
	} else {
		TAILQ_REMOVE(&nclruneghead, ncp, nc_neg);
		numneg--;
	}
	if (ncp->nc_dvp) {
		RB_REMOVE(namecache_rb_cache, &ncp->nc_dvp->v_nc_tree, ncp);
		/* Last entry for this directory: drop the hold later. */
		if (RB_EMPTY(&ncp->nc_dvp->v_nc_tree))
			dvp = ncp->nc_dvp;
	}
	/*
	 * Remove from the reverse map only if the entry is still valid
	 * (generation numbers match) and it could actually be on the
	 * list: cache_enter() never puts "." or ".." entries there.
	 */
	if (ncp->nc_vp && (ncp->nc_vpid == ncp->nc_vp->v_id)) {
		if (ncp->nc_vp != ncp->nc_dvp &&
		    ncp->nc_vp->v_type == VDIR &&
		    (ncp->nc_nlen > 2 ||
			(ncp->nc_nlen > 1 &&
			    ncp->nc_name[1] != '.') ||
			(ncp->nc_nlen > 0 &&
			    ncp->nc_name[0] != '.'))) {
			TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_me);
		}
	}
	pool_put(&nch_pool, ncp);
	if (dvp)
		vdrop(dvp);
}

/*
 * Look for a name in the cache.  We don't do this if the segment name is
 * long, simply so the cache can avoid holding long names (which would
 * either waste space, or add greatly to the complexity).
 * dvp points to the directory to search.  The componentname cnp holds
 * the information on the entry being sought, such as its length
 * and its name.  If the lookup succeeds, vpp is set to point to the vnode
 * and an error of 0 is returned.  If the lookup determines the name does
 * not exist (negative caching) an error of ENOENT is returned.  If the
 * lookup fails, an error of -1 is returned.
 *
 * On a 0 return, *vpp is referenced and locked, and the locking state
 * of dvp follows the LOCKPARENT/ISLASTCN protocol of VOP_LOOKUP()
 * (PDIRUNLOCK is set in cnp->cn_flags when dvp was unlocked).
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache n;
	struct vnode *vp;
	struct proc *p = curproc;
	u_long vpid;
	int error;

	*vpp = NULL;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}
	/* Over-long names are never cached; see comment at top of file. */
	if (cnp->cn_namelen > NAMECACHE_MAXLEN) {
		nchstats.ncs_long++;
		cnp->cn_flags &= ~MAKEENTRY;
		return (-1);
	}

	/* lookup in directory vnode's redblack tree */
	n.nc_nlen = cnp->cn_namelen;
	memcpy(n.nc_name, cnp->cn_nameptr, n.nc_nlen);
	ncp = RB_FIND(namecache_rb_cache, &dvp->v_nc_tree, &n);

	if (ncp == NULL) {
		nchstats.ncs_miss++;
		return (-1);
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		/* Caller wants the entry gone (delete/rename); drop it. */
		nchstats.ncs_badhits++;
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Negative hit.  Useless for CREATE of the last component
		 * (the name is about to exist), in which case drop it.
		 */
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			nchstats.ncs_neghits++;
			/*
			 * Move this slot to end of the negative LRU chain,
			 */
			if (TAILQ_NEXT(ncp, nc_neg) != NULL) {
				TAILQ_REMOVE(&nclruneghead, ncp, nc_neg);
				TAILQ_INSERT_TAIL(&nclruneghead, ncp,
				    nc_neg);
			}
			return (ENOENT);
		} else {
			nchstats.ncs_badhits++;
			goto remove;
		}
	} else if (ncp->nc_vpid != ncp->nc_vp->v_id) {
		/* Target vnode was recycled since the entry was made. */
		nchstats.ncs_falsehits++;
		goto remove;
	}

	/*
	 * Move this slot to end of the regular LRU chain.
	 */
	if (TAILQ_NEXT(ncp, nc_lru) != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	}

	/*
	 * Remember the generation number before any of the locking calls
	 * below can sleep, so we can detect recycling afterwards.
	 */
	vp = ncp->nc_vp;
	vpid = vp->v_id;
	if (vp == dvp) {	/* lookup on "." */
		vref(dvp);
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		/*
		 * Lock order for ".." is child-then-parent, so release
		 * the (child) directory lock before taking the target.
		 */
		VOP_UNLOCK(dvp, 0, p);
		cnp->cn_flags |= PDIRUNLOCK;
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() succeeded and both LOCKPARENT and
		 * ISLASTCN is set, lock the directory vnode as well.
		 */
		if (!error && (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) == 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0) {
				vput(vp);
				return (error);
			}
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
		error = vget(vp, LK_EXCLUSIVE, p);
		/*
		 * If the above vget() failed or either of LOCKPARENT or
		 * ISLASTCN is set, unlock the directory vnode.
		 */
		if (error || (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			VOP_UNLOCK(dvp, 0, p);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}

	/*
	 * Check that the lock succeeded, and that the capability number did
	 * not change while we were waiting for the lock.
	 */
	if (error || vpid != vp->v_id) {
		if (!error) {
			vput(vp);
			nchstats.ncs_falsehits++;
		} else
			nchstats.ncs_badhits++;
		/*
		 * The parent needs to be locked when we return to VOP_LOOKUP().
		 * The `.' case here should be extremely rare (if it can happen
		 * at all), so we don't bother optimizing out the unlock/relock.
		 */
		if (vp == dvp || error ||
		    (~cnp->cn_flags & (LOCKPARENT|ISLASTCN)) != 0) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) != 0)
				return (error);
			cnp->cn_flags &= ~PDIRUNLOCK;
		}
		return (-1);
	}

	nchstats.ncs_goodhits++;
	*vpp = vp;
	return (0);

remove:
	/*
	 * Last component and we are renaming or deleting,
	 * the cache entry is invalid, or otherwise don't
	 * want cache entry to exist.
	 */
	cache_zap(ncp);
	return (-1);
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 *
 * TODO: should we return *dvpp locked?
 */

int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp = NULL;
	char *bp;

	if (!doingcache)
		goto out;
	/*
	 * Walk the reverse map, skipping entries whose parent has been
	 * recycled (generation mismatch) or that point back at vp itself.
	 */
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_me) {
		dvp = ncp->nc_dvp;
		if (dvp && dvp != vp && ncp->nc_dvpid == dvp->v_id)
			goto found;
	}
	goto miss;
found:
#ifdef DIAGNOSTIC
	/* cache_enter() never puts "." or ".." in the reverse map. */
	if (ncp->nc_nlen == 1 &&
	    ncp->nc_name[0] == '.')
		panic("cache_revlookup: found entry for .");
	if (ncp->nc_nlen == 2 &&
	    ncp->nc_name[0] == '.' &&
	    ncp->nc_name[1] == '.')
		panic("cache_revlookup: found entry for ..");
#endif
	nchstats.ncs_revhits++;

	if (bufp != NULL) {
		/* Prepend the name just below *bpp; fail if it won't fit. */
		bp = *bpp;
		bp -= ncp->nc_nlen;
		if (bp <= bufp) {
			*dvpp = NULL;
			return (ERANGE);
		}
		memcpy(bp, ncp->nc_name, ncp->nc_nlen);
		*bpp = bp;
	}

	*dvpp = dvp;

	/*
	 * XXX: Should we vget() here to have more
	 * consistent semantics with cache_lookup()?
	 */
	return (0);

miss:
	nchstats.ncs_revmiss++;
out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache
 *
 * dvp is the directory the name was found (or, for a negative entry,
 * not found) in; vp is the resulting vnode or NULL for a negative
 * entry; cnp supplies the component name.  The total number of cached
 * entries is bounded by desiredvnodes: the oldest entry is recycled
 * before a new one is allocated once the limit is reached.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp, *lncp;

	if (!doingcache || cnp->cn_namelen > NAMECACHE_MAXLEN)
		return;

	/*
	 * allocate, or recycle (free and allocate) an ncp.
	 */
	if (numcache >= desiredvnodes) {
		if ((ncp = TAILQ_FIRST(&nclruhead)) != NULL)
			cache_zap(ncp);
		else if ((ncp = TAILQ_FIRST(&nclruneghead)) != NULL)
			cache_zap(ncp);
		else
			panic("wtf? leak?");
	}
	ncp = pool_get(&nch_pool, PR_WAITOK|PR_ZERO);

	/* grab the vnode we just found */
	ncp->nc_vp = vp;
	if (vp)
		ncp->nc_vpid = vp->v_id;

	/* fill in cache info */
	ncp->nc_dvp = dvp;
	ncp->nc_dvpid = dvp->v_id;
	ncp->nc_nlen = cnp->cn_namelen;
	bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen);
	/*
	 * First entry for this directory: hold dvp so it is not recycled
	 * while it anchors cache entries.  Released in cache_zap() when
	 * the tree goes empty again.
	 */
	if (RB_EMPTY(&dvp->v_nc_tree)) {
		vhold(dvp);
	}
	if ((lncp = RB_INSERT(namecache_rb_cache, &dvp->v_nc_tree, ncp))
	    != NULL) {
		/* someone has raced us and added a different entry
		 * for the same vnode (different ncp) - we don't need
		 * this entry, so free it and we are done.
		 */
		pool_put(&nch_pool, ncp);
		/* we know now dvp->v_nc_tree is not empty, no need
		 * to vdrop here
		 */
		goto done;
	}
	if (vp) {
		TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
		numcache++;
		/* don't put . or .. in the reverse map */
		if (vp != dvp && vp->v_type == VDIR &&
		    (ncp->nc_nlen > 2 ||
			(ncp->nc_nlen > 1 &&
			    ncp->nc_name[1] != '.') ||
			(ncp->nc_nlen > 0 &&
			    ncp->nc_name[0] != '.')))
			TAILQ_INSERT_TAIL(&vp->v_cache_dst, ncp,
			    nc_me);
	} else {
		TAILQ_INSERT_TAIL(&nclruneghead, ncp, nc_neg);
		numneg++;
	}
	/* Keep the negative-entry population bounded as well. */
	if (numneg > desiredvnodes) {
		if ((ncp = TAILQ_FIRST(&nclruneghead))
		    != NULL)
			cache_zap(ncp);
	}
done:
	return;
}


/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit()
{
	TAILQ_INIT(&nclruhead);
	TAILQ_INIT(&nclruneghead);
	pool_init(&nch_pool, sizeof(struct namecache), 0, 0, 0, "nchpl",
	    &pool_allocator_nointr);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge(struct vnode *vp)
{
	struct namecache *ncp;

	/* We should never have destinations cached for a non-VDIR vnode. */
	KASSERT(vp->v_type == VDIR || TAILQ_EMPTY(&vp->v_cache_dst));

	/* Zap every entry pointing at vp and every entry inside it. */
	while ((ncp = TAILQ_FIRST(&vp->v_cache_dst)))
		cache_zap(ncp);
	while ((ncp = RB_ROOT(&vp->v_nc_tree)))
		cache_zap(ncp);

	/*
	 * XXX this blows goats
	 *
	 * Bump v_id so any remaining entries referencing vp through a
	 * stale pointer fail their generation check; skip 0 so a fresh
	 * id never matches a zeroed field.
	 */
	vp->v_id = ++nextvnodeid;
	if (vp->v_id == 0)
		vp->v_id = ++nextvnodeid;
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid
 *
 * The line "nxtcp = nchhead" near the end is to avoid potential problems
 * if the cache lru chain is modified while we are dumping the
 * inode.  This makes the algorithm O(n^2), but do you think I care?
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	/* whack the regular entries */
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
			nxtcp = TAILQ_NEXT(ncp, nc_lru);
			continue;
		}
		/* free the resources we had */
		cache_zap(ncp);
		/* cause rescan of list, it may have altered */
		nxtcp = TAILQ_FIRST(&nclruhead);
	}
	/* whack the negative entries */
	for (ncp = TAILQ_FIRST(&nclruneghead); ncp != NULL; ncp = nxtcp) {
		if (ncp->nc_dvp == NULL || ncp->nc_dvp->v_mount != mp) {
			nxtcp = TAILQ_NEXT(ncp, nc_neg);
			continue;
		}
		/* free the resources we had */
		cache_zap(ncp);
		/* cause rescan of list, it may have altered */
		nxtcp = TAILQ_FIRST(&nclruneghead);
	}
}