1 /* $NetBSD: vfs_cache.c,v 1.154 2023/04/29 10:07:22 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1993
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)vfs_cache.c 8.3 (Berkeley) 8/22/94
61 */
62
63 /*
64 * Name caching:
65 *
66 * Names found by directory scans are retained in a cache for future
67 * reference. It is managed LRU, so frequently used names will hang
68 * around. The cache is indexed by hash value obtained from the name.
69 *
70 * The name cache is the brainchild of Robert Elz and was introduced in
71 * 4.3BSD. See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
72 * McKusick, May 21 1984.
73 *
74 * Data structures:
75 *
76 * Most Unix namecaches very sensibly use a global hash table to index
77 * names. The global hash table works well, but can cause concurrency
78 * headaches for the kernel hacker. In the NetBSD 10.0 implementation
79 * we are not sensible, and use a per-directory data structure to index
80 * names, but the cache otherwise functions the same.
81 *
82 * The index is a red-black tree. There are no special concurrency
83 * requirements placed on it, because it's per-directory and protected
84 * by the namecache's per-directory locks. It should therefore not be
85 * difficult to experiment with other types of index.
86 *
87 * Each cached name is stored in a struct namecache, along with a
88 * pointer to the associated vnode (nc_vp). Names longer than a
89 * maximum length of NCHNAMLEN are allocated with kmem_alloc(); they
90 * occur infrequently, and names shorter than this are stored directly
91 * in struct namecache. If it is a "negative" entry (i.e. for a name
92 * that is known NOT to exist), the vnode pointer will be NULL.
93 *
94 * For a directory with 3 cached names for 3 distinct vnodes, the
95 * various vnodes and namecache structs would be connected like this
96 * (the root is at the bottom of the diagram):
97 *
98 * ...
99 * ^
100 * |- vi_nc_tree
101 * |
102 * +----o----+ +---------+ +---------+
103 * | VDIR | | VCHR | | VREG |
104 * | vnode o-----+ | vnode o-----+ | vnode o------+
105 * +---------+ | +---------+ | +---------+ |
106 * ^ | ^ | ^ |
107 * |- nc_vp |- vi_nc_list |- nc_vp |- vi_nc_list |- nc_vp |
108 * | | | | | |
109 * +----o----+ | +----o----+ | +----o----+ |
110 * +---onamecache|<----+ +---onamecache|<----+ +---onamecache|<-----+
111 * | +---------+ | +---------+ | +---------+
112 * | ^ | ^ | ^
113 * | | | | | |
114 * | | +----------------------+ | |
115 * |-nc_dvp | +-------------------------------------------------+
116 * | |/- vi_nc_tree | |
117 * | | |- nc_dvp |- nc_dvp
118 * | +----o----+ | |
119 * +-->| VDIR |<----------+ |
120 * | vnode |<------------------------------------+
121 * +---------+
122 *
123 * START HERE
124 *
125 * Replacement:
126 *
127 * As the cache becomes full, old and unused entries are purged as new
128 * entries are added. The synchronization overhead in maintaining a
129 * strict ordering would be prohibitive, so the VM system's "clock" or
130 * "second chance" page replacement algorithm is aped here. New
131 * entries go to the tail of the active list. After they age out and
132 * reach the head of the list, they are moved to the tail of the
133 * inactive list. Any use of the deactivated cache entry reactivates
134 * it, saving it from impending doom; if not reactivated, the entry
135 * eventually reaches the head of the inactive list and is purged.
136 *
137 * Concurrency:
138 *
139 * From a performance perspective, cache_lookup(nameiop == LOOKUP) is
140 * what really matters; insertion of new entries with cache_enter() is
141 * comparatively infrequent, and overshadowed by the cost of expensive
142 * file system metadata operations (which may involve disk I/O). We
143 * therefore want to keep things as simple as possible in the lookup path.
144 *
145 * struct namecache is mostly stable except for list and tree related
146 * entries, changes to which don't affect the cached name or vnode.
147 * For changes to name+vnode, entries are purged in preference to
148 * modifying them.
149 *
150 * Read access to namecache entries is made via tree, list, or LRU
151 * list. A lock corresponding to the direction of access should be
152 * held. See definition of "struct namecache" in src/sys/namei.src,
153 * and the definition of "struct vnode" for the particulars.
154 *
155 * Per-CPU statistics and LRU list totals are read unlocked, since
156 * an approximate value is OK. We maintain 32-bit sized per-CPU
157 * counters and 64-bit global counters under the theory that 32-bit
158 * sized counters are less likely to be hosed by nonatomic increment
159 * (on 32-bit platforms).
160 *
161 * The lock order is:
162 *
163 * 1) vi->vi_nc_lock (tree or parent -> child direction,
164 * used during forward lookup)
165 *
166 * 2) vi->vi_nc_listlock (list or child -> parent direction,
167 * used during reverse lookup)
168 *
169 * 3) cache_lru_lock (LRU list direction, used during reclaim)
170 *
171 * 4) vp->v_interlock (what the cache entry points to)
172 */
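/*
 * Illustrative sketch of the forward-lookup side of the lock order above.
 * This only mirrors what cache_lookup() below already does; it is not a
 * separate interface, and the names are just the ones used in this file:
 *
 *	rw_enter(&dvi->vi_nc_lock, RW_READER);		(lock 1)
 *	ncp = cache_lookup_entry(dvp, name, namelen, key);
 *	error = vcache_tryvget(ncp->nc_vp);		(lock 4, if needed)
 *	rw_exit(&dvi->vi_nc_lock);
 */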
173
174 #include <sys/cdefs.h>
175 __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.154 2023/04/29 10:07:22 riastradh Exp $");
176
177 #define __NAMECACHE_PRIVATE
178 #ifdef _KERNEL_OPT
179 #include "opt_ddb.h"
180 #include "opt_dtrace.h"
181 #endif
182
183 #include <sys/param.h>
184 #include <sys/types.h>
185 #include <sys/atomic.h>
186 #include <sys/callout.h>
187 #include <sys/cpu.h>
188 #include <sys/errno.h>
189 #include <sys/evcnt.h>
190 #include <sys/hash.h>
191 #include <sys/kernel.h>
#include <sys/kmem.h>
192 #include <sys/mount.h>
193 #include <sys/mutex.h>
194 #include <sys/namei.h>
195 #include <sys/param.h>
196 #include <sys/pool.h>
197 #include <sys/sdt.h>
198 #include <sys/sysctl.h>
199 #include <sys/systm.h>
200 #include <sys/time.h>
201 #include <sys/vnode_impl.h>
202
203 #include <miscfs/genfs/genfs.h>
204
205 static void cache_activate(struct namecache *);
206 static void cache_update_stats(void *);
207 static int cache_compare_nodes(void *, const void *, const void *);
208 static void cache_deactivate(void);
209 static void cache_reclaim(void);
210 static int cache_stat_sysctl(SYSCTLFN_ARGS);
211
212 /*
213 * Global pool cache.
214 */
215 static pool_cache_t cache_pool __read_mostly;
216
217 /*
218 * LRU replacement.
219 */
220 enum cache_lru_id {
221 LRU_ACTIVE,
222 LRU_INACTIVE,
223 LRU_COUNT
224 };
225
226 static struct {
227 TAILQ_HEAD(, namecache) list[LRU_COUNT];
228 u_int count[LRU_COUNT];
229 } cache_lru __cacheline_aligned;
230
231 static kmutex_t cache_lru_lock __cacheline_aligned;
232
233 /*
234 * Cache effectiveness statistics. nchstats holds system-wide total.
235 */
236 struct nchstats nchstats;
237 struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
238 struct nchcpu {
239 struct nchstats_percpu cur;
240 struct nchstats_percpu last;
241 };
242 static callout_t cache_stat_callout;
243 static kmutex_t cache_stat_lock __cacheline_aligned;
244
245 #define COUNT(f) do { \
246 lwp_t *l = curlwp; \
247 KPREEMPT_DISABLE(l); \
248 struct nchcpu *nchcpu = curcpu()->ci_data.cpu_nch; \
249 nchcpu->cur.f++; \
250 KPREEMPT_ENABLE(l); \
251 } while (/* CONSTCOND */ 0)
252
253 #define UPDATE(nchcpu, f) do { \
254 uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
255 nchstats.f += (uint32_t)(cur - nchcpu->last.f); \
256 nchcpu->last.f = cur; \
257 } while (/* CONSTCOND */ 0)
258
259 /*
260 * Tunables. cache_maxlen replaces the historical doingcache:
261 * set it zero to disable caching for debugging purposes.
262 */
263 int cache_lru_maxdeact __read_mostly = 2; /* max # to deactivate */
264 int cache_lru_maxscan __read_mostly = 64; /* max # to scan/reclaim */
265 int cache_maxlen __read_mostly = USHRT_MAX; /* max name length to cache */
266 int cache_stat_interval __read_mostly = 300; /* in seconds */
267
268 /*
269 * sysctl stuff.
270 */
271 static struct sysctllog *cache_sysctllog;
272
273 /*
274 * This is a dummy name that cannot usually occur anywhere in the cache nor
275 * file system. It's used when caching the root vnode of mounted file
276 * systems. The name is attached to the directory that the file system is
277 * mounted on.
278 */
279 static const char cache_mp_name[] = "";
280 static const int cache_mp_nlen = sizeof(cache_mp_name) - 1;
281
282 /*
283 * Red-black tree stuff.
284 */
285 static const rb_tree_ops_t cache_rbtree_ops = {
286 .rbto_compare_nodes = cache_compare_nodes,
287 .rbto_compare_key = cache_compare_nodes,
288 .rbto_node_offset = offsetof(struct namecache, nc_tree),
289 .rbto_context = NULL
290 };
291
292 /*
293 * dtrace probes.
294 */
295 SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
296 SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
297 SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
298 SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
299 SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
300 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
301 "char *", "size_t");
302 SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
303 "char *", "size_t");
304 SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
305 "char *", "size_t");
306 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
307 "struct vnode *");
308 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
309 "int");
310 SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
311 SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
312 "char *", "size_t");
313 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
314 "char *", "size_t");
315
316 /*
317 * rbtree: compare two nodes.
318 */
319 static int
320 cache_compare_nodes(void *context, const void *n1, const void *n2)
321 {
322 const struct namecache *nc1 = n1;
323 const struct namecache *nc2 = n2;
324
325 if (nc1->nc_key < nc2->nc_key) {
326 return -1;
327 }
328 if (nc1->nc_key > nc2->nc_key) {
329 return 1;
330 }
331 KASSERT(nc1->nc_nlen == nc2->nc_nlen);
332 return memcmp(nc1->nc_name, nc2->nc_name, nc1->nc_nlen);
333 }
334
335 /*
336 * Compute a key value for the given name. The name length is encoded in
337 * the key value to try and improve uniqueness, and so that length doesn't
338 * need to be compared separately for string comparisons.
339 */
340 static inline uint64_t
341 cache_key(const char *name, size_t nlen)
342 {
343 uint64_t key;
344
345 KASSERT(nlen <= USHRT_MAX);
346
347 key = hash32_buf(name, nlen, HASH32_STR_INIT);
348 return (key << 32) | nlen;
349 }
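/*
 * For example (illustration only), the 3-character name "usr" yields
 * (hash32_buf("usr", 3, HASH32_STR_INIT) << 32) | 3: the length sits in
 * the low 32 bits and the hash in the high 32 bits, so equal keys imply
 * equal lengths and cache_compare_nodes() can memcmp() the names directly.
 */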
350
351 /*
352 * Remove an entry from the cache. vi_nc_lock must be held, and if dir2node
353 * is true, then we're locking in the conventional direction and the list
354 * lock will be acquired when removing the entry from the vnode list.
355 */
356 static void
357 cache_remove(struct namecache *ncp, const bool dir2node)
358 {
359 struct vnode *vp, *dvp = ncp->nc_dvp;
360 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
361
362 KASSERT(rw_write_held(&dvi->vi_nc_lock));
363 KASSERT(cache_key(ncp->nc_name, ncp->nc_nlen) == ncp->nc_key);
364 KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, ncp) == ncp);
365
366 SDT_PROBE(vfs, namecache, invalidate, done, ncp,
367 0, 0, 0, 0);
368
369 /*
370 * Remove from the vnode's list. This excludes cache_revlookup(),
371 * and then it's safe to remove from the LRU lists.
372 */
373 if ((vp = ncp->nc_vp) != NULL) {
374 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
375 if (__predict_true(dir2node)) {
376 rw_enter(&vi->vi_nc_listlock, RW_WRITER);
377 TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
378 rw_exit(&vi->vi_nc_listlock);
379 } else {
380 TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
381 }
382 }
383
384 /* Remove from the directory's rbtree. */
385 rb_tree_remove_node(&dvi->vi_nc_tree, ncp);
386
387 /* Remove from the LRU lists. */
388 mutex_enter(&cache_lru_lock);
389 TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
390 cache_lru.count[ncp->nc_lrulist]--;
391 mutex_exit(&cache_lru_lock);
392
393 /* Finally, free it. */
394 if (ncp->nc_nlen > NCHNAMLEN) {
395 size_t sz = offsetof(struct namecache, nc_name[ncp->nc_nlen]);
396 kmem_free(ncp, sz);
397 } else {
398 pool_cache_put(cache_pool, ncp);
399 }
400 }
401
402 /*
403 * Find a single cache entry and return it. vi_nc_lock must be held.
404 */
405 static struct namecache * __noinline
406 cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
407 uint64_t key)
408 {
409 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
410 struct rb_node *node = dvi->vi_nc_tree.rbt_root;
411 struct namecache *ncp;
412 int lrulist, diff;
413
414 KASSERT(rw_lock_held(&dvi->vi_nc_lock));
415
416 /*
417 * Search the RB tree for the key. This is an inlined lookup
418 * tailored for exactly what's needed here (64-bit key and so on)
419 * that is quite a bit faster than using rb_tree_find_node().
420 *
421 * For a matching key memcmp() needs to be called once to confirm
422 * that the correct name has been found. Very rarely there will be
423 * a key value collision and the search will continue.
424 */
425 for (;;) {
426 if (__predict_false(RB_SENTINEL_P(node))) {
427 return NULL;
428 }
429 ncp = (struct namecache *)node;
430 KASSERT((void *)&ncp->nc_tree == (void *)ncp);
431 KASSERT(ncp->nc_dvp == dvp);
432 if (ncp->nc_key == key) {
433 KASSERT(ncp->nc_nlen == namelen);
434 diff = memcmp(ncp->nc_name, name, namelen);
435 if (__predict_true(diff == 0)) {
436 break;
437 }
438 node = node->rb_nodes[diff < 0];
439 } else {
440 node = node->rb_nodes[ncp->nc_key < key];
441 }
442 }
443
444 /*
445 * If the entry is on the wrong LRU list, requeue it. This is an
446 * unlocked check, but it will rarely be wrong and even then there
447 * will be no harm caused.
448 */
449 lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
450 if (__predict_false(lrulist != LRU_ACTIVE)) {
451 cache_activate(ncp);
452 }
453 return ncp;
454 }
455
456 /*
457 * Look for the name in the cache. We don't do this
458 * if the segment name is long, simply so the cache can avoid
459 * holding long names (which would either waste space, or
460 * add greatly to the complexity).
461 *
462 * Lookup is called with DVP pointing to the directory to search,
463 * and CNP providing the name of the entry being sought: cn_nameptr
464 * is the name, cn_namelen is its length, and cn_flags is the flags
465 * word from the namei operation.
466 *
467 * DVP must be locked.
468 *
469 * There are three possible non-error return states:
470 * 1. Nothing was found in the cache. Nothing is known about
471 * the requested name.
472 * 2. A negative entry was found in the cache, meaning that the
473 * requested name definitely does not exist.
474 * 3. A positive entry was found in the cache, meaning that the
475 * requested name does exist and that we are providing the
476 * vnode.
477 * In these cases the results are:
478 * 1. 0 returned; VN is set to NULL.
479 * 2. 1 returned; VN is set to NULL.
480 * 3. 1 returned; VN is set to the vnode found.
481 *
482 * The additional result argument ISWHT is set to zero, unless a
483 * negative entry is found that was entered as a whiteout, in which
484 * case ISWHT is set to one.
485 *
486 * The ISWHT_RET argument pointer may be null. In this case an
487 * assertion is made that the whiteout flag is not set. File systems
488 * that do not support whiteouts can/should do this.
489 *
490 * Filesystems that do support whiteouts should add ISWHITEOUT to
491 * cnp->cn_flags if ISWHT comes back nonzero.
492 *
493 * When a vnode is returned, it is locked, as per the vnode lookup
494 * locking protocol.
495 *
496 * There is no way for this function to fail, in the sense of
497 * generating an error that requires aborting the namei operation.
498 *
499 * (Prior to October 2012, this function returned an integer status,
500 * and a vnode, and mucked with the flags word in CNP for whiteouts.
501 * The integer status was -1 for "nothing found", ENOENT for "a
502 * negative entry found", 0 for "a positive entry found", and possibly
503 * other errors, and the value of VN might or might not have been set
504 * depending on what error occurred.)
505 */
506 bool
507 cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
508 uint32_t nameiop, uint32_t cnflags,
509 int *iswht_ret, struct vnode **vn_ret)
510 {
511 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
512 struct namecache *ncp;
513 struct vnode *vp;
514 uint64_t key;
515 int error;
516 bool hit;
517 krw_t op;
518
519 KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
520
521 /* Establish default result values */
522 if (iswht_ret != NULL) {
523 *iswht_ret = 0;
524 }
525 *vn_ret = NULL;
526
527 if (__predict_false(namelen > cache_maxlen)) {
528 SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
529 name, namelen, 0, 0);
530 COUNT(ncs_long);
531 return false;
532 }
533
534 /* Compute the key up front - don't need the lock. */
535 key = cache_key(name, namelen);
536
537 /* Could the entry be purged below? */
538 if ((cnflags & ISLASTCN) != 0 &&
539 ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
540 op = RW_WRITER;
541 } else {
542 op = RW_READER;
543 }
544
545 /* Now look for the name. */
546 rw_enter(&dvi->vi_nc_lock, op);
547 ncp = cache_lookup_entry(dvp, name, namelen, key);
548 if (__predict_false(ncp == NULL)) {
549 rw_exit(&dvi->vi_nc_lock);
550 COUNT(ncs_miss);
551 SDT_PROBE(vfs, namecache, lookup, miss, dvp,
552 name, namelen, 0, 0);
553 return false;
554 }
555 if (__predict_false((cnflags & MAKEENTRY) == 0)) {
556 /*
557 * Last component and we are renaming or deleting,
558 * the cache entry is invalid, or otherwise don't
559 * want cache entry to exist.
560 */
561 KASSERT((cnflags & ISLASTCN) != 0);
562 cache_remove(ncp, true);
563 rw_exit(&dvi->vi_nc_lock);
564 COUNT(ncs_badhits);
565 return false;
566 }
567 if (ncp->nc_vp == NULL) {
568 if (iswht_ret != NULL) {
569 /*
570 * Restore the ISWHITEOUT flag saved earlier.
571 */
572 *iswht_ret = ncp->nc_whiteout;
573 } else {
574 KASSERT(!ncp->nc_whiteout);
575 }
576 if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
577 /*
578 * Last component and we are preparing to create
579 * the named object, so flush the negative cache
580 * entry.
581 */
582 COUNT(ncs_badhits);
583 cache_remove(ncp, true);
584 hit = false;
585 } else {
586 COUNT(ncs_neghits);
587 SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
588 namelen, 0, 0);
589 /* found neg entry; vn is already null from above */
590 hit = true;
591 }
592 rw_exit(&dvi->vi_nc_lock);
593 return hit;
594 }
595 vp = ncp->nc_vp;
596 error = vcache_tryvget(vp);
597 rw_exit(&dvi->vi_nc_lock);
598 if (error) {
599 KASSERT(error == EBUSY);
600 /*
601 * This vnode is being cleaned out.
602 * XXX badhits?
603 */
604 COUNT(ncs_falsehits);
605 return false;
606 }
607
608 COUNT(ncs_goodhits);
609 SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
610 /* found it */
611 *vn_ret = vp;
612 return true;
613 }
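/*
 * Illustrative sketch of a caller (hypothetical and simplified; no real
 * file system is quoted here): a VOP_LOOKUP implementation consults the
 * cache before scanning directory metadata, and distinguishes the three
 * result states described above:
 *
 *	struct vnode *vp;
 *	int iswht = 0;
 *
 *	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_nameiop, cnp->cn_flags, &iswht, &vp)) {
 *		if (vp != NULL)
 *			return 0;	(positive hit)
 *		return ENOENT;		(negative hit; whiteouts via iswht)
 *	}
 *	(miss: scan the directory, then call cache_enter() on the result)
 */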
614
615 /*
616 * Version of the above without the nameiop argument, for NFS.
617 */
618 bool
619 cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
620 uint32_t cnflags,
621 int *iswht_ret, struct vnode **vn_ret)
622 {
623
624 return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
625 iswht_ret, vn_ret);
626 }
627
628 /*
629 * Used by namei() to walk down a path, component by component by looking up
630 * names in the cache. The node locks are chained along the way: a parent's
631 * lock is not dropped until the child's is acquired.
632 */
633 bool
634 cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
635 struct vnode **vn_ret, krwlock_t **plock,
636 kauth_cred_t cred)
637 {
638 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
639 struct namecache *ncp;
640 krwlock_t *oldlock, *newlock;
641 uint64_t key;
642 int error;
643
644 KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
645
646 /* If disabled, or file system doesn't support this, bail out. */
647 if (__predict_false((dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
648 return false;
649 }
650
651 if (__predict_false(namelen > cache_maxlen)) {
652 COUNT(ncs_long);
653 return false;
654 }
655
656 /* Compute the key up front - don't need the lock. */
657 key = cache_key(name, namelen);
658
659 /*
660 * Acquire the directory lock. Once we have that, we can drop the
661 * previous one (if any).
662 *
663 * The two lock holds mean that the directory can't go away while
664 * here: the directory must be purged with cache_purge() before
665 * being freed, and both parent & child's vi_nc_lock must be taken
666 * before that point is passed.
667 *
668 * However if there's no previous lock, like at the root of the
669 * chain, then "dvp" must be referenced to prevent dvp going away
670 * before we get its lock.
671 *
672 * Note that the two locks can be the same if looking up a dot, for
673 * example: /usr/bin/. If looking up the parent (..) we can't wait
674 * on the lock as child -> parent is the wrong direction.
675 */
676 if (*plock != &dvi->vi_nc_lock) {
677 oldlock = *plock;
678 newlock = &dvi->vi_nc_lock;
679 if (!rw_tryenter(&dvi->vi_nc_lock, RW_READER)) {
680 return false;
681 }
682 } else {
683 oldlock = NULL;
684 newlock = NULL;
685 if (*plock == NULL) {
686 KASSERT(vrefcnt(dvp) > 0);
687 }
688 }
689
690 /*
691 * First up check if the user is allowed to look up files in this
692 * directory.
693 */
694 if (cred != FSCRED) {
695 if (dvi->vi_nc_mode == VNOVAL) {
696 if (newlock != NULL) {
697 rw_exit(newlock);
698 }
699 return false;
700 }
701 KASSERT(dvi->vi_nc_uid != VNOVAL);
702 KASSERT(dvi->vi_nc_gid != VNOVAL);
703 error = kauth_authorize_vnode(cred,
704 KAUTH_ACCESS_ACTION(VEXEC,
705 dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
706 genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid,
707 dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
708 if (error != 0) {
709 if (newlock != NULL) {
710 rw_exit(newlock);
711 }
712 COUNT(ncs_denied);
713 return false;
714 }
715 }
716
717 /*
718 * Now look for a matching cache entry.
719 */
720 ncp = cache_lookup_entry(dvp, name, namelen, key);
721 if (__predict_false(ncp == NULL)) {
722 if (newlock != NULL) {
723 rw_exit(newlock);
724 }
725 COUNT(ncs_miss);
726 SDT_PROBE(vfs, namecache, lookup, miss, dvp,
727 name, namelen, 0, 0);
728 return false;
729 }
730 if (ncp->nc_vp == NULL) {
731 /* found negative entry; vn is already null from above */
732 KASSERT(namelen != cache_mp_nlen);
733 KASSERT(name != cache_mp_name);
734 COUNT(ncs_neghits);
735 } else {
736 COUNT(ncs_goodhits); /* XXX can be "badhits" */
737 }
738 SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
739
740 /*
741 * Return with the directory lock still held. It will either be
742 * returned to us with another call to cache_lookup_linked() when
743 * looking up the next component, or the caller will release it
744 * manually when finished.
745 */
746 if (oldlock) {
747 rw_exit(oldlock);
748 }
749 if (newlock) {
750 *plock = newlock;
751 }
752 *vn_ret = ncp->nc_vp;
753 return true;
754 }
755
756 /*
757 * Scan cache looking for name of directory entry pointing at vp.
758 * Will not search for "." or "..".
759 *
760 * If the lookup succeeds the vnode is referenced and stored in dvpp.
761 *
762 * If bufp is non-NULL, also place the name in the buffer which starts
763 * at bufp, immediately before *bpp, and move bpp backwards to point
764 * at the start of it. (Yes, this is a little baroque, but it's done
765 * this way to cater to the whims of getcwd).
766 *
767 * Returns 0 on success, -1 on cache miss, positive errno on failure.
768 */
769 int
770 cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
771 bool checkaccess, accmode_t accmode)
772 {
773 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
774 struct namecache *ncp;
775 struct vnode *dvp;
776 int error, nlen, lrulist;
777 char *bp;
778
779 KASSERT(vp != NULL);
780
781 if (cache_maxlen == 0)
782 goto out;
783
784 rw_enter(&vi->vi_nc_listlock, RW_READER);
785 if (checkaccess) {
786 /*
787 * Check if the user is allowed to see. NOTE: this is
788 * checking for access on the "wrong" directory. getcwd()
789 * wants to see that there is access on every component
790 * along the way, not that there is access to any individual
791 * component. Don't use this to check you can look in vp.
792 *
793 * I don't like it, I didn't come up with it, don't blame me!
794 */
795 if (vi->vi_nc_mode == VNOVAL) {
796 rw_exit(&vi->vi_nc_listlock);
797 return -1;
798 }
799 KASSERT(vi->vi_nc_uid != VNOVAL);
800 KASSERT(vi->vi_nc_gid != VNOVAL);
801 error = kauth_authorize_vnode(kauth_cred_get(),
802 KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
803 ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred,
804 vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS,
805 NULL, accmode));
806 if (error != 0) {
807 rw_exit(&vi->vi_nc_listlock);
808 COUNT(ncs_denied);
809 return EACCES;
810 }
811 }
812 TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
813 KASSERT(ncp->nc_vp == vp);
814 KASSERT(ncp->nc_dvp != NULL);
815 nlen = ncp->nc_nlen;
816
817 /*
818 * Ignore mountpoint entries.
819 */
820 if (ncp->nc_nlen == cache_mp_nlen) {
821 continue;
822 }
823
824 /*
825 * The queue is partially sorted. Once we hit dots, nothing
826 * else remains but dots and dotdots, so bail out.
827 */
828 if (ncp->nc_name[0] == '.') {
829 if (nlen == 1 ||
830 (nlen == 2 && ncp->nc_name[1] == '.')) {
831 break;
832 }
833 }
834
835 /*
836 * Record a hit on the entry. This is an unlocked read but
837 * even if wrong it doesn't matter too much.
838 */
839 lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
840 if (lrulist != LRU_ACTIVE) {
841 cache_activate(ncp);
842 }
843
844 if (bufp) {
845 bp = *bpp;
846 bp -= nlen;
847 if (bp <= bufp) {
848 *dvpp = NULL;
849 rw_exit(&vi->vi_nc_listlock);
850 SDT_PROBE(vfs, namecache, revlookup,
851 fail, vp, ERANGE, 0, 0, 0);
852 return (ERANGE);
853 }
854 memcpy(bp, ncp->nc_name, nlen);
855 *bpp = bp;
856 }
857
858 dvp = ncp->nc_dvp;
859 error = vcache_tryvget(dvp);
860 rw_exit(&vi->vi_nc_listlock);
861 if (error) {
862 KASSERT(error == EBUSY);
863 if (bufp)
864 (*bpp) += nlen;
865 *dvpp = NULL;
866 SDT_PROBE(vfs, namecache, revlookup, fail, vp,
867 error, 0, 0, 0);
868 return -1;
869 }
870 *dvpp = dvp;
871 SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
872 0, 0, 0);
873 COUNT(ncs_revhits);
874 return (0);
875 }
876 rw_exit(&vi->vi_nc_listlock);
877 COUNT(ncs_revmiss);
878 out:
879 *dvpp = NULL;
880 return (-1);
881 }
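/*
 * Illustrative sketch of the buffer convention (hypothetical caller, along
 * the lines of what getcwd() does): the path is built backwards, with each
 * successful call prepending one name immediately before *bpp and moving
 * bpp back to point at it, before stepping up to the parent:
 *
 *	char buf[MAXPATHLEN];
 *	char *bp = buf + MAXPATHLEN;
 *
 *	while (vp is not the root) {
 *		error = cache_revlookup(vp, &dvp, &bp, buf, false, 0);
 *		if (error != 0)
 *			(fall back to scanning the parent directory)
 *		*(--bp) = '/';
 *		vp = dvp;	(dvp was referenced for us on success)
 *	}
 */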
882
883 /*
884 * Add an entry to the cache.
885 */
886 void
887 cache_enter(struct vnode *dvp, struct vnode *vp,
888 const char *name, size_t namelen, uint32_t cnflags)
889 {
890 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
891 struct namecache *ncp, *oncp;
892 int total;
893
894 KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);
895
896 /* First, check whether we can/should add a cache entry. */
897 if ((cnflags & MAKEENTRY) == 0 ||
898 __predict_false(namelen > cache_maxlen)) {
899 SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
900 0, 0);
901 return;
902 }
903
904 SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);
905
906 /*
907 * Reclaim some entries if over budget. This is an unlocked check,
908 * but precision doesn't matter here: we just need to catch up
909 * eventually, and going over budget temporarily is harmless.
910 */
911 total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
912 total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
913 if (__predict_false(total > desiredvnodes)) {
914 cache_reclaim();
915 }
916
917 /* Now allocate a fresh entry. */
918 if (__predict_true(namelen <= NCHNAMLEN)) {
919 ncp = pool_cache_get(cache_pool, PR_WAITOK);
920 } else {
921 size_t sz = offsetof(struct namecache, nc_name[namelen]);
922 ncp = kmem_alloc(sz, KM_SLEEP);
923 }
924
925 /*
926 * Fill in cache info. For negative hits, save the ISWHITEOUT flag
927 * so we can restore it later when the cache entry is used again.
928 */
929 ncp->nc_vp = vp;
930 ncp->nc_dvp = dvp;
931 ncp->nc_key = cache_key(name, namelen);
932 ncp->nc_nlen = namelen;
933 ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
934 memcpy(ncp->nc_name, name, namelen);
935
936 /*
937 * Insert to the directory. Concurrent lookups may race for a cache
938 * entry. If there's an entry there already, purge it.
939 */
940 rw_enter(&dvi->vi_nc_lock, RW_WRITER);
941 oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
942 if (oncp != ncp) {
943 KASSERT(oncp->nc_key == ncp->nc_key);
944 KASSERT(oncp->nc_nlen == ncp->nc_nlen);
945 KASSERT(memcmp(oncp->nc_name, name, namelen) == 0);
946 cache_remove(oncp, true);
947 oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
948 KASSERT(oncp == ncp);
949 }
950
951 /*
952 * With the directory lock still held, insert to the tail of the
953 * ACTIVE LRU list (new) and take the opportunity to incrementally
954 * balance the lists.
955 */
956 mutex_enter(&cache_lru_lock);
957 ncp->nc_lrulist = LRU_ACTIVE;
958 cache_lru.count[LRU_ACTIVE]++;
959 TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
960 cache_deactivate();
961 mutex_exit(&cache_lru_lock);
962
963 /*
964 * Finally, insert to the vnode and unlock. With everything set up
965 * it's safe to let cache_revlookup() see the entry. Partially sort
966 * the per-vnode list: dots go to back so cache_revlookup() doesn't
967 * have to consider them.
968 */
969 if (vp != NULL) {
970 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
971 rw_enter(&vi->vi_nc_listlock, RW_WRITER);
972 if ((namelen == 1 && name[0] == '.') ||
973 (namelen == 2 && name[0] == '.' && name[1] == '.')) {
974 TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
975 } else {
976 TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
977 }
978 rw_exit(&vi->vi_nc_listlock);
979 }
980 rw_exit(&dvi->vi_nc_lock);
981 }
982
983 /*
984 * Set identity info in cache for a vnode. We only care about directories
985 * so ignore other updates. The cached info may be marked invalid if the
986 * inode has an ACL.
987 */
988 void
989 cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid)
990 {
991 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
992
993 if (vp->v_type == VDIR) {
994 /* Grab both locks, for forward & reverse lookup. */
995 rw_enter(&vi->vi_nc_lock, RW_WRITER);
996 rw_enter(&vi->vi_nc_listlock, RW_WRITER);
997 if (valid) {
998 vi->vi_nc_mode = mode;
999 vi->vi_nc_uid = uid;
1000 vi->vi_nc_gid = gid;
1001 } else {
1002 vi->vi_nc_mode = VNOVAL;
1003 vi->vi_nc_uid = VNOVAL;
1004 vi->vi_nc_gid = VNOVAL;
1005 }
1006 rw_exit(&vi->vi_nc_listlock);
1007 rw_exit(&vi->vi_nc_lock);
1008 }
1009 }
1010
1011 /*
1012 * Return true if we have identity info cached for the given vnode, and
1013 * use this as an opportunity to confirm that everything squares up.
1014 *
1015 * Because of shared code, some file systems could provide partial
1016 * information, missing some updates, so check the mount flag too.
1017 */
1018 bool
1019 cache_have_id(struct vnode *vp)
1020 {
1021
1022 if (vp->v_type == VDIR &&
1023 (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 &&
1024 atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) {
1025 return true;
1026 } else {
1027 return false;
1028 }
1029 }
1030
1031 /*
1032 * Enter a mount point. cvp is the covered vnode, and rvp is the root of
1033 * the mounted file system.
1034 */
1035 void
1036 cache_enter_mount(struct vnode *cvp, struct vnode *rvp)
1037 {
1038
1039 KASSERT(vrefcnt(cvp) > 0);
1040 KASSERT(vrefcnt(rvp) > 0);
1041 KASSERT(cvp->v_type == VDIR);
1042 KASSERT((rvp->v_vflag & VV_ROOT) != 0);
1043
1044 if (rvp->v_type == VDIR) {
1045 cache_enter(cvp, rvp, cache_mp_name, cache_mp_nlen, MAKEENTRY);
1046 }
1047 }
1048
1049 /*
1050 * Look up a cached mount point. Used in the strongly locked path.
1051 */
1052 bool
1053 cache_lookup_mount(struct vnode *dvp, struct vnode **vn_ret)
1054 {
1055 bool ret;
1056
1057 ret = cache_lookup(dvp, cache_mp_name, cache_mp_nlen, LOOKUP,
1058 MAKEENTRY, NULL, vn_ret);
1059 KASSERT((*vn_ret != NULL) == ret);
1060 return ret;
1061 }
1062
1063 /*
1064 * Try to cross a mount point. For use with cache_lookup_linked().
1065 */
1066 bool
1067 cache_cross_mount(struct vnode **dvp, krwlock_t **plock)
1068 {
1069
1070 return cache_lookup_linked(*dvp, cache_mp_name, cache_mp_nlen,
1071 dvp, plock, FSCRED);
1072 }
1073
1074 /*
1075 * Name cache initialization, from vfs_init() when the system is booting.
1076 */
1077 void
1078 nchinit(void)
1079 {
1080
1081 cache_pool = pool_cache_init(sizeof(struct namecache),
1082 coherency_unit, 0, 0, "namecache", NULL, IPL_NONE, NULL,
1083 NULL, NULL);
1084 KASSERT(cache_pool != NULL);
1085
1086 mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
1087 TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
1088 TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);
1089
1090 mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
1091 callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
1092 callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
1093 callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
1094
1095 KASSERT(cache_sysctllog == NULL);
1096 sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
1097 CTLFLAG_PERMANENT,
1098 CTLTYPE_STRUCT, "namecache_stats",
1099 SYSCTL_DESCR("namecache statistics"),
1100 cache_stat_sysctl, 0, NULL, 0,
1101 CTL_VFS, CTL_CREATE, CTL_EOL);
1102 }
1103
1104 /*
1105 * Called once for each CPU in the system as attached.
1106 */
1107 void
1108 cache_cpu_init(struct cpu_info *ci)
1109 {
1110 void *p;
1111 size_t sz;
1112
1113 sz = roundup2(sizeof(struct nchcpu), coherency_unit) + coherency_unit;
1114 p = kmem_zalloc(sz, KM_SLEEP);
1115 ci->ci_data.cpu_nch = (void *)roundup2((uintptr_t)p, coherency_unit);
1116 }
1117
1118 /*
1119 * A vnode is being allocated: set up cache structures.
1120 */
1121 void
1122 cache_vnode_init(struct vnode *vp)
1123 {
1124 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
1125
1126 rw_init(&vi->vi_nc_lock);
1127 rw_init(&vi->vi_nc_listlock);
1128 rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
1129 TAILQ_INIT(&vi->vi_nc_list);
1130 vi->vi_nc_mode = VNOVAL;
1131 vi->vi_nc_uid = VNOVAL;
1132 vi->vi_nc_gid = VNOVAL;
1133 }
1134
1135 /*
1136 * A vnode is being freed: finish cache structures.
1137 */
1138 void
1139 cache_vnode_fini(struct vnode *vp)
1140 {
1141 vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
1142
1143 KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
1144 KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
1145 rw_destroy(&vi->vi_nc_lock);
1146 rw_destroy(&vi->vi_nc_listlock);
1147 }
1148
1149 /*
1150 * Helper for cache_purge1(): purge cache entries for the given vnode from
1151 * all directories that the vnode is cached in.
1152 */
1153 static void
1154 cache_purge_parents(struct vnode *vp)
1155 {
1156 vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
1157 struct vnode *dvp, *blocked;
1158 struct namecache *ncp;
1159
1160 SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);
1161
1162 blocked = NULL;
1163
1164 rw_enter(&vi->vi_nc_listlock, RW_WRITER);
1165 while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
1166 /*
1167 * Locking in the wrong direction. Try for a hold on the
1168 * directory node's lock, and if we get it then all good,
1169 * nuke the entry and move on to the next.
1170 */
1171 dvp = ncp->nc_dvp;
1172 dvi = VNODE_TO_VIMPL(dvp);
1173 if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
1174 cache_remove(ncp, false);
1175 rw_exit(&dvi->vi_nc_lock);
1176 blocked = NULL;
1177 continue;
1178 }
1179
1180 /*
1181 * We can't wait on the directory node's lock with our list
1182 * lock held or the system could deadlock.
1183 *
1184 * Take a hold on the directory vnode to prevent it from
1185 * being freed (taking the vnode & lock with it). Then
1186 * wait for the lock to become available with no other locks
1187 * held, and retry.
1188 *
1189 * If this happens twice in a row, give the other side a
1190 * breather; we can do nothing until it lets go.
1191 */
1192 vhold(dvp);
1193 rw_exit(&vi->vi_nc_listlock);
1194 rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1195 /* Do nothing. */
1196 rw_exit(&dvi->vi_nc_lock);
1197 holdrele(dvp);
1198 if (blocked == dvp) {
1199 kpause("ncpurge", false, 1, NULL);
1200 }
1201 rw_enter(&vi->vi_nc_listlock, RW_WRITER);
1202 blocked = dvp;
1203 }
1204 rw_exit(&vi->vi_nc_listlock);
1205 }
1206
1207 /*
1208 * Helper for cache_purge1(): purge all cache entries hanging off the given
1209 * directory vnode.
1210 */
1211 static void
1212 cache_purge_children(struct vnode *dvp)
1213 {
1214 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
1215 struct namecache *ncp;
1216
1217 SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);
1218
1219 rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1220 while ((ncp = RB_TREE_MIN(&dvi->vi_nc_tree)) != NULL) {
1221 cache_remove(ncp, true);
1222 }
1223 rw_exit(&dvi->vi_nc_lock);
1224 }
1225
1226 /*
1227 * Helper for cache_purge1(): purge cache entry from the given vnode,
1228 * finding it by name.
1229 */
1230 static void
1231 cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
1232 {
1233 vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
1234 struct namecache *ncp;
1235 uint64_t key;
1236
1237 SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);
1238
1239 key = cache_key(name, namelen);
1240 rw_enter(&dvi->vi_nc_lock, RW_WRITER);
1241 ncp = cache_lookup_entry(dvp, name, namelen, key);
1242 if (ncp) {
1243 cache_remove(ncp, true);
1244 }
1245 rw_exit(&dvi->vi_nc_lock);
1246 }
1247
1248 /*
1249 * Cache flush, a particular vnode; called when a vnode is renamed to
1250 * hide entries that would now be invalid.
1251 */
1252 void
1253 cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
1254 {
1255
1256 if (flags & PURGE_PARENTS) {
1257 cache_purge_parents(vp);
1258 }
1259 if (flags & PURGE_CHILDREN) {
1260 cache_purge_children(vp);
1261 }
1262 if (name != NULL) {
1263 cache_purge_name(vp, name, namelen);
1264 }
1265 }
1266
1267 /*
1268 * vnode filter for cache_purgevfs().
1269 */
1270 static bool
1271 cache_vdir_filter(void *cookie, vnode_t *vp)
1272 {
1273
1274 return vp->v_type == VDIR;
1275 }
1276
1277 /*
1278 * Cache flush, a whole filesystem; called when filesys is umounted to
1279 * remove entries that would now be invalid.
1280 */
1281 void
1282 cache_purgevfs(struct mount *mp)
1283 {
1284 struct vnode_iterator *iter;
1285 vnode_t *dvp;
1286
1287 vfs_vnode_iterator_init(mp, &iter);
1288 for (;;) {
1289 dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
1290 if (dvp == NULL) {
1291 break;
1292 }
1293 cache_purge_children(dvp);
1294 vrele(dvp);
1295 }
1296 vfs_vnode_iterator_destroy(iter);
1297 }
1298
1299 /*
1300 * Re-queue an entry onto the tail of the active LRU list, after it has
1301 * scored a hit.
1302 */
1303 static void
1304 cache_activate(struct namecache *ncp)
1305 {
1306
1307 mutex_enter(&cache_lru_lock);
1308 TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
1309 TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1310 cache_lru.count[ncp->nc_lrulist]--;
1311 cache_lru.count[LRU_ACTIVE]++;
1312 ncp->nc_lrulist = LRU_ACTIVE;
1313 mutex_exit(&cache_lru_lock);
1314 }
1315
1316 /*
1317 * Try to balance the LRU lists. Pick some victim entries, and re-queue
1318 * them from the head of the active list to the tail of the inactive list.
1319 */
1320 static void
1321 cache_deactivate(void)
1322 {
1323 struct namecache *ncp;
1324 int total, i;
1325
1326 KASSERT(mutex_owned(&cache_lru_lock));
1327
1328 /* If we're nowhere near budget yet, don't bother. */
1329 total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
1330 if (total < (desiredvnodes >> 1)) {
1331 return;
1332 }
1333
1334 /*
1335 * Aim for a 1:1 ratio of active to inactive. This is to allow each
1336 * potential victim a reasonable amount of time to cycle through the
1337 * inactive list in order to score a hit and be reactivated, while
1338 * trying not to cause reactivations too frequently.
1339 */
1340 if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
1341 return;
1342 }
1343
1344 /* Move only a few at a time; will catch up eventually. */
1345 for (i = 0; i < cache_lru_maxdeact; i++) {
1346 ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
1347 if (ncp == NULL) {
1348 break;
1349 }
1350 KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
1351 ncp->nc_lrulist = LRU_INACTIVE;
1352 TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1353 TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
1354 cache_lru.count[LRU_ACTIVE]--;
1355 cache_lru.count[LRU_INACTIVE]++;
1356 }
1357 }
1358
1359 /*
1360 * Free some entries from the cache, when we have gone over budget.
1361 *
1362 * We don't want to cause too much work for any individual caller, and it
1363 * doesn't matter if we temporarily go over budget. This is also "just a
1364 * cache" so it's not a big deal if we screw up and throw out something we
1365 * shouldn't. So we take a relaxed attitude to this process to reduce its
1366 * impact.
1367 */
1368 static void
1369 cache_reclaim(void)
1370 {
1371 struct namecache *ncp;
1372 vnode_impl_t *dvi;
1373 int toscan;
1374
1375 /*
1376 * Scan up to a preset maximum number of entries, but no more than
1377 * 0.8% of the total at once (to allow for very small systems).
1378 *
1379 * On bigger systems, do a larger chunk of work to reduce the number
1380 * of times that cache_lru_lock is held for any length of time.
1381 */
1382 mutex_enter(&cache_lru_lock);
1383 toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
1384 toscan = MAX(toscan, 1);
1385 SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
1386 cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
1387 while (toscan-- != 0) {
1388 /* First try to balance the lists. */
1389 cache_deactivate();
1390
1391 /* Now look for a victim on head of inactive list (old). */
1392 ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
1393 if (ncp == NULL) {
1394 break;
1395 }
1396 dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
1397 KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
1398 KASSERT(dvi != NULL);
1399
1400 /*
1401 * Locking in the wrong direction. If we can't get the
1402 * lock, the directory is actively busy, and it could also
1403 * cause problems for the next guy in here, so send the
1404 * entry to the back of the list.
1405 */
1406 if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
1407 TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
1408 ncp, nc_lru);
1409 TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
1410 ncp, nc_lru);
1411 continue;
1412 }
1413
1414 /*
1415 * Now have the victim entry locked. Drop the LRU list
1416 * lock, purge the entry, and start over. The hold on
1417 * vi_nc_lock will prevent the vnode from vanishing until
1418 * finished (cache_purge() will be called on dvp before it
1419 * disappears, and that will wait on vi_nc_lock).
1420 */
1421 mutex_exit(&cache_lru_lock);
1422 cache_remove(ncp, true);
1423 rw_exit(&dvi->vi_nc_lock);
1424 mutex_enter(&cache_lru_lock);
1425 }
1426 mutex_exit(&cache_lru_lock);
1427 }
1428
1429 /*
1430 * For file system code: count a lookup that required a full re-scan of
1431 * directory metadata.
1432 */
1433 void
1434 namecache_count_pass2(void)
1435 {
1436
1437 COUNT(ncs_pass2);
1438 }
1439
1440 /*
1441 * For file system code: count a lookup that scored a hit in the directory
1442 * metadata near the location of the last lookup.
1443 */
1444 void
1445 namecache_count_2passes(void)
1446 {
1447
1448 COUNT(ncs_2passes);
1449 }
1450
1451 /*
1452 * Sum the stats from all CPUs into nchstats. This needs to run at least
1453 * once within every window where a 32-bit counter could roll over. It's
1454 * called regularly by timer to ensure this.
1455 */
1456 static void
1457 cache_update_stats(void *cookie)
1458 {
1459 CPU_INFO_ITERATOR cii;
1460 struct cpu_info *ci;
1461
1462 mutex_enter(&cache_stat_lock);
1463 for (CPU_INFO_FOREACH(cii, ci)) {
1464 struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
1465 UPDATE(nchcpu, ncs_goodhits);
1466 UPDATE(nchcpu, ncs_neghits);
1467 UPDATE(nchcpu, ncs_badhits);
1468 UPDATE(nchcpu, ncs_falsehits);
1469 UPDATE(nchcpu, ncs_miss);
1470 UPDATE(nchcpu, ncs_long);
1471 UPDATE(nchcpu, ncs_pass2);
1472 UPDATE(nchcpu, ncs_2passes);
1473 UPDATE(nchcpu, ncs_revhits);
1474 UPDATE(nchcpu, ncs_revmiss);
1475 UPDATE(nchcpu, ncs_denied);
1476 }
1477 if (cookie != NULL) {
1478 memcpy(cookie, &nchstats, sizeof(nchstats));
1479 }
1480 /* Reset the timer; arrive back here in N minutes at latest. */
1481 callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
1482 mutex_exit(&cache_stat_lock);
1483 }
1484
1485 /*
1486 * Fetch the current values of the stats for sysctl.
1487 */
1488 static int
1489 cache_stat_sysctl(SYSCTLFN_ARGS)
1490 {
1491 struct nchstats stats;
1492
1493 if (oldp == NULL) {
1494 *oldlenp = sizeof(nchstats);
1495 return 0;
1496 }
1497
1498 if (*oldlenp <= 0) {
1499 *oldlenp = 0;
1500 return 0;
1501 }
1502
1503 /* Refresh the global stats. */
1504 sysctl_unlock();
1505 cache_update_stats(&stats);
1506 sysctl_relock();
1507
1508 *oldlenp = MIN(sizeof(stats), *oldlenp);
1509 return sysctl_copyout(l, &stats, oldp, *oldlenp);
1510 }
1511
1512 /*
1513 * For the debugger, given the address of a vnode, print all associated
1514 * names in the cache.
1515 */
1516 #ifdef DDB
1517 void
1518 namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
1519 {
1520 struct vnode *dvp = NULL;
1521 struct namecache *ncp;
1522 enum cache_lru_id id;
1523
1524 for (id = 0; id < LRU_COUNT; id++) {
1525 TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1526 if (ncp->nc_vp == vp) {
1527 (*pr)("name %.*s\n", ncp->nc_nlen,
1528 ncp->nc_name);
1529 dvp = ncp->nc_dvp;
1530 }
1531 }
1532 }
1533 if (dvp == NULL) {
1534 (*pr)("name not found\n");
1535 return;
1536 }
1537 for (id = 0; id < LRU_COUNT; id++) {
1538 TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1539 if (ncp->nc_vp == dvp) {
1540 (*pr)("parent %.*s\n", ncp->nc_nlen,
1541 ncp->nc_name);
1542 }
1543 }
1544 }
1545 }
1546 #endif
1547