1 /* $NetBSD: vfs_vnode.c,v 1.149 2023/02/24 11:02:27 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1989, 1993
35 * The Regents of the University of California. All rights reserved.
36 * (c) UNIX System Laboratories, Inc.
37 * All or some portions of this file are derived from material licensed
38 * to the University of California by American Telephone and Telegraph
39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 * the permission of UNIX System Laboratories, Inc.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 */
68
69 /*
70 * The vnode cache subsystem.
71 *
72 * Life-cycle
73 *
74 * Normally, there are two points where new vnodes are created:
75 * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 * starts in one of the following ways:
77 *
78 * - Allocation, via vcache_get(9) or vcache_new(9).
79 * - Reclamation of inactive vnode, via vcache_vget(9).
80 *
81 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 *	was another, traditional way.  Currently, only the draining thread
83 *	recycles vnodes.  This behaviour might be revisited.
84 *
85 * The life-cycle ends when the last reference is dropped, usually
86 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
87 *	the file system that the vnode is inactive.  Via this call, the file
88 *	system indicates whether the vnode can be recycled (usually, it checks
89 *	its own references, e.g. the link count, or whether the file was removed).
90 *
91 *	Depending on that indication, the vnode can be put onto a free list
92 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
93 *	disassociate the underlying file system from the vnode, and finally
94 *	destroyed.
95 *
96 * Vnode state
97 *
98 * Vnode is always in one of six states:
99 * - MARKER This is a marker vnode to help list traversal. It
100 * will never change its state.
101 * - LOADING Vnode is associating underlying file system and not
102 * yet ready to use.
103 * - LOADED Vnode has associated underlying file system and is
104 * ready to use.
105 * - BLOCKED Vnode is active but cannot get new references.
106 * - RECLAIMING Vnode is disassociating from the underlying file
107 * system.
108 * - RECLAIMED Vnode has disassociated from underlying file system
109 * and is dead.
110 *
111 * Valid state changes are:
112 * LOADING -> LOADED
113 * Vnode has been initialised in vcache_get() or
114 * vcache_new() and is ready to use.
115 * BLOCKED -> RECLAIMING
116 * Vnode starts disassociation from underlying file
117 * system in vcache_reclaim().
118 * RECLAIMING -> RECLAIMED
119 * Vnode finished disassociation from underlying file
120 * system in vcache_reclaim().
121 * LOADED -> BLOCKED
122 * Either vcache_rekey*() is changing the vnode key or
123 * vrelel() is about to call VOP_INACTIVE().
124 * BLOCKED -> LOADED
125 * The block condition is over.
126 * LOADING -> RECLAIMED
127 * Either vcache_get() or vcache_new() failed to
128 * associate the underlying file system or vcache_rekey*()
129 * drops a vnode used as placeholder.
130 *
131 * Of these states LOADING, BLOCKED and RECLAIMING are intermediate
132 * and it is possible to wait for state change.
133 *
134 * State is protected with v_interlock with one exception:
135 * to change from LOADING both v_interlock and vcache_lock must be held
136 * so it is possible to check "state == LOADING" without holding
137 * v_interlock. See vcache_get() for details.
138 *
139 * Reference counting
140 *
141 *	A vnode is considered active if its reference count (vnode_t::v_usecount)
142 *	is non-zero.  It is maintained using the vref(9), vrele(9) and
143 *	vput(9) routines.  Typical holders of references are e.g. open
144 *	files, the current working directory, mount points, etc.
145 *
146 * v_usecount is adjusted with atomic operations, however to change
147 * from a non-zero value to zero the interlock must also be held.
148 */
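/*
 * A rough usage sketch (illustrative only; the key format and error
 * handling are file system specific).  A typical consumer gets a
 * referenced but unlocked vnode from the cache, locks it around the
 * vnode operations, and then drops lock and reference together:
 *
 *	error = vcache_get(mp, &key, key_len, &vp);
 *	if (error == 0) {
 *		error = vn_lock(vp, LK_EXCLUSIVE);
 *		if (error == 0) {
 *			... VOP_*(vp, ...) ...
 *			vput(vp);		(unlock + release)
 *		} else {
 *			vrele(vp);		(release only)
 *		}
 *	}
 */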
149
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.149 2023/02/24 11:02:27 riastradh Exp $");
152
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156
157 #include <sys/param.h>
158 #include <sys/kernel.h>
159
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/hash.h>
165 #include <sys/kauth.h>
166 #include <sys/kmem.h>
167 #include <sys/kthread.h>
168 #include <sys/module.h>
169 #include <sys/mount.h>
170 #include <sys/namei.h>
171 #include <sys/pax.h>
172 #include <sys/syscallargs.h>
173 #include <sys/sysctl.h>
174 #include <sys/systm.h>
175 #include <sys/vnode_impl.h>
176 #include <sys/wapbl.h>
177 #include <sys/fstrans.h>
178
179 #include <miscfs/deadfs/deadfs.h>
180 #include <miscfs/specfs/specdev.h>
181
182 #include <uvm/uvm.h>
183 #include <uvm/uvm_readahead.h>
184 #include <uvm/uvm_stat.h>
185
186 /* Flags to vrelel. */
187 #define VRELEL_ASYNC 0x0001 /* Always defer to vrele thread. */
188
189 #define LRU_VRELE 0
190 #define LRU_FREE 1
191 #define LRU_HOLD 2
192 #define LRU_COUNT 3
193
194 /*
195 * There are three lru lists: one holds vnodes waiting for async release,
196 * one is for vnodes which have no buffer/page references and one for those
197 * which do (i.e. v_holdcnt is non-zero). We put the lists into a single,
198 * private cache line as vnodes migrate between them while under the same
199 * lock (vdrain_lock).
200 */
201 u_int numvnodes __cacheline_aligned;
202 static vnodelst_t lru_list[LRU_COUNT] __cacheline_aligned;
203 static kmutex_t vdrain_lock __cacheline_aligned;
204 static kcondvar_t vdrain_cv;
205 static int vdrain_gen;
206 static kcondvar_t vdrain_gen_cv;
207 static bool vdrain_retry;
208 static lwp_t * vdrain_lwp;
209 SLIST_HEAD(hashhead, vnode_impl);
210 static kmutex_t vcache_lock __cacheline_aligned;
211 static kcondvar_t vcache_cv;
212 static u_int vcache_hashsize;
213 static u_long vcache_hashmask;
214 static struct hashhead *vcache_hashtab;
215 static pool_cache_t vcache_pool;
216 static void lru_requeue(vnode_t *, vnodelst_t *);
217 static vnodelst_t * lru_which(vnode_t *);
218 static vnode_impl_t * vcache_alloc(void);
219 static void vcache_dealloc(vnode_impl_t *);
220 static void vcache_free(vnode_impl_t *);
221 static void vcache_init(void);
222 static void vcache_reinit(void);
223 static void vcache_reclaim(vnode_t *);
224 static void vrelel(vnode_t *, int, int);
225 static void vdrain_thread(void *);
226 static void vnpanic(vnode_t *, const char *, ...)
227 __printflike(2, 3);
228
229 /* Routines having to do with the management of the vnode table. */
230
231 /*
232 * The high bit of v_usecount is a gate for vcache_tryvget(). It's set
233 * only when the vnode state is LOADED.
234 * The next bit of v_usecount is a flag for vrelel(). It's set
235 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
236 */
237 #define VUSECOUNT_MASK 0x3fffffff
238 #define VUSECOUNT_GATE 0x80000000
239 #define VUSECOUNT_VGET 0x40000000
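
/*
 * As a worked example, an (illustrative) v_usecount value of 0xc0000002
 * decodes as: gate open (VUSECOUNT_GATE), a vcache_vget() or
 * vcache_tryvget() has succeeded (VUSECOUNT_VGET), and two references
 * are held (0xc0000002 & VUSECOUNT_MASK == 2).
 */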
240
241 /*
242 * Return the current usecount of a vnode.
243 */
244 inline int
245 vrefcnt(struct vnode *vp)
246 {
247
248 return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
249 }
250
251 /* Vnode state operations and diagnostics. */
252
253 #if defined(DIAGNOSTIC)
254
255 #define VSTATE_VALID(state) \
256 ((state) != VS_ACTIVE && (state) != VS_MARKER)
257 #define VSTATE_GET(vp) \
258 vstate_assert_get((vp), __func__, __LINE__)
259 #define VSTATE_CHANGE(vp, from, to) \
260 vstate_assert_change((vp), (from), (to), __func__, __LINE__)
261 #define VSTATE_WAIT_STABLE(vp) \
262 vstate_assert_wait_stable((vp), __func__, __LINE__)
263
264 void
265 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
266 bool has_lock)
267 {
268 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
269 int refcnt = vrefcnt(vp);
270
271 if (!has_lock) {
272 enum vnode_state vstate = atomic_load_relaxed(&vip->vi_state);
273
274 if (state == VS_ACTIVE && refcnt > 0 &&
275 (vstate == VS_LOADED || vstate == VS_BLOCKED))
276 return;
277 if (vstate == state)
278 return;
279 mutex_enter((vp)->v_interlock);
280 }
281
282 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
283
284 if ((state == VS_ACTIVE && refcnt > 0 &&
285 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
286 vip->vi_state == state) {
287 if (!has_lock)
288 mutex_exit((vp)->v_interlock);
289 return;
290 }
291 vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
292 vstate_name(vip->vi_state), refcnt,
293 vstate_name(state), func, line);
294 }
295
296 static enum vnode_state
297 vstate_assert_get(vnode_t *vp, const char *func, int line)
298 {
299 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
300
301 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
302 if (! VSTATE_VALID(vip->vi_state))
303 vnpanic(vp, "state is %s at %s:%d",
304 vstate_name(vip->vi_state), func, line);
305
306 return vip->vi_state;
307 }
308
309 static void
310 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
311 {
312 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
313
314 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
315 if (! VSTATE_VALID(vip->vi_state))
316 vnpanic(vp, "state is %s at %s:%d",
317 vstate_name(vip->vi_state), func, line);
318
319 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
320 cv_wait(&vp->v_cv, vp->v_interlock);
321
322 if (! VSTATE_VALID(vip->vi_state))
323 vnpanic(vp, "state is %s at %s:%d",
324 vstate_name(vip->vi_state), func, line);
325 }
326
327 static void
328 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
329 const char *func, int line)
330 {
331 bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
332 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
333
334 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
335 if (from == VS_LOADING)
336 KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
337
338 if (! VSTATE_VALID(from))
339 vnpanic(vp, "from is %s at %s:%d",
340 vstate_name(from), func, line);
341 if (! VSTATE_VALID(to))
342 vnpanic(vp, "to is %s at %s:%d",
343 vstate_name(to), func, line);
344 if (vip->vi_state != from)
345 vnpanic(vp, "from is %s, expected %s at %s:%d\n",
346 vstate_name(vip->vi_state), vstate_name(from), func, line);
347 if ((from == VS_LOADED) != gated)
348 vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
349 vstate_name(vip->vi_state), gated, func, line);
350
351 /* Open/close the gate for vcache_tryvget(). */
352 if (to == VS_LOADED) {
353 membar_release();
354 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
355 } else {
356 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
357 }
358
359 atomic_store_relaxed(&vip->vi_state, to);
360 if (from == VS_LOADING)
361 cv_broadcast(&vcache_cv);
362 if (to == VS_LOADED || to == VS_RECLAIMED)
363 cv_broadcast(&vp->v_cv);
364 }
365
366 #else /* defined(DIAGNOSTIC) */
367
368 #define VSTATE_GET(vp) \
369 (VNODE_TO_VIMPL((vp))->vi_state)
370 #define VSTATE_CHANGE(vp, from, to) \
371 vstate_change((vp), (from), (to))
372 #define VSTATE_WAIT_STABLE(vp) \
373 vstate_wait_stable((vp))
374 void
375 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
376 bool has_lock)
377 {
378
379 }
380
381 static void
382 vstate_wait_stable(vnode_t *vp)
383 {
384 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
385
386 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
387 cv_wait(&vp->v_cv, vp->v_interlock);
388 }
389
390 static void
391 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
392 {
393 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
394
395 /* Open/close the gate for vcache_tryvget(). */
396 if (to == VS_LOADED) {
397 membar_release();
398 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
399 } else {
400 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
401 }
402
403 atomic_store_relaxed(&vip->vi_state, to);
404 if (from == VS_LOADING)
405 cv_broadcast(&vcache_cv);
406 if (to == VS_LOADED || to == VS_RECLAIMED)
407 cv_broadcast(&vp->v_cv);
408 }
409
410 #endif /* defined(DIAGNOSTIC) */
411
412 void
413 vfs_vnode_sysinit(void)
414 {
415 int error __diagused, i;
416
417 dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
418 KASSERT(dead_rootmount != NULL);
419 dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
420
421 mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
422 for (i = 0; i < LRU_COUNT; i++) {
423 TAILQ_INIT(&lru_list[i]);
424 }
425 vcache_init();
426
427 cv_init(&vdrain_cv, "vdrain");
428 cv_init(&vdrain_gen_cv, "vdrainwt");
429 error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
430 NULL, &vdrain_lwp, "vdrain");
431 KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
432 }
433
434 /*
435 * Allocate a new marker vnode.
436 */
437 vnode_t *
438 vnalloc_marker(struct mount *mp)
439 {
440 vnode_impl_t *vip;
441 vnode_t *vp;
442
443 vip = pool_cache_get(vcache_pool, PR_WAITOK);
444 memset(vip, 0, sizeof(*vip));
445 vp = VIMPL_TO_VNODE(vip);
446 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
447 vp->v_mount = mp;
448 vp->v_type = VBAD;
449 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
450 klist_init(&vip->vi_klist.vk_klist);
451 vp->v_klist = &vip->vi_klist;
452 vip->vi_state = VS_MARKER;
453
454 return vp;
455 }
456
457 /*
458 * Free a marker vnode.
459 */
460 void
461 vnfree_marker(vnode_t *vp)
462 {
463 vnode_impl_t *vip;
464
465 vip = VNODE_TO_VIMPL(vp);
466 KASSERT(vip->vi_state == VS_MARKER);
467 mutex_obj_free(vp->v_interlock);
468 uvm_obj_destroy(&vp->v_uobj, true);
469 klist_fini(&vip->vi_klist.vk_klist);
470 pool_cache_put(vcache_pool, vip);
471 }
472
473 /*
474 * Test a vnode for being a marker vnode.
475 */
476 bool
477 vnis_marker(vnode_t *vp)
478 {
479
480 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
481 }
482
483 /*
484 * Return the lru list this node should be on.
485 */
486 static vnodelst_t *
487 lru_which(vnode_t *vp)
488 {
489
490 KASSERT(mutex_owned(vp->v_interlock));
491
492 if (vp->v_holdcnt > 0)
493 return &lru_list[LRU_HOLD];
494 else
495 return &lru_list[LRU_FREE];
496 }
497
498 /*
499 * Put vnode to end of given list.
500 * Both the current and the new list may be NULL, used on vnode alloc/free.
501 * Adjust numvnodes and signal vdrain thread if there is work.
502 */
503 static void
504 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
505 {
506 vnode_impl_t *vip;
507 int d;
508
509 /*
510 * If the vnode is on the correct list, and was put there recently,
511 * then leave it be, thus avoiding huge cache and lock contention.
512 */
513 vip = VNODE_TO_VIMPL(vp);
514 if (listhd == vip->vi_lrulisthd &&
515 (getticks() - vip->vi_lrulisttm) < hz) {
516 return;
517 }
518
519 mutex_enter(&vdrain_lock);
520 d = 0;
521 if (vip->vi_lrulisthd != NULL)
522 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
523 else
524 d++;
525 vip->vi_lrulisthd = listhd;
526 vip->vi_lrulisttm = getticks();
527 if (vip->vi_lrulisthd != NULL)
528 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
529 else
530 d--;
531 if (d != 0) {
532 /*
533 * Looks strange? This is not a bug. Don't store
534 * numvnodes unless there is a change - avoid false
535 * sharing on MP.
536 */
537 numvnodes += d;
538 }
539 if ((d > 0 && numvnodes > desiredvnodes) ||
540 listhd == &lru_list[LRU_VRELE])
541 cv_signal(&vdrain_cv);
542 mutex_exit(&vdrain_lock);
543 }
544
545 /*
546 * Release deferred vrele vnodes for this mount.
547 * Called with file system suspended.
548 */
549 void
550 vrele_flush(struct mount *mp)
551 {
552 vnode_impl_t *vip, *marker;
553 vnode_t *vp;
554 int when = 0;
555
556 KASSERT(fstrans_is_owner(mp));
557
558 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
559
560 mutex_enter(&vdrain_lock);
561 TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
562
563 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
564 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
565 TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
566 vi_lrulist);
567 vp = VIMPL_TO_VNODE(vip);
568 if (vnis_marker(vp))
569 continue;
570
571 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
572 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
573 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
574 vip->vi_lrulisttm = getticks();
575 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
576 mutex_exit(&vdrain_lock);
577
578 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
579 mutex_enter(vp->v_interlock);
580 vrelel(vp, 0, LK_EXCLUSIVE);
581
582 if (getticks() > when) {
583 yield();
584 when = getticks() + hz / 10;
585 }
586
587 mutex_enter(&vdrain_lock);
588 }
589
590 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
591 mutex_exit(&vdrain_lock);
592
593 vnfree_marker(VIMPL_TO_VNODE(marker));
594 }
595
596 /*
597 * Reclaim a cached vnode. Used from vdrain_thread only.
598 */
599 static __inline void
600 vdrain_remove(vnode_t *vp)
601 {
602 struct mount *mp;
603
604 KASSERT(mutex_owned(&vdrain_lock));
605
606 /* Probe usecount (unlocked). */
607 if (vrefcnt(vp) > 0)
608 return;
609 /* Try v_interlock -- we lock the wrong direction! */
610 if (!mutex_tryenter(vp->v_interlock))
611 return;
612 /* Probe usecount and state. */
613 if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
614 mutex_exit(vp->v_interlock);
615 return;
616 }
617 mp = vp->v_mount;
618 if (fstrans_start_nowait(mp) != 0) {
619 mutex_exit(vp->v_interlock);
620 return;
621 }
622 vdrain_retry = true;
623 mutex_exit(&vdrain_lock);
624
625 if (vcache_vget(vp) == 0) {
626 if (!vrecycle(vp)) {
627 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
628 mutex_enter(vp->v_interlock);
629 vrelel(vp, 0, LK_EXCLUSIVE);
630 }
631 }
632 fstrans_done(mp);
633
634 mutex_enter(&vdrain_lock);
635 }
636
637 /*
638 * Release a cached vnode. Used from vdrain_thread only.
639 */
640 static __inline void
641 vdrain_vrele(vnode_t *vp)
642 {
643 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
644 struct mount *mp;
645
646 KASSERT(mutex_owned(&vdrain_lock));
647
648 mp = vp->v_mount;
649 if (fstrans_start_nowait(mp) != 0)
650 return;
651
652 /*
653 * First remove the vnode from the vrele list.
654 * Put it on the last lru list, the last vrele()
655 * will put it back onto the right list before
656 * its usecount reaches zero.
657 */
658 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
659 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
660 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
661 vip->vi_lrulisttm = getticks();
662 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
663
664 vdrain_retry = true;
665 mutex_exit(&vdrain_lock);
666
667 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
668 mutex_enter(vp->v_interlock);
669 vrelel(vp, 0, LK_EXCLUSIVE);
670 fstrans_done(mp);
671
672 mutex_enter(&vdrain_lock);
673 }
674
675 /*
676 * Helper thread to keep the number of vnodes below desiredvnodes
677 * and release vnodes from asynchronous vrele.
678 */
679 static void
680 vdrain_thread(void *cookie)
681 {
682 int i;
683 u_int target;
684 vnode_impl_t *vip, *marker;
685
686 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
687
688 mutex_enter(&vdrain_lock);
689
690 for (;;) {
691 vdrain_retry = false;
692 target = desiredvnodes - desiredvnodes/10;
693
694 for (i = 0; i < LRU_COUNT; i++) {
695 TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
696 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
697 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
698 TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
699 vi_lrulist);
700 if (vnis_marker(VIMPL_TO_VNODE(vip)))
701 continue;
702 if (i == LRU_VRELE)
703 vdrain_vrele(VIMPL_TO_VNODE(vip));
704 else if (numvnodes < target)
705 break;
706 else
707 vdrain_remove(VIMPL_TO_VNODE(vip));
708 }
709 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
710 }
711
712 if (vdrain_retry) {
713 kpause("vdrainrt", false, 1, &vdrain_lock);
714 } else {
715 vdrain_gen++;
716 cv_broadcast(&vdrain_gen_cv);
717 cv_wait(&vdrain_cv, &vdrain_lock);
718 }
719 }
720 }
721
722 /*
723 * Try to drop reference on a vnode. Abort if we are releasing the
724 * last reference. Note: this _must_ succeed if not the last reference.
725 */
726 static bool
727 vtryrele(vnode_t *vp)
728 {
729 u_int use, next;
730
731 membar_release();
732 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
733 if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
734 return false;
735 }
736 KASSERT((use & VUSECOUNT_MASK) > 1);
737 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
738 if (__predict_true(next == use)) {
739 return true;
740 }
741 }
742 }
743
744 /*
745 * vput: unlock and release the reference.
746 */
747 void
748 vput(vnode_t *vp)
749 {
750 int lktype;
751
752 /*
753 * Do an unlocked check of the usecount. If it looks like we're not
754 * about to drop the last reference, then unlock the vnode and try
755 * to drop the reference. If it ends up being the last reference
756 * after all, vrelel() can fix it all up. Most of the time this
757 * will all go to plan.
758 */
759 if (vrefcnt(vp) > 1) {
760 VOP_UNLOCK(vp);
761 if (vtryrele(vp)) {
762 return;
763 }
764 lktype = LK_NONE;
765 } else {
766 lktype = VOP_ISLOCKED(vp);
767 KASSERT(lktype != LK_NONE);
768 }
769 mutex_enter(vp->v_interlock);
770 vrelel(vp, 0, lktype);
771 }
772
773 /*
774 * Vnode release. If reference count drops to zero, call inactive
775 * routine and either return to freelist or free to the pool.
776 */
777 static void
778 vrelel(vnode_t *vp, int flags, int lktype)
779 {
780 const bool async = ((flags & VRELEL_ASYNC) != 0);
781 bool recycle, defer, objlock_held;
782 u_int use, next;
783 int error;
784
785 objlock_held = false;
786
787 retry:
788 KASSERT(mutex_owned(vp->v_interlock));
789
790 if (__predict_false(vp->v_op == dead_vnodeop_p &&
791 VSTATE_GET(vp) != VS_RECLAIMED)) {
792 vnpanic(vp, "dead but not clean");
793 }
794
795 /*
796 * If not the last reference, just unlock and drop the reference count.
797 *
798 * Otherwise make sure we pass a point in time where we hold the
799 * last reference with VGET flag unset.
800 */
801 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
802 if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
803 if (objlock_held) {
804 objlock_held = false;
805 rw_exit(vp->v_uobj.vmobjlock);
806 }
807 if (lktype != LK_NONE) {
808 mutex_exit(vp->v_interlock);
809 lktype = LK_NONE;
810 VOP_UNLOCK(vp);
811 mutex_enter(vp->v_interlock);
812 }
813 if (vtryrele(vp)) {
814 mutex_exit(vp->v_interlock);
815 return;
816 }
817 next = atomic_load_relaxed(&vp->v_usecount);
818 continue;
819 }
820 KASSERT((use & VUSECOUNT_MASK) == 1);
821 next = use & ~VUSECOUNT_VGET;
822 if (next != use) {
823 next = atomic_cas_uint(&vp->v_usecount, use, next);
824 }
825 if (__predict_true(next == use)) {
826 break;
827 }
828 }
829 membar_acquire();
830 if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
831 vnpanic(vp, "%s: bad ref count", __func__);
832 }
833
834 #ifdef DIAGNOSTIC
835 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
836 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
837 vprint("vrelel: missing VOP_CLOSE()", vp);
838 }
839 #endif
840
841 /*
842 * If already clean there is no need to lock, defer or
843 * deactivate this node.
844 */
845 if (VSTATE_GET(vp) == VS_RECLAIMED) {
846 if (objlock_held) {
847 objlock_held = false;
848 rw_exit(vp->v_uobj.vmobjlock);
849 }
850 if (lktype != LK_NONE) {
851 mutex_exit(vp->v_interlock);
852 lktype = LK_NONE;
853 VOP_UNLOCK(vp);
854 mutex_enter(vp->v_interlock);
855 }
856 goto out;
857 }
858
859 /*
860 * First try to get the vnode locked for VOP_INACTIVE().
861 * Defer vnode release to vdrain_thread if the caller requests
862 * it explicitly, is the pagedaemon, or if the lock attempt failed.
863 */
864 defer = false;
865 if ((curlwp == uvm.pagedaemon_lwp) || async) {
866 defer = true;
867 } else if (lktype == LK_SHARED) {
868 /* Excellent chance of getting, if the last ref. */
869 error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
870 if (error != 0) {
871 defer = true;
872 } else {
873 lktype = LK_EXCLUSIVE;
874 }
875 } else if (lktype == LK_NONE) {
876 /* Excellent chance of getting, if the last ref. */
877 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
878 if (error != 0) {
879 defer = true;
880 } else {
881 lktype = LK_EXCLUSIVE;
882 }
883 }
884 KASSERT(mutex_owned(vp->v_interlock));
885 if (defer) {
886 /*
887 * Defer reclaim to the kthread; it's not safe to
888 * clean it here. We donate it our last reference.
889 */
890 if (lktype != LK_NONE) {
891 mutex_exit(vp->v_interlock);
892 VOP_UNLOCK(vp);
893 mutex_enter(vp->v_interlock);
894 }
895 lru_requeue(vp, &lru_list[LRU_VRELE]);
896 mutex_exit(vp->v_interlock);
897 return;
898 }
899 KASSERT(lktype == LK_EXCLUSIVE);
900
901 /* If the node gained another reference, retry. */
902 use = atomic_load_relaxed(&vp->v_usecount);
903 if ((use & VUSECOUNT_VGET) != 0) {
904 goto retry;
905 }
906 KASSERT((use & VUSECOUNT_MASK) == 1);
907
908 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
909 (vp->v_vflag & VV_MAPPED) != 0) {
910 /* Take care of space accounting. */
911 if (!objlock_held) {
912 objlock_held = true;
913 if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
914 mutex_exit(vp->v_interlock);
915 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
916 mutex_enter(vp->v_interlock);
917 goto retry;
918 }
919 }
920 if ((vp->v_iflag & VI_EXECMAP) != 0) {
921 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
922 }
923 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
924 vp->v_vflag &= ~VV_MAPPED;
925 }
926 if (objlock_held) {
927 objlock_held = false;
928 rw_exit(vp->v_uobj.vmobjlock);
929 }
930
931 /*
932 * Deactivate the vnode, but preserve our reference across
933 * the call to VOP_INACTIVE().
934 *
935 * If VOP_INACTIVE() indicates that the file has been
936 * deleted, then recycle the vnode.
937 *
938 * Note that VOP_INACTIVE() will not drop the vnode lock.
939 */
940 mutex_exit(vp->v_interlock);
941 recycle = false;
942 VOP_INACTIVE(vp, &recycle);
943 if (!recycle) {
944 lktype = LK_NONE;
945 VOP_UNLOCK(vp);
946 }
947 mutex_enter(vp->v_interlock);
948
949 /*
950 * Block new references then check again to see if a
951 * new reference was acquired in the meantime. If
952 * it was, restore the vnode state and try again.
953 */
954 if (recycle) {
955 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
956 use = atomic_load_relaxed(&vp->v_usecount);
957 if ((use & VUSECOUNT_VGET) != 0) {
958 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
959 goto retry;
960 }
961 KASSERT((use & VUSECOUNT_MASK) == 1);
962 }
963
964 /*
965 * Recycle the vnode if the file is now unused (unlinked).
966 */
967 if (recycle) {
968 VSTATE_ASSERT(vp, VS_BLOCKED);
969 KASSERT(lktype == LK_EXCLUSIVE);
970 /* vcache_reclaim drops the lock. */
971 lktype = LK_NONE;
972 vcache_reclaim(vp);
973 }
974 KASSERT(vrefcnt(vp) > 0);
975 KASSERT(lktype == LK_NONE);
976
977 out:
978 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
979 if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
980 (use & VUSECOUNT_MASK) == 1)) {
981 /* Gained and released another reference, retry. */
982 goto retry;
983 }
984 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
985 if (__predict_true(next == use)) {
986 if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
987 /* Gained another reference. */
988 mutex_exit(vp->v_interlock);
989 return;
990 }
991 break;
992 }
993 }
994 membar_acquire();
995
996 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
997 /*
998 * It's clean so destroy it. It isn't referenced
999 * anywhere since it has been reclaimed.
1000 */
1001 vcache_free(VNODE_TO_VIMPL(vp));
1002 } else {
1003 /*
1004 * Otherwise, put it back onto the freelist. It
1005 * can't be destroyed while still associated with
1006 * a file system.
1007 */
1008 lru_requeue(vp, lru_which(vp));
1009 mutex_exit(vp->v_interlock);
1010 }
1011 }
1012
1013 void
1014 vrele(vnode_t *vp)
1015 {
1016
1017 if (vtryrele(vp)) {
1018 return;
1019 }
1020 mutex_enter(vp->v_interlock);
1021 vrelel(vp, 0, LK_NONE);
1022 }
1023
1024 /*
1025 * Asynchronous vnode release: the vnode is released in a different context.
1026 */
1027 void
1028 vrele_async(vnode_t *vp)
1029 {
1030
1031 if (vtryrele(vp)) {
1032 return;
1033 }
1034 mutex_enter(vp->v_interlock);
1035 vrelel(vp, VRELEL_ASYNC, LK_NONE);
1036 }
1037
1038 /*
1039 * Vnode reference, where a reference is already held by some other
1040 * object (for example, a file structure).
1041 *
1042 * NB: lockless code sequences may rely on this not blocking.
1043 */
1044 void
1045 vref(vnode_t *vp)
1046 {
1047
1048 KASSERT(vrefcnt(vp) > 0);
1049
1050 atomic_inc_uint(&vp->v_usecount);
1051 }
1052
1053 /*
1054 * Page or buffer structure gets a reference.
1055 * Called with v_interlock held.
1056 */
1057 void
1058 vholdl(vnode_t *vp)
1059 {
1060
1061 KASSERT(mutex_owned(vp->v_interlock));
1062
1063 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1064 lru_requeue(vp, lru_which(vp));
1065 }
1066
1067 /*
1068 * Page or buffer structure gets a reference.
1069 */
1070 void
1071 vhold(vnode_t *vp)
1072 {
1073
1074 mutex_enter(vp->v_interlock);
1075 vholdl(vp);
1076 mutex_exit(vp->v_interlock);
1077 }
1078
1079 /*
1080 * Page or buffer structure frees a reference.
1081 * Called with v_interlock held.
1082 */
1083 void
1084 holdrelel(vnode_t *vp)
1085 {
1086
1087 KASSERT(mutex_owned(vp->v_interlock));
1088
1089 if (vp->v_holdcnt <= 0) {
1090 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1091 }
1092
1093 vp->v_holdcnt--;
1094 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1095 lru_requeue(vp, lru_which(vp));
1096 }
1097
1098 /*
1099 * Page or buffer structure frees a reference.
1100 */
1101 void
1102 holdrele(vnode_t *vp)
1103 {
1104
1105 mutex_enter(vp->v_interlock);
1106 holdrelel(vp);
1107 mutex_exit(vp->v_interlock);
1108 }
1109
1110 /*
1111 * Recycle an unused vnode if caller holds the last reference.
1112 */
1113 bool
1114 vrecycle(vnode_t *vp)
1115 {
1116 int error __diagused;
1117
1118 mutex_enter(vp->v_interlock);
1119
1120 /* If the vnode is already clean we're done. */
1121 VSTATE_WAIT_STABLE(vp);
1122 if (VSTATE_GET(vp) != VS_LOADED) {
1123 VSTATE_ASSERT(vp, VS_RECLAIMED);
1124 vrelel(vp, 0, LK_NONE);
1125 return true;
1126 }
1127
1128 /* Prevent further references until the vnode is locked. */
1129 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1130
1131 /* Make sure we hold the last reference. */
1132 if (vrefcnt(vp) != 1) {
1133 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1134 mutex_exit(vp->v_interlock);
1135 return false;
1136 }
1137
1138 mutex_exit(vp->v_interlock);
1139
1140 /*
1141 * On a leaf file system this lock will always succeed as we hold
1142 * the last reference and prevent further references.
1143 * On layered file systems waiting for the lock would open a can of
1144 * deadlocks as the lower vnodes may have other active references.
1145 */
1146 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1147
1148 mutex_enter(vp->v_interlock);
1149 if (error) {
1150 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1151 mutex_exit(vp->v_interlock);
1152 return false;
1153 }
1154
1155 KASSERT(vrefcnt(vp) == 1);
1156 vcache_reclaim(vp);
1157 vrelel(vp, 0, LK_NONE);
1158
1159 return true;
1160 }
1161
1162 /*
1163 * Helper for vrevoke() to propagate suspension from lastmp
1164 * to thismp. Both args may be NULL.
1165 * Returns the currently suspended file system or NULL.
1166 */
1167 static struct mount *
1168 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1169 {
1170 int error;
1171
1172 if (lastmp == thismp)
1173 return thismp;
1174
1175 if (lastmp != NULL)
1176 vfs_resume(lastmp);
1177
1178 if (thismp == NULL)
1179 return NULL;
1180
1181 do {
1182 error = vfs_suspend(thismp, 0);
1183 } while (error == EINTR || error == ERESTART);
1184
1185 if (error == 0)
1186 return thismp;
1187
1188 KASSERT(error == EOPNOTSUPP || error == ENOENT);
1189 return NULL;
1190 }
1191
1192 /*
1193 * Eliminate all activity associated with the requested vnode
1194 * and with all vnodes aliased to the requested vnode.
1195 */
1196 void
1197 vrevoke(vnode_t *vp)
1198 {
1199 struct mount *mp;
1200 vnode_t *vq;
1201 enum vtype type;
1202 dev_t dev;
1203
1204 KASSERT(vrefcnt(vp) > 0);
1205
1206 mp = vrevoke_suspend_next(NULL, vp->v_mount);
1207
1208 mutex_enter(vp->v_interlock);
1209 VSTATE_WAIT_STABLE(vp);
1210 if (VSTATE_GET(vp) == VS_RECLAIMED) {
1211 mutex_exit(vp->v_interlock);
1212 } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1213 atomic_inc_uint(&vp->v_usecount);
1214 mutex_exit(vp->v_interlock);
1215 vgone(vp);
1216 } else {
1217 dev = vp->v_rdev;
1218 type = vp->v_type;
1219 mutex_exit(vp->v_interlock);
1220
1221 while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
1222 == 0) {
1223 mp = vrevoke_suspend_next(mp, vq->v_mount);
1224 vgone(vq);
1225 }
1226 }
1227 vrevoke_suspend_next(mp, NULL);
1228 }
1229
1230 /*
1231 * Eliminate all activity associated with a vnode in preparation for
1232 * reuse. Drops a reference from the vnode.
1233 */
1234 void
1235 vgone(vnode_t *vp)
1236 {
1237 int lktype;
1238
1239 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1240
1241 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1242 lktype = LK_EXCLUSIVE;
1243 mutex_enter(vp->v_interlock);
1244 VSTATE_WAIT_STABLE(vp);
1245 if (VSTATE_GET(vp) == VS_LOADED) {
1246 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1247 vcache_reclaim(vp);
1248 lktype = LK_NONE;
1249 }
1250 VSTATE_ASSERT(vp, VS_RECLAIMED);
1251 vrelel(vp, 0, lktype);
1252 }
1253
1254 static inline uint32_t
1255 vcache_hash(const struct vcache_key *key)
1256 {
1257 uint32_t hash = HASH32_BUF_INIT;
1258
1259 KASSERT(key->vk_key_len > 0);
1260
1261 hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1262 hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1263 return hash;
1264 }
1265
1266 static int
1267 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1268 {
1269 vnode_impl_t *vip;
1270 uint64_t chain;
1271
1272 strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1273 strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1274 if (!fill)
1275 return 0;
1276
1277 hs->hash_size = vcache_hashmask + 1;
1278
1279 for (size_t i = 0; i < hs->hash_size; i++) {
1280 chain = 0;
1281 mutex_enter(&vcache_lock);
1282 SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1283 chain++;
1284 }
1285 mutex_exit(&vcache_lock);
1286 if (chain > 0) {
1287 hs->hash_used++;
1288 hs->hash_items += chain;
1289 if (chain > hs->hash_maxchain)
1290 hs->hash_maxchain = chain;
1291 }
1292 preempt_point();
1293 }
1294
1295 return 0;
1296 }
1297
1298 static void
1299 vcache_init(void)
1300 {
1301
1302 vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1303 0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1304 KASSERT(vcache_pool != NULL);
1305 mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1306 cv_init(&vcache_cv, "vcache");
1307 vcache_hashsize = desiredvnodes;
1308 vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1309 &vcache_hashmask);
1310 hashstat_register("vcache", vcache_stats);
1311 }
1312
1313 static void
1314 vcache_reinit(void)
1315 {
1316 int i;
1317 uint32_t hash;
1318 u_long oldmask, newmask;
1319 struct hashhead *oldtab, *newtab;
1320 vnode_impl_t *vip;
1321
1322 newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1323 mutex_enter(&vcache_lock);
1324 oldtab = vcache_hashtab;
1325 oldmask = vcache_hashmask;
1326 vcache_hashsize = desiredvnodes;
1327 vcache_hashtab = newtab;
1328 vcache_hashmask = newmask;
1329 for (i = 0; i <= oldmask; i++) {
1330 while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1331 SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1332 hash = vcache_hash(&vip->vi_key);
1333 SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1334 vip, vi_hash);
1335 }
1336 }
1337 mutex_exit(&vcache_lock);
1338 hashdone(oldtab, HASH_SLIST, oldmask);
1339 }
1340
1341 static inline vnode_impl_t *
1342 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1343 {
1344 struct hashhead *hashp;
1345 vnode_impl_t *vip;
1346
1347 KASSERT(mutex_owned(&vcache_lock));
1348
1349 hashp = &vcache_hashtab[hash & vcache_hashmask];
1350 SLIST_FOREACH(vip, hashp, vi_hash) {
1351 if (key->vk_mount != vip->vi_key.vk_mount)
1352 continue;
1353 if (key->vk_key_len != vip->vi_key.vk_key_len)
1354 continue;
1355 if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1356 continue;
1357 return vip;
1358 }
1359 return NULL;
1360 }
1361
1362 /*
1363 * Allocate a new, uninitialized vcache node.
1364 */
1365 static vnode_impl_t *
1366 vcache_alloc(void)
1367 {
1368 vnode_impl_t *vip;
1369 vnode_t *vp;
1370
1371 vip = pool_cache_get(vcache_pool, PR_WAITOK);
1372 vp = VIMPL_TO_VNODE(vip);
1373 memset(vip, 0, sizeof(*vip));
1374
1375 rw_init(&vip->vi_lock);
1376 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1377
1378 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1379 klist_init(&vip->vi_klist.vk_klist);
1380 vp->v_klist = &vip->vi_klist;
1381 cv_init(&vp->v_cv, "vnode");
1382 cache_vnode_init(vp);
1383
1384 vp->v_usecount = 1;
1385 vp->v_type = VNON;
1386 vp->v_size = vp->v_writesize = VSIZENOTSET;
1387
1388 vip->vi_state = VS_LOADING;
1389
1390 lru_requeue(vp, &lru_list[LRU_FREE]);
1391
1392 return vip;
1393 }
1394
1395 /*
1396 * Deallocate a vcache node in state VS_LOADING.
1397 *
1398 * vcache_lock held on entry and released on return.
1399 */
1400 static void
1401 vcache_dealloc(vnode_impl_t *vip)
1402 {
1403 vnode_t *vp;
1404
1405 KASSERT(mutex_owned(&vcache_lock));
1406
1407 vp = VIMPL_TO_VNODE(vip);
1408 vfs_ref(dead_rootmount);
1409 vfs_insmntque(vp, dead_rootmount);
1410 mutex_enter(vp->v_interlock);
1411 vp->v_op = dead_vnodeop_p;
1412 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1413 mutex_exit(&vcache_lock);
1414 vrelel(vp, 0, LK_NONE);
1415 }
1416
1417 /*
1418 * Free an unused, unreferenced vcache node.
1419 * v_interlock locked on entry.
1420 */
1421 static void
1422 vcache_free(vnode_impl_t *vip)
1423 {
1424 vnode_t *vp;
1425
1426 vp = VIMPL_TO_VNODE(vip);
1427 KASSERT(mutex_owned(vp->v_interlock));
1428
1429 KASSERT(vrefcnt(vp) == 0);
1430 KASSERT(vp->v_holdcnt == 0);
1431 KASSERT(vp->v_writecount == 0);
1432 lru_requeue(vp, NULL);
1433 mutex_exit(vp->v_interlock);
1434
1435 vfs_insmntque(vp, NULL);
1436 if (vp->v_type == VBLK || vp->v_type == VCHR)
1437 spec_node_destroy(vp);
1438
1439 mutex_obj_free(vp->v_interlock);
1440 rw_destroy(&vip->vi_lock);
1441 uvm_obj_destroy(&vp->v_uobj, true);
1442 KASSERT(vp->v_klist == &vip->vi_klist);
1443 klist_fini(&vip->vi_klist.vk_klist);
1444 cv_destroy(&vp->v_cv);
1445 cache_vnode_fini(vp);
1446 pool_cache_put(vcache_pool, vip);
1447 }
1448
1449 /*
1450 * Try to get an initial reference on this cached vnode.
1451 * Returns zero on success or EBUSY if the vnode state is not LOADED.
1452 *
1453 * NB: lockless code sequences may rely on this not blocking.
1454 */
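/*
 * Illustrative caller pattern (a sketch only): lockless lookups, e.g.
 * in the name cache, read a vnode pointer without the interlock, call
 * vcache_tryvget(), and fall back to a locked lookup on EBUSY:
 *
 *	if (vcache_tryvget(vp) != 0)
 *		goto slow_path;		(take the locks and retry)
 */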
1455 int
1456 vcache_tryvget(vnode_t *vp)
1457 {
1458 u_int use, next;
1459
1460 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1461 if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1462 return EBUSY;
1463 }
1464 next = atomic_cas_uint(&vp->v_usecount,
1465 use, (use + 1) | VUSECOUNT_VGET);
1466 if (__predict_true(next == use)) {
1467 membar_acquire();
1468 return 0;
1469 }
1470 }
1471 }
1472
1473 /*
1474 * Try to get an initial reference on this cached vnode.
1475 * Returns zero on success and ENOENT if the vnode has been reclaimed.
1476 * Will wait for the vnode state to be stable.
1477 *
1478 * v_interlock locked on entry and unlocked on exit.
1479 */
1480 int
1481 vcache_vget(vnode_t *vp)
1482 {
1483 int error;
1484
1485 KASSERT(mutex_owned(vp->v_interlock));
1486
1487 /* Increment hold count to prevent vnode from disappearing. */
1488 vp->v_holdcnt++;
1489 VSTATE_WAIT_STABLE(vp);
1490 vp->v_holdcnt--;
1491
1492 /* If this was the last reference to a reclaimed vnode free it now. */
1493 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1494 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1495 vcache_free(VNODE_TO_VIMPL(vp));
1496 else
1497 mutex_exit(vp->v_interlock);
1498 return ENOENT;
1499 }
1500 VSTATE_ASSERT(vp, VS_LOADED);
1501 error = vcache_tryvget(vp);
1502 KASSERT(error == 0);
1503 mutex_exit(vp->v_interlock);
1504
1505 return 0;
1506 }
1507
1508 /*
1509 * Get a vnode / fs node pair by key and return it referenced through vpp.
1510 */
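/*
 * A minimal usage sketch: a file system's VFS_VGET(9)-style lookup
 * would typically pass its own key (for instance an inode number,
 * shown here purely as an example) and get the vnode back referenced
 * but unlocked:
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	(vp is referenced, not locked; vn_lock() it before VOP_*() calls)
 */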
1511 int
1512 vcache_get(struct mount *mp, const void *key, size_t key_len,
1513 struct vnode **vpp)
1514 {
1515 int error;
1516 uint32_t hash;
1517 const void *new_key;
1518 struct vnode *vp;
1519 struct vcache_key vcache_key;
1520 vnode_impl_t *vip, *new_vip;
1521
1522 new_key = NULL;
1523 *vpp = NULL;
1524
1525 vcache_key.vk_mount = mp;
1526 vcache_key.vk_key = key;
1527 vcache_key.vk_key_len = key_len;
1528 hash = vcache_hash(&vcache_key);
1529
1530 again:
1531 mutex_enter(&vcache_lock);
1532 vip = vcache_hash_lookup(&vcache_key, hash);
1533
1534 /* If found, take a reference or retry. */
1535 if (__predict_true(vip != NULL)) {
1536 /*
1537 * If the vnode is loading we cannot take the v_interlock
1538 * here as it might change during load (see uvm_obj_setlock()).
1539 * As changing state from VS_LOADING requires both vcache_lock
1540 * and v_interlock it is safe to test with vcache_lock held.
1541 *
1542 * Wait for vnodes changing state from VS_LOADING and retry.
1543 */
1544 if (__predict_false(vip->vi_state == VS_LOADING)) {
1545 cv_wait(&vcache_cv, &vcache_lock);
1546 mutex_exit(&vcache_lock);
1547 goto again;
1548 }
1549 vp = VIMPL_TO_VNODE(vip);
1550 mutex_enter(vp->v_interlock);
1551 mutex_exit(&vcache_lock);
1552 error = vcache_vget(vp);
1553 if (error == ENOENT)
1554 goto again;
1555 if (error == 0)
1556 *vpp = vp;
1557 KASSERT((error != 0) == (*vpp == NULL));
1558 return error;
1559 }
1560 mutex_exit(&vcache_lock);
1561
1562 /* Allocate and initialize a new vcache / vnode pair. */
1563 error = vfs_busy(mp);
1564 if (error)
1565 return error;
1566 new_vip = vcache_alloc();
1567 new_vip->vi_key = vcache_key;
1568 vp = VIMPL_TO_VNODE(new_vip);
1569 mutex_enter(&vcache_lock);
1570 vip = vcache_hash_lookup(&vcache_key, hash);
1571 if (vip == NULL) {
1572 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1573 new_vip, vi_hash);
1574 vip = new_vip;
1575 }
1576
1577 /* If another thread beat us inserting this node, retry. */
1578 if (vip != new_vip) {
1579 vcache_dealloc(new_vip);
1580 vfs_unbusy(mp);
1581 goto again;
1582 }
1583 mutex_exit(&vcache_lock);
1584
1585 /* Load the fs node.  Exclusive to us, as new_vip is still VS_LOADING. */
1586 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1587 if (error) {
1588 mutex_enter(&vcache_lock);
1589 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1590 new_vip, vnode_impl, vi_hash);
1591 vcache_dealloc(new_vip);
1592 vfs_unbusy(mp);
1593 KASSERT(*vpp == NULL);
1594 return error;
1595 }
1596 KASSERT(new_key != NULL);
1597 KASSERT(memcmp(key, new_key, key_len) == 0);
1598 KASSERT(vp->v_op != NULL);
1599 vfs_insmntque(vp, mp);
1600 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1601 vp->v_vflag |= VV_MPSAFE;
1602 vfs_ref(mp);
1603 vfs_unbusy(mp);
1604
1605 /* Finished loading, finalize node. */
1606 mutex_enter(&vcache_lock);
1607 new_vip->vi_key.vk_key = new_key;
1608 mutex_enter(vp->v_interlock);
1609 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1610 mutex_exit(vp->v_interlock);
1611 mutex_exit(&vcache_lock);
1612 *vpp = vp;
1613 return 0;
1614 }
1615
1616 /*
1617 * Create a new vnode / fs node pair and return it referenced through vpp.
1618 */
1619 int
1620 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1621 kauth_cred_t cred, void *extra, struct vnode **vpp)
1622 {
1623 int error;
1624 uint32_t hash;
1625 struct vnode *vp, *ovp;
1626 vnode_impl_t *vip, *ovip;
1627
1628 *vpp = NULL;
1629
1630 /* Allocate and initialize a new vcache / vnode pair. */
1631 error = vfs_busy(mp);
1632 if (error)
1633 return error;
1634 vip = vcache_alloc();
1635 vip->vi_key.vk_mount = mp;
1636 vp = VIMPL_TO_VNODE(vip);
1637
1638 /* Create and load the fs node. */
1639 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1640 &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1641 if (error) {
1642 mutex_enter(&vcache_lock);
1643 vcache_dealloc(vip);
1644 vfs_unbusy(mp);
1645 KASSERT(*vpp == NULL);
1646 return error;
1647 }
1648 KASSERT(vp->v_op != NULL);
1649 KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1650 if (vip->vi_key.vk_key_len > 0) {
1651 KASSERT(vip->vi_key.vk_key != NULL);
1652 hash = vcache_hash(&vip->vi_key);
1653
1654 /*
1655 * Wait for previous instance to be reclaimed,
1656 * then insert new node.
1657 */
1658 mutex_enter(&vcache_lock);
1659 while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1660 ovp = VIMPL_TO_VNODE(ovip);
1661 mutex_enter(ovp->v_interlock);
1662 mutex_exit(&vcache_lock);
1663 error = vcache_vget(ovp);
1664 KASSERT(error == ENOENT);
1665 mutex_enter(&vcache_lock);
1666 }
1667 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1668 vip, vi_hash);
1669 mutex_exit(&vcache_lock);
1670 }
1671 vfs_insmntque(vp, mp);
1672 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1673 vp->v_vflag |= VV_MPSAFE;
1674 vfs_ref(mp);
1675 vfs_unbusy(mp);
1676
1677 /* Finished loading, finalize node. */
1678 mutex_enter(&vcache_lock);
1679 mutex_enter(vp->v_interlock);
1680 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1681 mutex_exit(&vcache_lock);
1682 mutex_exit(vp->v_interlock);
1683 *vpp = vp;
1684 return 0;
1685 }
1686
1687 /*
1688 * Prepare key change: update the old cache node's key and lock the new cache node.
1689 * Return an error if the new node already exists.
1690 */
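/*
 * Sketch of the rekey protocol for a file system whose key changes on
 * rename (illustrative only):
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *	if (error)
 *		return error;		(EEXIST: new key already cached)
 *	... change the key on disk / in the fs node ...
 *	vcache_rekey_exit(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *
 * Lookups under the new key block on the placeholder node until
 * vcache_rekey_exit() removes it.
 */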
1691 int
1692 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1693 const void *old_key, size_t old_key_len,
1694 const void *new_key, size_t new_key_len)
1695 {
1696 uint32_t old_hash, new_hash;
1697 struct vcache_key old_vcache_key, new_vcache_key;
1698 vnode_impl_t *vip, *new_vip;
1699
1700 old_vcache_key.vk_mount = mp;
1701 old_vcache_key.vk_key = old_key;
1702 old_vcache_key.vk_key_len = old_key_len;
1703 old_hash = vcache_hash(&old_vcache_key);
1704
1705 new_vcache_key.vk_mount = mp;
1706 new_vcache_key.vk_key = new_key;
1707 new_vcache_key.vk_key_len = new_key_len;
1708 new_hash = vcache_hash(&new_vcache_key);
1709
1710 new_vip = vcache_alloc();
1711 new_vip->vi_key = new_vcache_key;
1712
1713 /* Insert locked new node used as placeholder. */
1714 mutex_enter(&vcache_lock);
1715 vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1716 if (vip != NULL) {
1717 vcache_dealloc(new_vip);
1718 return EEXIST;
1719 }
1720 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1721 new_vip, vi_hash);
1722
1723 /* Replace the old node's key with the temporary copy. */
1724 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1725 KASSERT(vip != NULL);
1726 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1727 KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1728 vip->vi_key = old_vcache_key;
1729 mutex_exit(&vcache_lock);
1730 return 0;
1731 }
1732
1733 /*
1734 * Key change complete: update old node and remove placeholder.
1735 */
1736 void
1737 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1738 const void *old_key, size_t old_key_len,
1739 const void *new_key, size_t new_key_len)
1740 {
1741 uint32_t old_hash, new_hash;
1742 struct vcache_key old_vcache_key, new_vcache_key;
1743 vnode_impl_t *vip, *new_vip;
1744 struct vnode *new_vp;
1745
1746 old_vcache_key.vk_mount = mp;
1747 old_vcache_key.vk_key = old_key;
1748 old_vcache_key.vk_key_len = old_key_len;
1749 old_hash = vcache_hash(&old_vcache_key);
1750
1751 new_vcache_key.vk_mount = mp;
1752 new_vcache_key.vk_key = new_key;
1753 new_vcache_key.vk_key_len = new_key_len;
1754 new_hash = vcache_hash(&new_vcache_key);
1755
1756 mutex_enter(&vcache_lock);
1757
1758 /* Lookup old and new node. */
1759 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1760 KASSERT(vip != NULL);
1761 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1762
1763 new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1764 KASSERT(new_vip != NULL);
1765 KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1766 new_vp = VIMPL_TO_VNODE(new_vip);
1767 mutex_enter(new_vp->v_interlock);
1768 VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1769 mutex_exit(new_vp->v_interlock);
1770
1771 /* Rekey old node and put it onto its new hashlist. */
1772 vip->vi_key = new_vcache_key;
1773 if (old_hash != new_hash) {
1774 SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1775 vip, vnode_impl, vi_hash);
1776 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1777 vip, vi_hash);
1778 }
1779
1780 /* Remove new node used as placeholder. */
1781 SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1782 new_vip, vnode_impl, vi_hash);
1783 vcache_dealloc(new_vip);
1784 }
1785
1786 /*
1787 * Disassociate the underlying file system from a vnode.
1788 *
1789 * Must be called with vnode locked and will return unlocked.
1790 * Must be called with the interlock held, and will return with it held.
1791 */
1792 static void
1793 vcache_reclaim(vnode_t *vp)
1794 {
1795 lwp_t *l = curlwp;
1796 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1797 struct mount *mp = vp->v_mount;
1798 uint32_t hash;
1799 uint8_t temp_buf[64], *temp_key;
1800 size_t temp_key_len;
1801 bool recycle;
1802 int error;
1803
1804 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1805 KASSERT(mutex_owned(vp->v_interlock));
1806 KASSERT(vrefcnt(vp) != 0);
1807
1808 temp_key_len = vip->vi_key.vk_key_len;
1809 /*
1810 * Prevent the vnode from being recycled or brought into use
1811 * while we clean it out.
1812 */
1813 VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1814
1815 /*
1816 * Send NOTE_REVOKE now, before we call VOP_RECLAIM(),
1817 * because VOP_RECLAIM() could cause vp->v_klist to
1818 * become invalid. Don't check for interest in NOTE_REVOKE
1819 * here; it's always posted because it sets EV_EOF.
1820 *
1821 * Once it's been posted, reset vp->v_klist to point to
1822 * our own local storage, in case we were sharing with
1823 * someone else.
1824 */
1825 KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
1826 vp->v_klist = &vip->vi_klist;
1827 mutex_exit(vp->v_interlock);
1828
1829 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1830 mutex_enter(vp->v_interlock);
1831 if ((vp->v_iflag & VI_EXECMAP) != 0) {
1832 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1833 }
1834 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1835 vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1836 mutex_exit(vp->v_interlock);
1837 rw_exit(vp->v_uobj.vmobjlock);
1838
1839 /*
1840 * With vnode state set to reclaiming, purge name cache immediately
1841 * to prevent new handles on vnode, and wait for existing threads
1842 * trying to get a handle to notice VS_RECLAIMED status and abort.
1843 */
1844 cache_purge(vp);
1845
1846 /* Replace the vnode key with a temporary copy. */
1847 if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1848 temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1849 } else {
1850 temp_key = temp_buf;
1851 }
1852 if (vip->vi_key.vk_key_len > 0) {
1853 mutex_enter(&vcache_lock);
1854 memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1855 vip->vi_key.vk_key = temp_key;
1856 mutex_exit(&vcache_lock);
1857 }
1858
1859 fstrans_start(mp);
1860
1861 /*
1862 * Clean out any cached data associated with the vnode.
1863 */
1864 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1865 if (error != 0) {
1866 if (wapbl_vphaswapbl(vp))
1867 WAPBL_DISCARD(wapbl_vptomp(vp));
1868 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1869 }
1870 KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1871 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1872 if (vp->v_type == VBLK || vp->v_type == VCHR) {
1873 spec_node_revoke(vp);
1874 }
1875
1876 /*
1877 * Disassociate the underlying file system from the vnode.
1878 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1879 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1880 * would no longer function.
1881 */
1882 VOP_INACTIVE(vp, &recycle);
1883 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1884 if (VOP_RECLAIM(vp)) {
1885 vnpanic(vp, "%s: cannot reclaim", __func__);
1886 }
1887
1888 KASSERT(vp->v_data == NULL);
1889 KASSERT((vp->v_iflag & VI_PAGES) == 0);
1890
1891 if (vp->v_type == VREG && vp->v_ractx != NULL) {
1892 uvm_ra_freectx(vp->v_ractx);
1893 vp->v_ractx = NULL;
1894 }
1895
1896 if (vip->vi_key.vk_key_len > 0) {
1897 /* Remove from vnode cache. */
1898 hash = vcache_hash(&vip->vi_key);
1899 mutex_enter(&vcache_lock);
1900 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1901 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1902 vip, vnode_impl, vi_hash);
1903 mutex_exit(&vcache_lock);
1904 }
1905 if (temp_key != temp_buf)
1906 kmem_free(temp_key, temp_key_len);
1907
1908 /* Done with purge, notify sleepers of the grim news. */
1909 mutex_enter(vp->v_interlock);
1910 vp->v_op = dead_vnodeop_p;
1911 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1912 vp->v_tag = VT_NON;
1913 mutex_exit(vp->v_interlock);
1914
1915 /*
1916 * Move to dead mount. Must be after changing the operations
1917 * vector as vnode operations enter the mount before using the
1918 * operations vector. See sys/kern/vnode_if.c.
1919 */
1920 vp->v_vflag &= ~VV_ROOT;
1921 vfs_ref(dead_rootmount);
1922 vfs_insmntque(vp, dead_rootmount);
1923
1924 #ifdef PAX_SEGVGUARD
1925 pax_segvguard_cleanup(vp);
1926 #endif /* PAX_SEGVGUARD */
1927
1928 mutex_enter(vp->v_interlock);
1929 fstrans_done(mp);
1930 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1931 }
1932
1933 /*
1934 * Disassociate the underlying file system from an open device vnode
1935 * and make it anonymous.
1936 *
1937 * Vnode unlocked on entry, drops a reference to the vnode.
1938 */
1939 void
1940 vcache_make_anon(vnode_t *vp)
1941 {
1942 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1943 uint32_t hash;
1944 bool recycle;
1945
1946 KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1947 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1948 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1949
1950 /* Remove from vnode cache. */
1951 hash = vcache_hash(&vip->vi_key);
1952 mutex_enter(&vcache_lock);
1953 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1954 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1955 vip, vnode_impl, vi_hash);
1956 vip->vi_key.vk_mount = dead_rootmount;
1957 vip->vi_key.vk_key_len = 0;
1958 vip->vi_key.vk_key = NULL;
1959 mutex_exit(&vcache_lock);
1960
1961 /*
1962 * Disassociate the underlying file system from the vnode.
1963 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1964 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1965 * would no longer function.
1966 */
1967 if (vn_lock(vp, LK_EXCLUSIVE)) {
1968 vnpanic(vp, "%s: cannot lock", __func__);
1969 }
1970 VOP_INACTIVE(vp, &recycle);
1971 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1972 if (VOP_RECLAIM(vp)) {
1973 vnpanic(vp, "%s: cannot reclaim", __func__);
1974 }
1975
1976 /* Purge name cache. */
1977 cache_purge(vp);
1978
1979 /* Done with purge, change operations vector. */
1980 mutex_enter(vp->v_interlock);
1981 vp->v_op = spec_vnodeop_p;
1982 vp->v_vflag |= VV_MPSAFE;
1983 mutex_exit(vp->v_interlock);
1984
1985 /*
1986 * Move to dead mount. Must be after changing the operations
1987 * vector as vnode operations enter the mount before using the
1988 * operations vector. See sys/kern/vnode_if.c.
1989 */
1990 vfs_ref(dead_rootmount);
1991 vfs_insmntque(vp, dead_rootmount);
1992
1993 vrele(vp);
1994 }
1995
1996 /*
1997 * Update outstanding I/O count and do wakeup if requested.
1998 */
1999 void
2000 vwakeup(struct buf *bp)
2001 {
2002 vnode_t *vp;
2003
2004 if ((vp = bp->b_vp) == NULL)
2005 return;
2006
2007 KASSERT(bp->b_objlock == vp->v_interlock);
2008 KASSERT(mutex_owned(bp->b_objlock));
2009
2010 if (--vp->v_numoutput < 0)
2011 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2012 if (vp->v_numoutput == 0)
2013 cv_broadcast(&vp->v_cv);
2014 }
2015
2016 /*
2017 * Test a vnode for being or becoming dead. Returns one of:
2018 * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2019 * ENOENT: vnode is dead.
2020 * 0: otherwise.
2021 *
2022 * Whenever this function returns a non-zero value all future
2023 * calls will also return a non-zero value.
2024 */
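/*
 * Typical caller pattern (a sketch): the interlock must be held, e.g.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;		(dead or dying, treat as revoked)
 */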
2025 int
2026 vdead_check(struct vnode *vp, int flags)
2027 {
2028
2029 KASSERT(mutex_owned(vp->v_interlock));
2030
2031 if (! ISSET(flags, VDEAD_NOWAIT))
2032 VSTATE_WAIT_STABLE(vp);
2033
2034 if (VSTATE_GET(vp) == VS_RECLAIMING) {
2035 KASSERT(ISSET(flags, VDEAD_NOWAIT));
2036 return EBUSY;
2037 } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2038 return ENOENT;
2039 }
2040
2041 return 0;
2042 }
2043
2044 int
2045 vfs_drainvnodes(void)
2046 {
2047 int i, gen;
2048
2049 mutex_enter(&vdrain_lock);
2050 for (i = 0; i < 2; i++) {
2051 gen = vdrain_gen;
2052 while (gen == vdrain_gen) {
2053 cv_broadcast(&vdrain_cv);
2054 cv_wait(&vdrain_gen_cv, &vdrain_lock);
2055 }
2056 }
2057 mutex_exit(&vdrain_lock);
2058
2059 if (numvnodes >= desiredvnodes)
2060 return EBUSY;
2061
2062 if (vcache_hashsize != desiredvnodes)
2063 vcache_reinit();
2064
2065 return 0;
2066 }
2067
2068 void
2069 vnpanic(vnode_t *vp, const char *fmt, ...)
2070 {
2071 va_list ap;
2072
2073 #ifdef DIAGNOSTIC
2074 vprint(NULL, vp);
2075 #endif
2076 va_start(ap, fmt);
2077 vpanic(fmt, ap);
2078 va_end(ap);
2079 }
2080
2081 void
2082 vshareilock(vnode_t *tvp, vnode_t *fvp)
2083 {
2084 kmutex_t *oldlock;
2085
2086 oldlock = tvp->v_interlock;
2087 mutex_obj_hold(fvp->v_interlock);
2088 tvp->v_interlock = fvp->v_interlock;
2089 mutex_obj_free(oldlock);
2090 }
2091
2092 void
2093 vshareklist(vnode_t *tvp, vnode_t *fvp)
2094 {
2095 /*
2096 * If two vnodes share klist state, they must also share
2097 * an interlock.
2098 */
2099 KASSERT(tvp->v_interlock == fvp->v_interlock);
2100
2101 /*
2102 * We make the following assumptions:
2103 *
2104 * ==> Some other synchronization is happening outside of
2105 * our view to make this safe.
2106 *
2107 * ==> That the "to" vnode will have the necessary references
2108 * on the "from" vnode so that the storage for the klist
2109 * won't be yanked out from beneath us (the vnode_impl).
2110 *
2111 * ==> If "from" is also sharing, we then assume that "from"
2112 * has the necessary references, and so on.
2113 */
2114 tvp->v_klist = fvp->v_klist;
2115 }
2116