/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void delmntque(struct vnode *vp);
static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
    int slpflag, int slptimeo);
static void syncer_shutdown(void *arg, int howto);
static int vtryrecycle(struct vnode *vp, bool isvnlru);
static void v_init_counters(struct vnode *);
static void vn_seqc_init(struct vnode *);
static void vn_seqc_write_end_free(struct vnode *vp);
static void vgonel(struct vnode *);
static bool vhold_recycle_free(struct vnode *);
static void vdropl_recycle(struct vnode *vp);
static void vdrop_recycle(struct vnode *vp);
static void vfs_knllock(void *arg);
static void vfs_knlunlock(void *arg);
static void vfs_knl_assert_lock(void *arg, int what);
static void destroy_vpollinfo(struct vpollinfo *vi);
static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
    daddr_t startlbn, daddr_t endlbn);
static void vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
    VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
    VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
    0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
    S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
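
/*
 * These tables are typically consumed through the IFTOVT()/VTTOIF()
 * wrappers (defined in sys/vnode.h in the stock tree), roughly:
 *
 *	#define IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
 *	#define VTTOIF(indx)	(vttoif_tab[(int)(indx)])
 *
 * so that, e.g., IFTOVT(S_IFDIR) maps to VDIR and VTTOIF(VDIR) maps
 * back to S_IFDIR.
 */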

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently
 * hard-coded as 4% and 9% of the available space higher.  These and the
 * default of 25% for wantfreevnodes are too large if the memory size is
 * large.  E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to
 * reclaim whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static u_long recycles_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS, &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_count, 0,
    "Number of vnodes recycled to meet vnode cache targets");

static u_long recycles_free_count;
SYSCTL_ULONG(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD | CTLFLAG_STATS,
    &recycles_free_count, 0,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t direct_recycles_free_count;
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, direct_recycles_free, CTLFLAG_RD,
    &direct_recycles_free_count,
    "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, writes to mounted block
 * devices are delayed only about half the time that file data is
 * delayed.  Directory updates are more critical still, so they are
 * delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

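/*
 * Deferred vdrop() requeues are batched per CPU: each CPU fills its
 * local tab[] of up to VDBATCH_SIZE vnodes and only then takes the
 * global vnode_list lock to requeue them all in one go, amortizing
 * acquisitions of vnode_list_mtx (see vdbatch_dequeue() and the batch
 * processing code later in this file).
 */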
#define VDBATCH_SIZE 8
struct vdbatch {
    u_int index;
    struct mtx lock;
    struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
    u_long val;
    int error;

    val = desiredvnodes;
    error = sysctl_handle_long(oidp, &val, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);

    if (val == desiredvnodes)
        return (0);
    mtx_lock(&vnode_list_mtx);
    desiredvnodes = val;
    wantfreevnodes = desiredvnodes / 4;
    vnlru_recalc();
    mtx_unlock(&vnode_list_mtx);
    /*
     * XXX There is no protection against multiple threads changing
     * desiredvnodes at the same time.  Locking above only helps vnlru
     * and getnewvnode.
     */
    vfs_hash_changesize(desiredvnodes);
    cache_changesize(desiredvnodes);
    return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
    u_long rfreevnodes;

    rfreevnodes = vnlru_read_freevnodes();
    return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
    u_long val;
    int error;

    val = wantfreevnodes;
    error = sysctl_handle_long(oidp, &val, 0, req);
    if (error != 0 || req->newptr == NULL)
        return (error);

    if (val == wantfreevnodes)
        return (0);
    mtx_lock(&vnode_list_mtx);
    wantfreevnodes = val;
    vnlru_recalc();
    mtx_unlock(&vnode_list_mtx);
    return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
    struct vnode *vp;
    struct nameidata nd;
    char *buf;
    unsigned long ndflags;
    int error;

    if (req->newptr == NULL)
        return (EINVAL);
    if (req->newlen >= PATH_MAX)
        return (E2BIG);

    buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
    error = SYSCTL_IN(req, buf, req->newlen);
    if (error != 0)
        goto out;

    buf[req->newlen] = '\0';

    ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
    NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
    if ((error = namei(&nd)) != 0)
        goto out;
    vp = nd.ni_vp;

    if (VN_IS_DOOMED(vp)) {
        /*
         * This vnode is being recycled.  Return != 0 to let the caller
         * know that the sysctl had no effect.  Return EAGAIN because a
         * subsequent call will likely succeed (since namei will create
         * a new vnode if necessary)
         */
        error = EAGAIN;
        goto putvnode;
    }

    vgone(vp);
putvnode:
    vput(vp);
    NDFREE_PNBUF(&nd);
out:
    free(buf, M_TEMP);
    return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
    struct thread *td = curthread;
    struct vnode *vp;
    struct file *fp;
    int error;
    int fd;

    if (req->newptr == NULL)
        return (EBADF);

    error = sysctl_handle_int(oidp, &fd, 0, req);
    if (error != 0)
        return (error);
    error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
    if (error != 0)
        return (error);
    vp = fp->f_vnode;

    error = vn_lock(vp, LK_EXCLUSIVE);
    if (error != 0)
        goto drop;

    vgone(vp);
    VOP_UNLOCK(vp);
drop:
    fdrop(fp, td);
    return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif
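
/*
 * As a sketch of how vnsz2log is consumed elsewhere in this file,
 * getnewvnode() seeds the hash with something equivalent to:
 *
 *	vp->v_hash = (uintptr_t)vp >> vnsz2log;
 *
 * i.e. the vnode's address with its always-zero low bits shifted out.
 */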

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
    return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
    uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);
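
/*
 * The macro above expands to a family of BUF_PCTRIE_* functions
 * (insert, lookup, remove, plus SMR-protected lookup variants; the
 * exact set is defined by sys/pctrie.h) which the buffer code uses to
 * index each bufobj's clean and dirty buffers by logical block number.
 */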

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef MAXVNODES_MAX
#define MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
    struct vnode *vp;

    vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
    vp->v_type = VMARKER;
    vp->v_mount = mp;

    return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

    MPASS(vp->v_type == VMARKER);
    free(vp, M_VNODE_MARKER);
}
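
/*
 * Marker vnodes (v_type == VMARKER) exist only to hold a position in
 * vnode_list while vnode_list_mtx is dropped.  A typical scan looks
 * roughly like:
 *
 *	TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
 *	mtx_unlock(&vnode_list_mtx);
 *	... work on vp ...
 *	mtx_lock(&vnode_list_mtx);
 *	vp = TAILQ_NEXT(mvp, v_vnodelist);
 *
 * Iterators skip entries with v_type == VMARKER, so markers themselves
 * are never processed; vlrureclaim() and vnlru_free_impl() below follow
 * this pattern.
 */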

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
    kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
    return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
    size_t end1, end2, off1, off2;

    _Static_assert(offsetof(struct vnode, v_vnodelist) <
        offsetof(struct vnode, v_dbatchcpu),
        "KASAN marks require updating");

    off1 = offsetof(struct vnode, v_vnodelist);
    off2 = offsetof(struct vnode, v_dbatchcpu);
    end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
    end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

    /*
     * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
     * after the vnode has been freed.  Try to get some KASAN coverage by
     * marking everything except those two fields as invalid.  Because
     * KASAN's tracking is not byte-granular, any preceding fields sharing
     * the same 8-byte aligned word must also be marked valid.
     */

    /* Handle the area from the start until v_vnodelist... */
    off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
    kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

    /* ... then the area between v_vnodelist and v_dbatchcpu ... */
    off1 = roundup2(end1, KASAN_SHADOW_SCALE);
    off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
    if (off2 > off1)
        kasan_mark((void *)((char *)mem + off1), off2 - off1,
            off2 - off1, KASAN_UMA_FREED);

    /* ... and finally the area from v_dbatchcpu to the end. */
    off2 = roundup2(end2, KASAN_SHADOW_SCALE);
    kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
        KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
    struct vnode *vp;

    vp = mem;
    bzero(vp, size);
    /*
     * Setup locks.
     */
    vp->v_vnlock = &vp->v_lock;
    mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
    /*
     * By default, don't allow shared locks unless filesystems opt-in.
     */
    lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
        LK_NOSHARE | LK_IS_VNODE);
    /*
     * Initialize bufobj.
     */
    bufobj_init(&vp->v_bufobj, vp);
    /*
     * Initialize namecache.
     */
    cache_vnode_init(vp);
    /*
     * Initialize rangelocks.
     */
    rangelock_init(&vp->v_rl);

    vp->v_dbatchcpu = NOCPU;

    vp->v_state = VSTATE_DEAD;

    /*
     * Check vhold_recycle_free for an explanation.
     */
    vp->v_holdcnt = VHOLD_NO_SMR;
    vp->v_type = VNON;
    mtx_lock(&vnode_list_mtx);
    TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
    mtx_unlock(&vnode_list_mtx);
    return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
    struct vnode *vp;
    struct bufobj *bo;

    vp = mem;
    vdbatch_dequeue(vp);
    mtx_lock(&vnode_list_mtx);
    TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
    mtx_unlock(&vnode_list_mtx);
    rangelock_destroy(&vp->v_rl);
    lockdestroy(vp->v_vnlock);
    mtx_destroy(&vp->v_interlock);
    bo = &vp->v_bufobj;
    rw_destroy(BO_LOCKPTR(bo));

    kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define NFS_NCLNODE_SZ	(528 + 64)
#define NC_SZ	148
#else
#define NFS_NCLNODE_SZ	(360 + 32)
#define NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
    struct vdbatch *vd;
    uma_ctor ctor;
    uma_dtor dtor;
    int cpu, physvnodes, virtvnodes;

    /*
     * Desiredvnodes is a function of the physical memory size and the
     * kernel's heap size.  Generally speaking, it scales with the
     * physical memory size.  The ratio of desiredvnodes to the physical
     * memory size is 1:16 until desiredvnodes exceeds 98,304.
     * Thereafter, the marginal ratio of desiredvnodes to the physical
     * memory size is 1:64.  However, desiredvnodes is limited by the
     * kernel's heap size.  The memory required by desiredvnodes vnodes
     * and vm objects must not exceed 1/10th of the kernel's heap size.
     */
    physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
        3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
    virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
        sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
    desiredvnodes = min(physvnodes, virtvnodes);
    if (desiredvnodes > MAXVNODES_MAX) {
        if (bootverbose)
            printf("Reducing kern.maxvnodes %lu -> %lu\n",
                desiredvnodes, MAXVNODES_MAX);
        desiredvnodes = MAXVNODES_MAX;
    }
    wantfreevnodes = desiredvnodes / 4;
    mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
    TAILQ_INIT(&vnode_list);
    mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
    /*
     * The lock is taken to appease WITNESS.
     */
    mtx_lock(&vnode_list_mtx);
    vnlru_recalc();
    mtx_unlock(&vnode_list_mtx);
    vnode_list_free_marker = vn_alloc_marker(NULL);
    TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
    vnode_list_reclaim_marker = vn_alloc_marker(NULL);
    TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
    ctor = vnode_ctor;
    dtor = vnode_dtor;
#else
    ctor = NULL;
    dtor = NULL;
#endif
    vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
        vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
    uma_zone_set_smr(vnode_zone, vfs_smr);

    /*
     * Preallocate enough nodes to support one per buf so that an
     * insert cannot fail.  reassignbuf() callers cannot tolerate
     * insertion failure.
     */
    buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
        NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
        UMA_ZONE_NOFREE | UMA_ZONE_SMR);
    buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
    uma_prealloc(buf_trie_zone, nbuf);

    vnodes_created = counter_u64_alloc(M_WAITOK);
    direct_recycles_free_count = counter_u64_alloc(M_WAITOK);
    vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

    /*
     * Initialize the filesystem syncer.
     */
    syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
        &syncer_mask);
    syncer_maxdelay = syncer_mask + 1;
    mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
    cv_init(&sync_wakeup, "syncer");

    CPU_FOREACH(cpu) {
        vd = DPCPU_ID_PTR((cpu), vd);
        bzero(vd, sizeof(*vd));
        mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
    }
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 *
 *	root fs				var fs
 *	/ vnode lock		A	/ vnode lock (/var)		D
 *	/var vnode lock		B	/log vnode lock (/var/log)	E
 *	vfs_busy lock		C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
    struct mount_pcpu *mpcpu;

    MPASS((flags & ~MBF_MASK) == 0);
    CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

    if (vfs_op_thread_enter(mp, mpcpu)) {
        MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
        MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
        MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
        vfs_mp_count_add_pcpu(mpcpu, ref, 1);
        vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
        vfs_op_thread_exit(mp, mpcpu);
        if (flags & MBF_MNTLSTLOCK)
            mtx_unlock(&mountlist_mtx);
        return (0);
    }

    MNT_ILOCK(mp);
    vfs_assert_mount_counters(mp);
    MNT_REF(mp);
    /*
     * If mount point is currently being unmounted, sleep until the
     * mount point fate is decided.  If thread doing the unmounting fails,
     * it will clear MNTK_UNMOUNT flag before waking us up, indicating
     * that this mount point has survived the unmount attempt and vfs_busy
     * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
     * flag in addition to MNTK_UNMOUNT, indicating that mount point is
     * about to be really destroyed.  vfs_busy needs to release its
     * reference on the mount point in this case and return with ENOENT,
     * telling the caller the mount it tried to busy is no longer valid.
     */
    while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
        KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
            ("%s: non-empty upper mount list with pending unmount",
            __func__));
        if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
            MNT_REL(mp);
            MNT_IUNLOCK(mp);
            CTR1(KTR_VFS, "%s: failed busying before sleeping",
                __func__);
            return (ENOENT);
        }
        if (flags & MBF_MNTLSTLOCK)
            mtx_unlock(&mountlist_mtx);
        mp->mnt_kern_flag |= MNTK_MWAIT;
        msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
        if (flags & MBF_MNTLSTLOCK)
            mtx_lock(&mountlist_mtx);
        MNT_ILOCK(mp);
    }
    if (flags & MBF_MNTLSTLOCK)
        mtx_unlock(&mountlist_mtx);
    mp->mnt_lockref++;
    MNT_IUNLOCK(mp);
    return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
    struct mount_pcpu *mpcpu;
    int c;

    CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

    if (vfs_op_thread_enter(mp, mpcpu)) {
        MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
        vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
        vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
        vfs_op_thread_exit(mp, mpcpu);
        return;
    }

    MNT_ILOCK(mp);
    vfs_assert_mount_counters(mp);
    MNT_REL(mp);
    c = --mp->mnt_lockref;
    if (mp->mnt_vfs_ops == 0) {
        MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
        MNT_IUNLOCK(mp);
        return;
    }
    if (c < 0)
        vfs_dump_mount_counters(mp);
    if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
        MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
        CTR1(KTR_VFS, "%s: waking up waiters", __func__);
        mp->mnt_kern_flag &= ~MNTK_DRAINING;
        wakeup(&mp->mnt_lockref);
    }
    MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
    struct mount *mp;

    CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
    mtx_lock(&mountlist_mtx);
    TAILQ_FOREACH(mp, &mountlist, mnt_list) {
        if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
            vfs_ref(mp);
            mtx_unlock(&mountlist_mtx);
            return (mp);
        }
    }
    mtx_unlock(&mountlist_mtx);
    CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
    return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless,
 * exploiting the fact that struct mounts are never freed.  In the worst
 * case we may get a pointer to an unmounted or even a different
 * filesystem, so we have to check what we got and fall back to the
 * slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define FSID_CACHE_SIZE	256
    typedef struct mount * volatile vmp_t;
    static vmp_t cache[FSID_CACHE_SIZE];
    struct mount *mp;
    int error;
    uint32_t hash;

    CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
    hash = fsid->val[0] ^ fsid->val[1];
    hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
    mp = cache[hash];
    if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
        goto slow;
    if (vfs_busy(mp, 0) != 0) {
        cache[hash] = NULL;
        goto slow;
    }
    if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
        return (mp);
    else
        vfs_unbusy(mp);

slow:
    mtx_lock(&mountlist_mtx);
    TAILQ_FOREACH(mp, &mountlist, mnt_list) {
        if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
            error = vfs_busy(mp, MBF_MNTLSTLOCK);
            if (error) {
                cache[hash] = NULL;
                mtx_unlock(&mountlist_mtx);
                return (NULL);
            }
            cache[hash] = mp;
            return (mp);
        }
    }
    CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
    mtx_unlock(&mountlist_mtx);
    return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
    int error;

    if (jailed(td->td_ucred)) {
        /*
         * If the jail of the calling thread lacks permission for
         * this type of file system, deny immediately.
         */
        if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
            return (EPERM);

        /*
         * If the file system was mounted outside the jail of the
         * calling thread, deny immediately.
         */
        if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
            return (EPERM);
    }

    /*
     * If file system supports delegated administration, we don't check
     * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
     * by the file system itself.
     * If this is not the user that did original mount, we check for
     * the PRIV_VFS_MOUNT_OWNER privilege.
     */
    if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
        mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
        if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
            return (error);
    }
    return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
    static uint16_t mntid_base;
    struct mount *nmp;
    fsid_t tfsid;
    int mtype;

    CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
    mtx_lock(&mntid_mtx);
    mtype = mp->mnt_vfc->vfc_typenum;
    tfsid.val[1] = mtype;
    mtype = (mtype & 0xFF) << 24;
    for (;;) {
        tfsid.val[0] = makedev(255,
            mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
        mntid_base++;
        if ((nmp = vfs_getvfs(&tfsid)) == NULL)
            break;
        vfs_rel(nmp);
    }
    mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
    mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
    mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");
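
/*
 * For instance, maximum precision can be selected at runtime with
 * something like:
 *
 *	sysctl vfs.timestamp_precision=3
 */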

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
    struct timeval tv;

    switch (timestamp_precision) {
    case TSP_SEC:
        tsp->tv_sec = time_second;
        tsp->tv_nsec = 0;
        break;
    case TSP_HZ:
        getnanotime(tsp);
        break;
    case TSP_USEC:
        microtime(&tv);
        TIMEVAL_TO_TIMESPEC(&tv, tsp);
        break;
    case TSP_NSEC:
    default:
        nanotime(tsp);
        break;
    }
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

    vap->va_type = VNON;
    vap->va_size = VNOVAL;
    vap->va_bytes = VNOVAL;
    vap->va_mode = VNOVAL;
    vap->va_nlink = VNOVAL;
    vap->va_uid = VNOVAL;
    vap->va_gid = VNOVAL;
    vap->va_fsid = VNOVAL;
    vap->va_fileid = VNOVAL;
    vap->va_blocksize = VNOVAL;
    vap->va_rdev = VNOVAL;
    vap->va_atime.tv_sec = VNOVAL;
    vap->va_atime.tv_nsec = VNOVAL;
    vap->va_mtime.tv_sec = VNOVAL;
    vap->va_mtime.tv_nsec = VNOVAL;
    vap->va_ctime.tv_sec = VNOVAL;
    vap->va_ctime.tv_nsec = VNOVAL;
    vap->va_birthtime.tv_sec = VNOVAL;
    vap->va_birthtime.tv_nsec = VNOVAL;
    vap->va_flags = VNOVAL;
    vap->va_gen = VNOVAL;
    vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
    struct vnode *vp, *mvp;
    struct mount *mp;
    struct vm_object *object;
    u_long done;
    bool retried;

    mtx_assert(&vnode_list_mtx, MA_OWNED);

    retried = false;
    done = 0;

    mvp = vnode_list_reclaim_marker;
restart:
    vp = mvp;
    while (done < target) {
        vp = TAILQ_NEXT(vp, v_vnodelist);
        if (__predict_false(vp == NULL))
            break;

        if (__predict_false(vp->v_type == VMARKER))
            continue;

        /*
         * If it's been deconstructed already, it's still
         * referenced, or it exceeds the trigger, skip it.
         * Also skip free vnodes.  We are trying to make space
         * for more free vnodes, not reduce their count.
         */
        if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
            (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
            goto next_iter;

        if (vp->v_type == VBAD || vp->v_type == VNON)
            goto next_iter;

        object = atomic_load_ptr(&vp->v_object);
        if (object == NULL || object->resident_page_count > trigger) {
            goto next_iter;
        }

        /*
         * Handle races against vnode allocation.  Filesystems lock the
         * vnode some time after it gets returned from getnewvnode,
         * despite type and hold count being manipulated earlier.
         * Resorting to checking v_mount restores guarantees present
         * before the global list was reworked to contain all vnodes.
         */
        if (!VI_TRYLOCK(vp))
            goto next_iter;
        if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
            VI_UNLOCK(vp);
            goto next_iter;
        }
        if (vp->v_mount == NULL) {
            VI_UNLOCK(vp);
            goto next_iter;
        }
        vholdl(vp);
        VI_UNLOCK(vp);
        TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
        TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
        mtx_unlock(&vnode_list_mtx);

        if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
            vdrop_recycle(vp);
            goto next_iter_unlocked;
        }
        if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
            vdrop_recycle(vp);
            vn_finished_write(mp);
            goto next_iter_unlocked;
        }

        VI_LOCK(vp);
        if (vp->v_usecount > 0 ||
            (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
            (vp->v_object != NULL && vp->v_object->handle == vp &&
            vp->v_object->resident_page_count > trigger)) {
            VOP_UNLOCK(vp);
            vdropl_recycle(vp);
            vn_finished_write(mp);
            goto next_iter_unlocked;
        }
        recycles_count++;
        vgonel(vp);
        VOP_UNLOCK(vp);
        vdropl_recycle(vp);
        vn_finished_write(mp);
        done++;
next_iter_unlocked:
        maybe_yield();
        mtx_lock(&vnode_list_mtx);
        goto restart;
next_iter:
        MPASS(vp->v_type != VMARKER);
        if (!should_yield())
            continue;
        TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
        TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
        mtx_unlock(&vnode_list_mtx);
        kern_yield(PRI_USER);
        mtx_lock(&vnode_list_mtx);
        goto restart;
    }
    if (done == 0 && !retried) {
        TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
        TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
        retried = true;
        goto restart;
    }
    return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to recycle requested amount of free vnodes.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru)
{
    struct vnode *vp;
    struct mount *mp;
    int ocount;
    bool retried;

    mtx_assert(&vnode_list_mtx, MA_OWNED);
    if (count > max_free_per_call)
        count = max_free_per_call;
    if (count == 0) {
        mtx_unlock(&vnode_list_mtx);
        return (0);
    }
    ocount = count;
    retried = false;
    vp = mvp;
    for (;;) {
        vp = TAILQ_NEXT(vp, v_vnodelist);
        if (__predict_false(vp == NULL)) {
            /*
             * The free vnode marker can be past eligible vnodes:
             * 1. if vdbatch_process trylock failed
             * 2. if vtryrecycle failed
             *
             * If so, start the scan from scratch.
             */
            if (!retried && vnlru_read_freevnodes() > 0) {
                TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
                TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
                vp = mvp;
                retried = true;
                continue;
            }

            /*
             * Give up
             */
            TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
            TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
            mtx_unlock(&vnode_list_mtx);
            break;
        }
        if (__predict_false(vp->v_type == VMARKER))
            continue;
        if (vp->v_holdcnt > 0)
            continue;
        /*
         * Don't recycle if our vnode is from different type
         * of mount point.  Note that mp is type-safe, the
         * check does not reach unmapped address even if
         * vnode is reclaimed.
         */
        if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
            mp->mnt_op != mnt_op) {
            continue;
        }
        if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
            continue;
        }
        if (!vhold_recycle_free(vp))
            continue;
        TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
        TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
        mtx_unlock(&vnode_list_mtx);
        /*
         * FIXME: the return value is ignored, meaning nothing may have
         * been recycled even though this claims otherwise to the caller.
         *
         * Originally the value started being ignored in 2005 with
         * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
         *
         * Respecting the value can run into significant stalls if most
         * vnodes belong to one file system and it has writes
         * suspended.  In presence of many threads and millions of
         * vnodes they keep contending on the vnode_list_mtx lock only
         * to find vnodes they can't recycle.
         *
         * The solution would be to pre-check if the vnode is likely to
         * be recycle-able, but it needs to happen with the
         * vnode_list_mtx lock held.  This runs into a problem where
         * VOP_GETWRITEMOUNT (currently needed to find out about if
         * writes are frozen) can take locks which LOR against it.
         *
         * Check nullfs for one example (null_getwritemount).
         */
        vtryrecycle(vp, isvnlru);
        count--;
        if (count == 0) {
            break;
        }
        mtx_lock(&vnode_list_mtx);
        vp = mvp;
    }
    mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
    return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
 */
static int
vnlru_free_locked_direct(int count)
{
    int ret;

    mtx_assert(&vnode_list_mtx, MA_OWNED);
    ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, false);
    mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
    return (ret);
}

static int
vnlru_free_locked_vnlru(int count)
{
    int ret;

    mtx_assert(&vnode_list_mtx, MA_OWNED);
    ret = vnlru_free_impl(count, NULL, vnode_list_free_marker, true);
    mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
    return (ret);
}

static int
vnlru_free_vnlru(int count)
{

    mtx_lock(&vnode_list_mtx);
    return (vnlru_free_locked_vnlru(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

    MPASS(mnt_op != NULL);
    MPASS(mvp != NULL);
    VNPASS(mvp->v_type == VMARKER, mvp);
    mtx_lock(&vnode_list_mtx);
    vnlru_free_impl(count, mnt_op, mvp, true);
    mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
    struct vnode *mvp;

    mvp = vn_alloc_marker(NULL);
    mtx_lock(&vnode_list_mtx);
    TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
    mtx_unlock(&vnode_list_mtx);
    return (mvp);
}
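
/*
 * A filesystem that wants to drive recycling of its own vnodes (ZFS
 * does something like this from its ARC pruning path, for instance)
 * typically pairs these routines as, roughly:
 *
 *	mvp = vnlru_alloc_marker();
 *	...
 *	vnlru_free_vfsops(count, &its_vfsops, mvp);
 *	...
 *	vnlru_free_marker(mvp);
 *
 * where its_vfsops stands in for the caller's own vfsops structure.
 */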

void
vnlru_free_marker(struct vnode *mvp)
{
    mtx_lock(&vnode_list_mtx);
    TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
    mtx_unlock(&vnode_list_mtx);
    vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

    mtx_assert(&vnode_list_mtx, MA_OWNED);
    gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
    vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
    vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru awakened due to vnode shortage");

#define VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a counter local to CPU
 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are conditionally
 * walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

    atomic_add_long(&freevnodes, *lfreevnodes);
    *lfreevnodes = 0;
    critical_exit();
}

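/*
 * Bump the per-CPU count; the global counter is only touched (via
 * vfs_freevnodes_rollup()) once the local delta reaches the slop
 * threshold, so the common path is just a per-CPU increment inside a
 * critical section.
 */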
static __inline void
vfs_freevnodes_inc(void)
{
    int8_t *lfreevnodes;

    critical_enter();
    lfreevnodes = PCPU_PTR(vfs_freevnodes);
    (*lfreevnodes)++;
    if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
        vfs_freevnodes_rollup(lfreevnodes);
    else
        critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
    int8_t *lfreevnodes;

    critical_enter();
    lfreevnodes = PCPU_PTR(vfs_freevnodes);
    (*lfreevnodes)--;
    if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
        vfs_freevnodes_rollup(lfreevnodes);
    else
        critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
    long slop, rfreevnodes, rfreevnodes_old;
    int cpu;

    rfreevnodes = atomic_load_long(&freevnodes);
    rfreevnodes_old = atomic_load_long(&freevnodes_old);

    if (rfreevnodes > rfreevnodes_old)
        slop = rfreevnodes - rfreevnodes_old;
    else
        slop = rfreevnodes_old - rfreevnodes;
    if (slop < VNLRU_FREEVNODES_SLOP)
        return (rfreevnodes >= 0 ? rfreevnodes : 0);
    CPU_FOREACH(cpu) {
        rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
    }
    atomic_store_long(&freevnodes_old, rfreevnodes);
    return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

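/*
 * Report whether the vnode cache is short on headroom: returns true
 * when fewer than "limit" slots remain, counting both unused capacity
 * up to desiredvnodes and free vnodes above the wantfreevnodes reserve.
 */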
static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
    u_long rfreevnodes, space;

    if (__predict_false(rnumvnodes > desiredvnodes))
        return (true);

    space = desiredvnodes - rnumvnodes;
    if (space < limit) {
        rfreevnodes = vnlru_read_freevnodes();
        if (rfreevnodes > wantfreevnodes)
            space += rfreevnodes - wantfreevnodes;
    }
    return (space < limit);
}

static void
vnlru_kick_locked(void)
{

    mtx_assert(&vnode_list_mtx, MA_OWNED);
    if (vnlruproc_sig == 0) {
        vnlruproc_sig = 1;
        vnlruproc_kicks++;
        wakeup(vnlruproc);
    }
}

static void
vnlru_kick_cond(void)
{

    if (vnlru_read_freevnodes() > wantfreevnodes)
        return;

    if (vnlruproc_sig)
        return;
    mtx_lock(&vnode_list_mtx);
    vnlru_kick_locked();
    mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

    if (vnlruproc_sig) {
        vnlruproc_sig = 0;
        wakeup(&vnlruproc_sig);
    }
    msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in presence of tons of free
 * vnodes, but without having to rewrite the machinery at this time.  Said
 * behavior boils down to continuously trying to reclaim all kinds of vnodes
 * (cycling through all levels of "force") when the count is transiently above
 * limit.  This happens a lot when all vnodes are used up and vn_alloc
 * speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling); the above only tries to get it out of
 * the way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
1665 static long
vnlru_proc_light_pick(void)1666 vnlru_proc_light_pick(void)
1667 {
1668 u_long rnumvnodes, rfreevnodes;
1669
1670 if (vstir || vnlruproc_sig == 1)
1671 return (-1);
1672
1673 rnumvnodes = atomic_load_long(&numvnodes);
1674 rfreevnodes = vnlru_read_freevnodes();
1675
1676 /*
1677 * vnode limit might have changed and now we may be at a significant
1678 * excess. Bail if we can't sort it out with free vnodes.
1679 *
1680 * Due to atomic updates the count can legitimately go above
1681 * the limit for a short period, don't bother doing anything in
1682 * that case.
1683 */
1684 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
1685 if (rnumvnodes - rfreevnodes >= desiredvnodes ||
1686 rfreevnodes <= wantfreevnodes) {
1687 return (-1);
1688 }
1689
1690 return (rnumvnodes - desiredvnodes);
1691 }
1692
1693 /*
1694 * Don't try to reach wantfreevnodes target if there are too few vnodes
1695 * to begin with.
1696 */
1697 if (rnumvnodes < wantfreevnodes) {
1698 return (0);
1699 }
1700
1701 if (rfreevnodes < wantfreevnodes) {
1702 return (-1);
1703 }
1704
1705 return (0);
1706 }
1707
1708 static bool
1709 vnlru_proc_light(void)
1710 {
1711 long freecount;
1712
1713 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1714
1715 freecount = vnlru_proc_light_pick();
1716 if (freecount == -1)
1717 return (false);
1718
1719 if (freecount != 0) {
1720 vnlru_free_vnlru(freecount);
1721 }
1722
1723 mtx_lock(&vnode_list_mtx);
1724 vnlru_proc_sleep();
1725 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1726 return (true);
1727 }
1728
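/*
 * The light path is consulted first on every iteration of the daemon; a
 * sketch of the dispatch at the top of vnlru_proc() below:
 *
 *	if (force == 0 && vnlru_proc_light())
 *		continue;	(goals met by recycling free vnodes only)
 *
 * Returning false falls through to the full reclamation machinery.
 */
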
1729 static u_long uma_reclaim_calls;
1730 SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, uma_reclaim_calls, CTLFLAG_RD | CTLFLAG_STATS,
1731 &uma_reclaim_calls, 0, "Number of calls to uma_reclaim");
1732
1733 static void
1734 vnlru_proc(void)
1735 {
1736 u_long rnumvnodes, rfreevnodes, target;
1737 unsigned long onumvnodes;
1738 int done, force, trigger, usevnodes;
1739 bool reclaim_nc_src, want_reread;
1740
1741 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1742 SHUTDOWN_PRI_FIRST);
1743
1744 force = 0;
1745 want_reread = false;
1746 for (;;) {
1747 kproc_suspend_check(vnlruproc);
1748
1749 if (force == 0 && vnlru_proc_light())
1750 continue;
1751
1752 mtx_lock(&vnode_list_mtx);
1753 rnumvnodes = atomic_load_long(&numvnodes);
1754
1755 if (want_reread) {
1756 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
1757 want_reread = false;
1758 }
1759
1760 /*
1761 * If numvnodes is too large (due to desiredvnodes being
1762 * adjusted using its sysctl, or emergency growth), first
1763 * try to reduce it by discarding free vnodes.
1764 */
1765 if (rnumvnodes > desiredvnodes + 10) {
1766 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes);
1767 mtx_lock(&vnode_list_mtx);
1768 rnumvnodes = atomic_load_long(&numvnodes);
1769 }
1770 /*
1771 * Sleep if the vnode cache is in a good state. This is
1772 * when it is not over-full and has space for about a 4%
1773 * or 9% expansion (by growing its size or moderately
1774 * reducing its free vnode count). Otherwise, try to reclaim
1775 * space for a 10% expansion.
1776 */
1777 if (vstir && force == 0) {
1778 force = 1;
1779 vstir = false;
1780 }
1781 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
1782 vnlru_proc_sleep();
1783 continue;
1784 }
1785 rfreevnodes = vnlru_read_freevnodes();
1786
1787 onumvnodes = rnumvnodes;
1788 /*
1789 * Calculate parameters for recycling. These are the same
1790 * throughout the loop to give some semblance of fairness.
1791 * The trigger point is to avoid recycling vnodes with lots
1792 * of resident pages. We aren't trying to free memory; we
1793 * are trying to recycle or at least free vnodes.
1794 */
1795 if (rnumvnodes <= desiredvnodes)
1796 usevnodes = rnumvnodes - rfreevnodes;
1797 else
1798 usevnodes = rnumvnodes;
1799 if (usevnodes <= 0)
1800 usevnodes = 1;
1801 /*
1802 * The trigger value is chosen to be conservatively
1803 * large to ensure that it alone doesn't prevent
1804 * making progress. The value can easily be so large that
1805 * it is effectively infinite in some congested and
1806 * misconfigured cases, and this is necessary. Normally
1807 * it is about 8 to 100 (pages), which is quite large.
1808 */
1809 trigger = vm_cnt.v_page_count * 2 / usevnodes;
1810 if (force < 2)
1811 trigger = vsmalltrigger;
1812 reclaim_nc_src = force >= 3;
1813 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
1814 target = target / 10 + 1;
1815 done = vlrureclaim(reclaim_nc_src, trigger, target);
1816 mtx_unlock(&vnode_list_mtx);
1817 /*
1818 * Total number of vnodes can transiently go slightly above the
1819 * limit (see vn_alloc_hard), no need to call uma_reclaim if
1820 * this happens.
1821 */
1822 if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes &&
1823 numvnodes <= desiredvnodes) {
1824 uma_reclaim_calls++;
1825 uma_reclaim(UMA_RECLAIM_DRAIN);
1826 }
1827 if (done == 0) {
1828 if (force == 0 || force == 1) {
1829 force = 2;
1830 continue;
1831 }
1832 if (force == 2) {
1833 force = 3;
1834 continue;
1835 }
1836 want_reread = true;
1837 force = 0;
1838 vnlru_nowhere++;
1839 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1840 } else {
1841 want_reread = true;
1842 kern_yield(PRI_USER);
1843 }
1844 }
1845 }
1846
1847 static struct kproc_desc vnlru_kp = {
1848 "vnlru",
1849 vnlru_proc,
1850 &vnlruproc
1851 };
1852 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1853 &vnlru_kp);
1854
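/*
 * The SYSINIT above starts the daemon via kproc_start() early in boot.
 * A hypothetical subsystem would register its own kernel process the
 * same way ("foo" names are illustrative only):
 *
 *	static struct proc *foo_proc;
 *	static void foo_main(void);
 *	static struct kproc_desc foo_kp = { "foo", foo_main, &foo_proc };
 *	SYSINIT(foo, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
 *	    &foo_kp);
 */
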
1855 /*
1856 * Routines having to do with the management of the vnode table.
1857 */
1858
1859 /*
1860 * Try to recycle a freed vnode.
1861 */
1862 static int
1863 vtryrecycle(struct vnode *vp, bool isvnlru)
1864 {
1865 struct mount *vnmp;
1866
1867 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1868 VNPASS(vp->v_holdcnt > 0, vp);
1869 /*
1870 * This vnode may be found and locked via some other list; if so we
1871 * can't recycle it yet.
1872 */
1873 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1874 CTR2(KTR_VFS,
1875 "%s: impossible to recycle, vp %p lock is already held",
1876 __func__, vp);
1877 vdrop_recycle(vp);
1878 return (EWOULDBLOCK);
1879 }
1880 /*
1881 * Don't recycle if its filesystem is being suspended.
1882 */
1883 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1884 VOP_UNLOCK(vp);
1885 CTR2(KTR_VFS,
1886 "%s: impossible to recycle, cannot start the write for %p",
1887 __func__, vp);
1888 vdrop_recycle(vp);
1889 return (EBUSY);
1890 }
1891 /*
1892 * If we got this far, we need to acquire the interlock and see if
1893 * anyone picked up this vnode from another list. If not, we will
1894 * mark it with DOOMED via vgonel() so that anyone who does find it
1895 * will skip over it.
1896 */
1897 VI_LOCK(vp);
1898 if (vp->v_usecount) {
1899 VOP_UNLOCK(vp);
1900 vdropl_recycle(vp);
1901 vn_finished_write(vnmp);
1902 CTR2(KTR_VFS,
1903 "%s: impossible to recycle, %p is already referenced",
1904 __func__, vp);
1905 return (EBUSY);
1906 }
1907 if (!VN_IS_DOOMED(vp)) {
1908 if (isvnlru)
1909 recycles_free_count++;
1910 else
1911 counter_u64_add(direct_recycles_free_count, 1);
1912 vgonel(vp);
1913 }
1914 VOP_UNLOCK(vp);
1915 vdropl_recycle(vp);
1916 vn_finished_write(vnmp);
1917 return (0);
1918 }
1919
1920 /*
1921 * Allocate a new vnode.
1922 *
1923 * The operation never returns an error. Returning an error was disabled
1924 * in r145385 (dated 2005) with the following comment:
1925 *
1926 * XXX Not all VFS_VGET/ffs_vget callers check returns.
1927 *
1928 * Given the age of this commit (almost 15 years at the time of writing this
1929 * comment), restoring the ability to fail requires a significant audit of
1930 * all codepaths.
1931 *
1932 * The routine can try to free a vnode or stall for up to 1 second waiting for
1933 * vnlru to clear things up, but ultimately always performs an M_WAITOK allocation.
1934 */
1935 static u_long vn_alloc_cyclecount;
1936 static u_long vn_alloc_sleeps;
1937
1938 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0,
1939 "Number of times vnode allocation blocked waiting on vnlru");
1940
1941 static struct vnode * __noinline
1942 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped)
1943 {
1944 u_long rfreevnodes;
1945
1946 if (bumped) {
1947 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) {
1948 atomic_subtract_long(&numvnodes, 1);
1949 bumped = false;
1950 }
1951 }
1952
1953 mtx_lock(&vnode_list_mtx);
1954
1955 if (vn_alloc_cyclecount != 0) {
1956 rnumvnodes = atomic_load_long(&numvnodes);
1957 if (rnumvnodes + 1 < desiredvnodes) {
1958 vn_alloc_cyclecount = 0;
1959 mtx_unlock(&vnode_list_mtx);
1960 goto alloc;
1961 }
1962
1963 rfreevnodes = vnlru_read_freevnodes();
1964 if (rfreevnodes < wantfreevnodes) {
1965 if (vn_alloc_cyclecount++ >= rfreevnodes) {
1966 vn_alloc_cyclecount = 0;
1967 vstir = true;
1968 }
1969 } else {
1970 vn_alloc_cyclecount = 0;
1971 }
1972 }
1973
1974 /*
1975 * Grow the vnode cache if it will not be above its target max after
1976 * growing. Otherwise, if there is at least one free vnode, try to
1977 * reclaim 1 item from it before growing the cache (possibly above its
1978 * target max if the reclamation failed or is delayed).
1979 */
1980 if (vnlru_free_locked_direct(1) > 0)
1981 goto alloc;
1982 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
1983 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
1984 /*
1985 * Wait for space for a new vnode.
1986 */
1987 if (bumped) {
1988 atomic_subtract_long(&numvnodes, 1);
1989 bumped = false;
1990 }
1991 mtx_lock(&vnode_list_mtx);
1992 vnlru_kick_locked();
1993 vn_alloc_sleeps++;
1994 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
1995 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
1996 vnlru_read_freevnodes() > 1)
1997 vnlru_free_locked_direct(1);
1998 else
1999 mtx_unlock(&vnode_list_mtx);
2000 }
2001 alloc:
2002 mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
2003 if (!bumped)
2004 atomic_add_long(&numvnodes, 1);
2005 vnlru_kick_cond();
2006 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2007 }
2008
2009 static struct vnode *
2010 vn_alloc(struct mount *mp)
2011 {
2012 u_long rnumvnodes;
2013
2014 if (__predict_false(vn_alloc_cyclecount != 0))
2015 return (vn_alloc_hard(mp, 0, false));
2016 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
2017 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) {
2018 return (vn_alloc_hard(mp, rnumvnodes, true));
2019 }
2020
2021 return (uma_zalloc_smr(vnode_zone, M_WAITOK));
2022 }
2023
2024 static void
2025 vn_free(struct vnode *vp)
2026 {
2027
2028 atomic_subtract_long(&numvnodes, 1);
2029 uma_zfree_smr(vnode_zone, vp);
2030 }
2031
2032 /*
2033 * Allocate a new vnode.
2034 */
2035 int
2036 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
2037 struct vnode **vpp)
2038 {
2039 struct vnode *vp;
2040 struct thread *td;
2041 struct lock_object *lo;
2042
2043 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
2044
2045 KASSERT(vops->registered,
2046 ("%s: not registered vector op %p\n", __func__, vops));
2047 cache_validate_vop_vector(mp, vops);
2048
2049 td = curthread;
2050 if (td->td_vp_reserved != NULL) {
2051 vp = td->td_vp_reserved;
2052 td->td_vp_reserved = NULL;
2053 } else {
2054 vp = vn_alloc(mp);
2055 }
2056 counter_u64_add(vnodes_created, 1);
2057
2058 vn_set_state(vp, VSTATE_UNINITIALIZED);
2059
2060 /*
2061 * Locks are given the generic name "vnode" when created.
2062 * Follow the historic practice of using the filesystem
2063 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
2064 *
2065 * Locks live in a witness group keyed on their name. Thus,
2066 * when a lock is renamed, it must also move from the witness
2067 * group of its old name to the witness group of its new name.
2068 *
2069 * The change only needs to be made when the vnode moves
2070 * from one filesystem type to another. We ensure that each
2071 * filesystem uses a single static name pointer for its tag so
2072 * that we can compare pointers rather than doing a strcmp().
2073 */
2074 lo = &vp->v_vnlock->lock_object;
2075 #ifdef WITNESS
2076 if (lo->lo_name != tag) {
2077 #endif
2078 lo->lo_name = tag;
2079 #ifdef WITNESS
2080 WITNESS_DESTROY(lo);
2081 WITNESS_INIT(lo, tag);
2082 }
2083 #endif
2084 /*
2085 * By default, don't allow shared locks unless filesystems opt-in.
2086 */
2087 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
2088 /*
2089 * Finalize various vnode identity bits.
2090 */
2091 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
2092 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
2093 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
2094 vp->v_type = VNON;
2095 vp->v_op = vops;
2096 vp->v_irflag = 0;
2097 v_init_counters(vp);
2098 vn_seqc_init(vp);
2099 vp->v_bufobj.bo_ops = &buf_ops_bio;
2100 #ifdef DIAGNOSTIC
2101 if (mp == NULL && vops != &dead_vnodeops)
2102 printf("NULL mp in getnewvnode(9), tag %s\n", tag);
2103 #endif
2104 #ifdef MAC
2105 mac_vnode_init(vp);
2106 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
2107 mac_vnode_associate_singlelabel(mp, vp);
2108 #endif
2109 if (mp != NULL) {
2110 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
2111 }
2112
2113 /*
2114 * For the filesystems which do not use vfs_hash_insert(),
2115 * still initialize v_hash to have vfs_hash_index() useful.
2116 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
2117 * its own hashing.
2118 */
2119 vp->v_hash = (uintptr_t)vp >> vnsz2log;
2120
2121 *vpp = vp;
2122 return (0);
2123 }
2124
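/*
 * A condensed, hypothetical sketch of how a filesystem's VFS_VGET()
 * typically drives the routine above (error handling elided, "foofs"
 * names are illustrative only):
 *
 *	struct vnode *vp;
 *
 *	error = getnewvnode("foofs", mp, &foofs_vnodeops, &vp);
 *	vp->v_data = foofs_node;
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);	(reclaims vp on failure, see below)
 */
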
2125 void
2126 getnewvnode_reserve(void)
2127 {
2128 struct thread *td;
2129
2130 td = curthread;
2131 MPASS(td->td_vp_reserved == NULL);
2132 td->td_vp_reserved = vn_alloc(NULL);
2133 }
2134
2135 void
2136 getnewvnode_drop_reserve(void)
2137 {
2138 struct thread *td;
2139
2140 td = curthread;
2141 if (td->td_vp_reserved != NULL) {
2142 vn_free(td->td_vp_reserved);
2143 td->td_vp_reserved = NULL;
2144 }
2145 }
2146
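/*
 * The pair above lets a thread pre-allocate a vnode before taking
 * filesystem locks; getnewvnode() consumes td_vp_reserved in preference
 * to allocating.  A hedged usage sketch:
 *
 *	getnewvnode_reserve();
 *	(acquire locks which must not be held across vn_alloc())
 *	error = getnewvnode(tag, mp, vops, &vp);
 *	(release locks)
 *	getnewvnode_drop_reserve();	(frees the reserve if still unused)
 */
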
2147 static void __noinline
2148 freevnode(struct vnode *vp)
2149 {
2150 struct bufobj *bo;
2151
2152 /*
2153 * The vnode has been marked for destruction, so free it.
2154 *
2155 * The vnode will be returned to the zone where it will
2156 * normally remain until it is needed for another vnode. We
2157 * need to cleanup (or verify that the cleanup has already
2158 * been done) any residual data left from its current use
2159 * so as not to contaminate the freshly allocated vnode.
2160 */
2161 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2162 /*
2163 * Paired with vgone.
2164 */
2165 vn_seqc_write_end_free(vp);
2166
2167 bo = &vp->v_bufobj;
2168 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2169 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
2170 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2171 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2172 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2173 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2174 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2175 ("clean blk trie not empty"));
2176 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2177 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2178 ("dirty blk trie not empty"));
2179 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
2180 ("Dangling rangelock waiters"));
2181 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
2182 ("Leaked inactivation"));
2183 VI_UNLOCK(vp);
2184 cache_assert_no_entries(vp);
2185
2186 #ifdef MAC
2187 mac_vnode_destroy(vp);
2188 #endif
2189 if (vp->v_pollinfo != NULL) {
2190 /*
2191 * Use LK_NOWAIT to shut up witness about the lock. We may get
2192 * here while having another vnode locked when trying to
2193 * satisfy a lookup and needing to recycle.
2194 */
2195 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT);
2196 destroy_vpollinfo(vp->v_pollinfo);
2197 VOP_UNLOCK(vp);
2198 vp->v_pollinfo = NULL;
2199 }
2200 vp->v_mountedhere = NULL;
2201 vp->v_unpcb = NULL;
2202 vp->v_rdev = NULL;
2203 vp->v_fifoinfo = NULL;
2204 vp->v_iflag = 0;
2205 vp->v_vflag = 0;
2206 bo->bo_flag = 0;
2207 vn_free(vp);
2208 }
2209
2210 /*
2211 * Delete from old mount point vnode list, if on one.
2212 */
2213 static void
2214 delmntque(struct vnode *vp)
2215 {
2216 struct mount *mp;
2217
2218 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
2219
2220 mp = vp->v_mount;
2221 MNT_ILOCK(mp);
2222 VI_LOCK(vp);
2223 vp->v_mount = NULL;
2224 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
2225 ("bad mount point vnode list size"));
2226 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2227 mp->mnt_nvnodelistsize--;
2228 MNT_REL(mp);
2229 MNT_IUNLOCK(mp);
2230 /*
2231 * The caller expects the interlock to be still held.
2232 */
2233 ASSERT_VI_LOCKED(vp, __func__);
2234 }
2235
2236 static int
2237 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr)
2238 {
2239
2240 KASSERT(vp->v_mount == NULL,
2241 ("insmntque: vnode already on per mount vnode list"));
2242 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
2243 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) {
2244 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
2245 } else {
2246 KASSERT(!dtr,
2247 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup",
2248 __func__));
2249 }
2250
2251 /*
2252 * We acquire the vnode interlock early to ensure that the
2253 * vnode cannot be recycled by another process releasing a
2254 * holdcnt on it before we get it on both the vnode list
2255 * and the active vnode list. The mount mutex protects only
2256 * manipulation of the vnode list and the vnode freelist
2257 * mutex protects only manipulation of the active vnode list.
2258 * Hence the need to hold the vnode interlock throughout.
2259 */
2260 MNT_ILOCK(mp);
2261 VI_LOCK(vp);
2262 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
2263 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
2264 mp->mnt_nvnodelistsize == 0)) &&
2265 (vp->v_vflag & VV_FORCEINSMQ) == 0) {
2266 VI_UNLOCK(vp);
2267 MNT_IUNLOCK(mp);
2268 if (dtr) {
2269 vp->v_data = NULL;
2270 vp->v_op = &dead_vnodeops;
2271 vgone(vp);
2272 vput(vp);
2273 }
2274 return (EBUSY);
2275 }
2276 vp->v_mount = mp;
2277 MNT_REF(mp);
2278 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2279 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
2280 ("neg mount point vnode list size"));
2281 mp->mnt_nvnodelistsize++;
2282 VI_UNLOCK(vp);
2283 MNT_IUNLOCK(mp);
2284 return (0);
2285 }
2286
2287 /*
2288 * Insert into list of vnodes for the new mount point, if available.
2289 * insmntque() reclaims the vnode on insertion failure, insmntque1()
2290 * leaves handling of the vnode to the caller.
2291 */
2292 int
2293 insmntque(struct vnode *vp, struct mount *mp)
2294 {
2295 return (insmntque1_int(vp, mp, true));
2296 }
2297
2298 int
2299 insmntque1(struct vnode *vp, struct mount *mp)
2300 {
2301 return (insmntque1_int(vp, mp, false));
2302 }
2303
2304 /*
2305 * Flush out and invalidate all buffers associated with a bufobj
2306 * Called with the underlying object locked.
2307 */
2308 int
2309 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
2310 {
2311 int error;
2312
2313 BO_LOCK(bo);
2314 if (flags & V_SAVE) {
2315 error = bufobj_wwait(bo, slpflag, slptimeo);
2316 if (error) {
2317 BO_UNLOCK(bo);
2318 return (error);
2319 }
2320 if (bo->bo_dirty.bv_cnt > 0) {
2321 BO_UNLOCK(bo);
2322 do {
2323 error = BO_SYNC(bo, MNT_WAIT);
2324 } while (error == ERELOOKUP);
2325 if (error != 0)
2326 return (error);
2327 BO_LOCK(bo);
2328 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
2329 BO_UNLOCK(bo);
2330 return (EBUSY);
2331 }
2332 }
2333 }
2334 /*
2335 * If you alter this loop please notice that interlock is dropped and
2336 * reacquired in flushbuflist. Special care is needed to ensure that
2337 * no race conditions occur from this.
2338 */
2339 do {
2340 error = flushbuflist(&bo->bo_clean,
2341 flags, bo, slpflag, slptimeo);
2342 if (error == 0 && !(flags & V_CLEANONLY))
2343 error = flushbuflist(&bo->bo_dirty,
2344 flags, bo, slpflag, slptimeo);
2345 if (error != 0 && error != EAGAIN) {
2346 BO_UNLOCK(bo);
2347 return (error);
2348 }
2349 } while (error != 0);
2350
2351 /*
2352 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
2353 * have write I/O in-progress but if there is a VM object then the
2354 * VM object can also have read-I/O in-progress.
2355 */
2356 do {
2357 bufobj_wwait(bo, 0, 0);
2358 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
2359 BO_UNLOCK(bo);
2360 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
2361 BO_LOCK(bo);
2362 }
2363 } while (bo->bo_numoutput > 0);
2364 BO_UNLOCK(bo);
2365
2366 /*
2367 * Destroy the copy in the VM cache, too.
2368 */
2369 if (bo->bo_object != NULL &&
2370 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
2371 VM_OBJECT_WLOCK(bo->bo_object);
2372 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
2373 OBJPR_CLEANONLY : 0);
2374 VM_OBJECT_WUNLOCK(bo->bo_object);
2375 }
2376
2377 #ifdef INVARIANTS
2378 BO_LOCK(bo);
2379 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
2380 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
2381 bo->bo_clean.bv_cnt > 0))
2382 panic("vinvalbuf: flush failed");
2383 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
2384 bo->bo_dirty.bv_cnt > 0)
2385 panic("vinvalbuf: flush dirty failed");
2386 BO_UNLOCK(bo);
2387 #endif
2388 return (0);
2389 }
2390
2391 /*
2392 * Flush out and invalidate all buffers associated with a vnode.
2393 * Called with the underlying object locked.
2394 */
2395 int
2396 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2397 {
2398
2399 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2400 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
2401 if (vp->v_object != NULL && vp->v_object->handle != vp)
2402 return (0);
2403 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
2404 }
2405
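/*
 * Callers commonly pass V_SAVE to write dirty buffers back before
 * invalidating, and no flags to discard everything; a hedged sketch:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);	(sync, then invalidate)
 *	...
 *	error = vinvalbuf(vp, 0, 0, 0);		(just throw it all away)
 */
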
2406 /*
2407 * Flush out buffers on the specified list.
2408 *
2409 */
2410 static int
2411 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
2412 int slptimeo)
2413 {
2414 struct buf *bp, *nbp;
2415 int retval, error;
2416 daddr_t lblkno;
2417 b_xflags_t xflags;
2418
2419 ASSERT_BO_WLOCKED(bo);
2420
2421 retval = 0;
2422 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
2423 /*
2424 * If we are flushing both V_NORMAL and V_ALT buffers then
2425 * do not skip any buffers. If we are flushing only V_NORMAL
2426 * buffers then skip buffers marked as BX_ALTDATA. If we are
2427 * flushing only V_ALT buffers then skip buffers not marked
2428 * as BX_ALTDATA.
2429 */
2430 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) &&
2431 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) ||
2432 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) {
2433 continue;
2434 }
2435 if (nbp != NULL) {
2436 lblkno = nbp->b_lblkno;
2437 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
2438 }
2439 retval = EAGAIN;
2440 error = BUF_TIMELOCK(bp,
2441 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
2442 "flushbuf", slpflag, slptimeo);
2443 if (error) {
2444 BO_LOCK(bo);
2445 return (error != ENOLCK ? error : EAGAIN);
2446 }
2447 KASSERT(bp->b_bufobj == bo,
2448 ("bp %p wrong b_bufobj %p should be %p",
2449 bp, bp->b_bufobj, bo));
2450 /*
2451 * XXX Since there are no node locks for NFS, I
2452 * believe there is a slight chance that a delayed
2453 * write will occur while sleeping just above, so
2454 * check for it.
2455 */
2456 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
2457 (flags & V_SAVE)) {
2458 bremfree(bp);
2459 bp->b_flags |= B_ASYNC;
2460 bwrite(bp);
2461 BO_LOCK(bo);
2462 return (EAGAIN); /* XXX: why not loop ? */
2463 }
2464 bremfree(bp);
2465 bp->b_flags |= (B_INVAL | B_RELBUF);
2466 bp->b_flags &= ~B_ASYNC;
2467 brelse(bp);
2468 BO_LOCK(bo);
2469 if (nbp == NULL)
2470 break;
2471 nbp = gbincore(bo, lblkno);
2472 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2473 != xflags)
2474 break; /* nbp invalid */
2475 }
2476 return (retval);
2477 }
2478
2479 int
2480 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
2481 {
2482 struct buf *bp;
2483 int error;
2484 daddr_t lblkno;
2485
2486 ASSERT_BO_LOCKED(bo);
2487
2488 for (lblkno = startn;;) {
2489 again:
2490 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno);
2491 if (bp == NULL || bp->b_lblkno >= endn ||
2492 bp->b_lblkno < startn)
2493 break;
2494 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
2495 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
2496 if (error != 0) {
2497 BO_RLOCK(bo);
2498 if (error == ENOLCK)
2499 goto again;
2500 return (error);
2501 }
2502 KASSERT(bp->b_bufobj == bo,
2503 ("bp %p wrong b_bufobj %p should be %p",
2504 bp, bp->b_bufobj, bo));
2505 lblkno = bp->b_lblkno + 1;
2506 if ((bp->b_flags & B_MANAGED) == 0)
2507 bremfree(bp);
2508 bp->b_flags |= B_RELBUF;
2509 /*
2510 * In the VMIO case, use the B_NOREUSE flag to hint that the
2511 * pages backing each buffer in the range are unlikely to be
2512 * reused. Dirty buffers will have the hint applied once
2513 * they've been written.
2514 */
2515 if ((bp->b_flags & B_VMIO) != 0)
2516 bp->b_flags |= B_NOREUSE;
2517 brelse(bp);
2518 BO_RLOCK(bo);
2519 }
2520 return (0);
2521 }
2522
2523 /*
2524 * Truncate a file's buffer and pages to a specified length. This
2525 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
2526 * sync activity.
2527 */
2528 int
2529 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2530 {
2531 struct buf *bp, *nbp;
2532 struct bufobj *bo;
2533 daddr_t startlbn;
2534
2535 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
2536 vp, blksize, (uintmax_t)length);
2537
2538 /*
2539 * Round up to the *next* lbn.
2540 */
2541 startlbn = howmany(length, blksize);
2542
2543 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
2544
2545 bo = &vp->v_bufobj;
2546 restart_unlocked:
2547 BO_LOCK(bo);
2548
2549 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
2550 ;
2551
2552 if (length > 0) {
2553 /*
2554 * Write out vnode metadata, e.g. indirect blocks.
2555 */
2556 restartsync:
2557 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2558 if (bp->b_lblkno >= 0)
2559 continue;
2560 /*
2561 * Since we hold the vnode lock this should only
2562 * fail if we're racing with the buf daemon.
2563 */
2564 if (BUF_LOCK(bp,
2565 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2566 BO_LOCKPTR(bo)) == ENOLCK)
2567 goto restart_unlocked;
2568
2569 VNASSERT((bp->b_flags & B_DELWRI), vp,
2570 ("buf(%p) on dirty queue without DELWRI", bp));
2571
2572 bremfree(bp);
2573 bawrite(bp);
2574 BO_LOCK(bo);
2575 goto restartsync;
2576 }
2577 }
2578
2579 bufobj_wwait(bo, 0, 0);
2580 BO_UNLOCK(bo);
2581 vnode_pager_setsize(vp, length);
2582
2583 return (0);
2584 }
2585
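/*
 * A worked example of the lbn rounding above: with blksize 32768 and
 * length 40000, startlbn = howmany(40000, 32768) = 2, so lbn 1 (bytes
 * 32768-65535), which still contains valid data below the new length,
 * is preserved while lbns >= 2 are invalidated.
 */
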
2586 /*
2587 * Invalidate the cached pages of a file's buffer within the range of block
2588 * numbers [startlbn, endlbn).
2589 */
2590 void
2591 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2592 int blksize)
2593 {
2594 struct bufobj *bo;
2595 off_t start, end;
2596
2597 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
2598
2599 start = blksize * startlbn;
2600 end = blksize * endlbn;
2601
2602 bo = &vp->v_bufobj;
2603 BO_LOCK(bo);
2604 MPASS(blksize == bo->bo_bsize);
2605
2606 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
2607 ;
2608
2609 BO_UNLOCK(bo);
2610 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
2611 }
2612
2613 static int
2614 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2615 daddr_t startlbn, daddr_t endlbn)
2616 {
2617 struct buf *bp, *nbp;
2618 bool anyfreed;
2619
2620 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
2621 ASSERT_BO_LOCKED(bo);
2622
2623 do {
2624 anyfreed = false;
2625 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
2626 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
2627 continue;
2628 if (BUF_LOCK(bp,
2629 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2630 BO_LOCKPTR(bo)) == ENOLCK) {
2631 BO_LOCK(bo);
2632 return (EAGAIN);
2633 }
2634
2635 bremfree(bp);
2636 bp->b_flags |= B_INVAL | B_RELBUF;
2637 bp->b_flags &= ~B_ASYNC;
2638 brelse(bp);
2639 anyfreed = true;
2640
2641 BO_LOCK(bo);
2642 if (nbp != NULL &&
2643 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
2644 nbp->b_vp != vp ||
2645 (nbp->b_flags & B_DELWRI) != 0))
2646 return (EAGAIN);
2647 }
2648
2649 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2650 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
2651 continue;
2652 if (BUF_LOCK(bp,
2653 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2654 BO_LOCKPTR(bo)) == ENOLCK) {
2655 BO_LOCK(bo);
2656 return (EAGAIN);
2657 }
2658 bremfree(bp);
2659 bp->b_flags |= B_INVAL | B_RELBUF;
2660 bp->b_flags &= ~B_ASYNC;
2661 brelse(bp);
2662 anyfreed = true;
2663
2664 BO_LOCK(bo);
2665 if (nbp != NULL &&
2666 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
2667 (nbp->b_vp != vp) ||
2668 (nbp->b_flags & B_DELWRI) == 0))
2669 return (EAGAIN);
2670 }
2671 } while (anyfreed);
2672 return (0);
2673 }
2674
2675 static void
2676 buf_vlist_remove(struct buf *bp)
2677 {
2678 struct bufv *bv;
2679 b_xflags_t flags;
2680
2681 flags = bp->b_xflags;
2682
2683 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2684 ASSERT_BO_WLOCKED(bp->b_bufobj);
2685 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 &&
2686 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN),
2687 ("%s: buffer %p has invalid queue state", __func__, bp));
2688
2689 if ((flags & BX_VNDIRTY) != 0)
2690 bv = &bp->b_bufobj->bo_dirty;
2691 else
2692 bv = &bp->b_bufobj->bo_clean;
2693 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
2694 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
2695 bv->bv_cnt--;
2696 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
2697 }
2698
2699 /*
2700 * Add the buffer to the sorted clean or dirty block list. Return zero on
2701 * success, EEXIST if a buffer with this identity already exists, or another
2702 * error on allocation failure.
2703 */
2704 static inline int
2705 buf_vlist_find_or_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
2706 {
2707 struct bufv *bv;
2708 struct buf *n;
2709 int error;
2710
2711 ASSERT_BO_WLOCKED(bo);
2712 KASSERT((bo->bo_flag & BO_NOBUFS) == 0,
2713 ("buf_vlist_add: bo %p does not allow bufs", bo));
2714 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
2715 ("dead bo %p", bo));
2716 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags,
2717 ("buf_vlist_add: b_xflags %#x not set on bp %p", xflags, bp));
2718
2719 if (xflags & BX_VNDIRTY)
2720 bv = &bo->bo_dirty;
2721 else
2722 bv = &bo->bo_clean;
2723
2724 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, &n);
2725 if (n == NULL) {
2726 KASSERT(error != EEXIST,
2727 ("buf_vlist_add: EEXIST but no existing buf found: bp %p",
2728 bp));
2729 } else {
2730 KASSERT((uint64_t)n->b_lblkno <= (uint64_t)bp->b_lblkno,
2731 ("buf_vlist_add: out of order insert/lookup: bp %p n %p",
2732 bp, n));
2733 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST),
2734 ("buf_vlist_add: inconsistent result for existing buf: "
2735 "error %d bp %p n %p", error, bp, n));
2736 }
2737 if (error != 0)
2738 return (error);
2739
2740 /* Keep the list ordered. */
2741 if (n == NULL) {
2742 KASSERT(TAILQ_EMPTY(&bv->bv_hd) ||
2743 (uint64_t)bp->b_lblkno <
2744 (uint64_t)TAILQ_FIRST(&bv->bv_hd)->b_lblkno,
2745 ("buf_vlist_add: queue order: "
2746 "%p should be before first %p",
2747 bp, TAILQ_FIRST(&bv->bv_hd)));
2748 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
2749 } else {
2750 KASSERT(TAILQ_NEXT(n, b_bobufs) == NULL ||
2751 (uint64_t)bp->b_lblkno <
2752 (uint64_t)TAILQ_NEXT(n, b_bobufs)->b_lblkno,
2753 ("buf_vlist_add: queue order: "
2754 "%p should be before next %p",
2755 bp, TAILQ_NEXT(n, b_bobufs)));
2756 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
2757 }
2758
2759 bv->bv_cnt++;
2760 return (0);
2761 }
2762
2763 /*
2764 * Add the buffer to the sorted clean or dirty block list.
2765 *
2766 * NOTE: xflags is passed as a constant, optimizing this inline function!
2767 */
2768 static void
2769 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
2770 {
2771 int error;
2772
2773 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
2774 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
2775 bp->b_xflags |= xflags;
2776 error = buf_vlist_find_or_add(bp, bo, xflags);
2777 if (error)
2778 panic("buf_vlist_add: error=%d", error);
2779 }
2780
2781 /*
2782 * Look up a buffer using the buffer tries.
2783 */
2784 struct buf *
2785 gbincore(struct bufobj *bo, daddr_t lblkno)
2786 {
2787 struct buf *bp;
2788
2789 ASSERT_BO_LOCKED(bo);
2790 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
2791 if (bp != NULL)
2792 return (bp);
2793 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno));
2794 }
2795
2796 /*
2797 * Look up a buf using the buffer tries, without the bufobj lock. This relies
2798 * on SMR for safe lookup, and bufs being in a no-free zone to provide type
2799 * stability of the result. Like other lockless lookups, the found buf may
2800 * already be invalid by the time this function returns.
2801 */
2802 struct buf *
2803 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno)
2804 {
2805 struct buf *bp;
2806
2807 ASSERT_BO_UNLOCKED(bo);
2808 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno);
2809 if (bp != NULL)
2810 return (bp);
2811 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno));
2812 }
2813
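/*
 * Because the unlocked lookup can race with reassignment, callers must
 * revalidate the buf's identity once it is locked; a hedged sketch:
 *
 *	bp = gbincore_unlocked(bo, lblkno);
 *	if (bp != NULL &&
 *	    BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		if (bp->b_bufobj != bo || bp->b_lblkno != lblkno) {
 *			BUF_UNLOCK(bp);	(lost the race, retry locked)
 *		}
 *	}
 */
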
2814 /*
2815 * Associate a buffer with a vnode.
2816 */
2817 int
2818 bgetvp(struct vnode *vp, struct buf *bp)
2819 {
2820 struct bufobj *bo;
2821 int error;
2822
2823 bo = &vp->v_bufobj;
2824 ASSERT_BO_UNLOCKED(bo);
2825 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
2826
2827 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2828 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2829 ("bgetvp: bp already attached! %p", bp));
2830
2831 /*
2832 * Add the buf to the vnode's clean list unless we lost a race and found
2833 * an existing buf in either dirty or clean.
2834 */
2835 bp->b_vp = vp;
2836 bp->b_bufobj = bo;
2837 bp->b_xflags |= BX_VNCLEAN;
2838 error = EEXIST;
2839 BO_LOCK(bo);
2840 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL)
2841 error = buf_vlist_find_or_add(bp, bo, BX_VNCLEAN);
2842 BO_UNLOCK(bo);
2843 if (__predict_true(error == 0)) {
2844 vhold(vp);
2845 return (0);
2846 }
2847 if (error != EEXIST)
2848 panic("bgetvp: buf_vlist_add error: %d", error);
2849 bp->b_vp = NULL;
2850 bp->b_bufobj = NULL;
2851 bp->b_xflags &= ~BX_VNCLEAN;
2852 return (error);
2853 }
2854
2855 /*
2856 * Disassociate a buffer from a vnode.
2857 */
2858 void
2859 brelvp(struct buf *bp)
2860 {
2861 struct bufobj *bo;
2862 struct vnode *vp;
2863
2864 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2865 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
2866
2867 /*
2868 * Delete from old vnode list, if on one.
2869 */
2870 vp = bp->b_vp; /* XXX */
2871 bo = bp->b_bufobj;
2872 BO_LOCK(bo);
2873 buf_vlist_remove(bp);
2874 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2875 bo->bo_flag &= ~BO_ONWORKLST;
2876 mtx_lock(&sync_mtx);
2877 LIST_REMOVE(bo, bo_synclist);
2878 syncer_worklist_len--;
2879 mtx_unlock(&sync_mtx);
2880 }
2881 bp->b_vp = NULL;
2882 bp->b_bufobj = NULL;
2883 BO_UNLOCK(bo);
2884 vdrop(vp);
2885 }
2886
2887 /*
2888 * Add an item to the syncer work queue.
2889 */
2890 static void
2891 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
2892 {
2893 int slot;
2894
2895 ASSERT_BO_WLOCKED(bo);
2896
2897 mtx_lock(&sync_mtx);
2898 if (bo->bo_flag & BO_ONWORKLST)
2899 LIST_REMOVE(bo, bo_synclist);
2900 else {
2901 bo->bo_flag |= BO_ONWORKLST;
2902 syncer_worklist_len++;
2903 }
2904
2905 if (delay > syncer_maxdelay - 2)
2906 delay = syncer_maxdelay - 2;
2907 slot = (syncer_delayno + delay) & syncer_mask;
2908
2909 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
2910 mtx_unlock(&sync_mtx);
2911 }
2912
2913 static int
2914 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2915 {
2916 int error, len;
2917
2918 mtx_lock(&sync_mtx);
2919 len = syncer_worklist_len - sync_vnode_count;
2920 mtx_unlock(&sync_mtx);
2921 error = SYSCTL_OUT(req, &len, sizeof(len));
2922 return (error);
2923 }
2924
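/*
 * The handler above exports the count of non-syncer vnodes on the
 * worklist, readable from userland as, e.g., "sysctl vfs.worklist_len".
 */
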
2925 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len,
2926 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0,
2927 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2928
2929 static struct proc *updateproc;
2930 static void sched_sync(void);
2931 static struct kproc_desc up_kp = {
2932 "syncer",
2933 sched_sync,
2934 &updateproc
2935 };
2936 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2937
2938 static int
2939 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2940 {
2941 struct vnode *vp;
2942 struct mount *mp;
2943
2944 *bo = LIST_FIRST(slp);
2945 if (*bo == NULL)
2946 return (0);
2947 vp = bo2vnode(*bo);
2948 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2949 return (1);
2950 /*
2951 * We use vhold in case the vnode does not
2952 * successfully sync. vhold prevents the vnode from
2953 * going away when we unlock the sync_mtx so that
2954 * we can acquire the vnode interlock.
2955 */
2956 vholdl(vp);
2957 mtx_unlock(&sync_mtx);
2958 VI_UNLOCK(vp);
2959 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2960 vdrop(vp);
2961 mtx_lock(&sync_mtx);
2962 return (*bo == LIST_FIRST(slp));
2963 }
2964 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 ||
2965 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp,
2966 ("suspended mp syncing vp %p", vp));
2967 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2968 (void) VOP_FSYNC(vp, MNT_LAZY, td);
2969 VOP_UNLOCK(vp);
2970 vn_finished_write(mp);
2971 BO_LOCK(*bo);
2972 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2973 /*
2974 * Put us back on the worklist. The worklist
2975 * routine will remove us from our current
2976 * position and then add us back in at a later
2977 * position.
2978 */
2979 vn_syncer_add_to_worklist(*bo, syncdelay);
2980 }
2981 BO_UNLOCK(*bo);
2982 vdrop(vp);
2983 mtx_lock(&sync_mtx);
2984 return (0);
2985 }
2986
2987 static int first_printf = 1;
2988
2989 /*
2990 * System filesystem synchronizer daemon.
2991 */
2992 static void
2993 sched_sync(void)
2994 {
2995 struct synclist *next, *slp;
2996 struct bufobj *bo;
2997 long starttime;
2998 struct thread *td = curthread;
2999 int last_work_seen;
3000 int net_worklist_len;
3001 int syncer_final_iter;
3002 int error;
3003
3004 last_work_seen = 0;
3005 syncer_final_iter = 0;
3006 syncer_state = SYNCER_RUNNING;
3007 starttime = time_uptime;
3008 td->td_pflags |= TDP_NORUNNINGBUF;
3009
3010 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
3011 SHUTDOWN_PRI_LAST);
3012
3013 mtx_lock(&sync_mtx);
3014 for (;;) {
3015 if (syncer_state == SYNCER_FINAL_DELAY &&
3016 syncer_final_iter == 0) {
3017 mtx_unlock(&sync_mtx);
3018 kproc_suspend_check(td->td_proc);
3019 mtx_lock(&sync_mtx);
3020 }
3021 net_worklist_len = syncer_worklist_len - sync_vnode_count;
3022 if (syncer_state != SYNCER_RUNNING &&
3023 starttime != time_uptime) {
3024 if (first_printf) {
3025 printf("\nSyncing disks, vnodes remaining... ");
3026 first_printf = 0;
3027 }
3028 printf("%d ", net_worklist_len);
3029 }
3030 starttime = time_uptime;
3031
3032 /*
3033 * Push files whose dirty time has expired. Be careful
3034 * of interrupt race on slp queue.
3035 *
3036 * Skip over empty worklist slots when shutting down.
3037 */
3038 do {
3039 slp = &syncer_workitem_pending[syncer_delayno];
3040 syncer_delayno += 1;
3041 if (syncer_delayno == syncer_maxdelay)
3042 syncer_delayno = 0;
3043 next = &syncer_workitem_pending[syncer_delayno];
3044 /*
3045 * If the worklist has wrapped since it
3046 * was emptied of all but syncer vnodes,
3047 * switch to the FINAL_DELAY state and run
3048 * for one more second.
3049 */
3050 if (syncer_state == SYNCER_SHUTTING_DOWN &&
3051 net_worklist_len == 0 &&
3052 last_work_seen == syncer_delayno) {
3053 syncer_state = SYNCER_FINAL_DELAY;
3054 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
3055 }
3056 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
3057 syncer_worklist_len > 0);
3058
3059 /*
3060 * Keep track of the last time there was anything
3061 * on the worklist other than syncer vnodes.
3062 * Return to the SHUTTING_DOWN state if any
3063 * new work appears.
3064 */
3065 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
3066 last_work_seen = syncer_delayno;
3067 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
3068 syncer_state = SYNCER_SHUTTING_DOWN;
3069 while (!LIST_EMPTY(slp)) {
3070 error = sync_vnode(slp, &bo, td);
3071 if (error == 1) {
3072 LIST_REMOVE(bo, bo_synclist);
3073 LIST_INSERT_HEAD(next, bo, bo_synclist);
3074 continue;
3075 }
3076
3077 if (first_printf == 0) {
3078 /*
3079 * Drop the sync mutex, because some watchdog
3080 * drivers need to sleep while patting the watchdog.
3081 */
3082 mtx_unlock(&sync_mtx);
3083 wdog_kern_pat(WD_LASTVAL);
3084 mtx_lock(&sync_mtx);
3085 }
3086 }
3087 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
3088 syncer_final_iter--;
3089 /*
3090 * The variable rushjob allows the kernel to speed up the
3091 * processing of the filesystem syncer process. A rushjob
3092 * value of N tells the filesystem syncer to process the next
3093 * N seconds worth of work on its queue ASAP. Currently rushjob
3094 * is used by the soft update code to speed up the filesystem
3095 * syncer process when the incore state is getting so far
3096 * ahead of the disk that the kernel memory pool is being
3097 * threatened with exhaustion.
3098 */
3099 if (rushjob > 0) {
3100 rushjob -= 1;
3101 continue;
3102 }
3103 /*
3104 * Just sleep for a short period of time between
3105 * iterations when shutting down to allow some I/O
3106 * to happen.
3107 *
3108 * If it has taken us less than a second to process the
3109 * current work, then wait. Otherwise start right over
3110 * again. We can still lose time if any single round
3111 * takes more than two seconds, but it does not really
3112 * matter as we are just trying to generally pace the
3113 * filesystem activity.
3114 */
3115 if (syncer_state != SYNCER_RUNNING ||
3116 time_uptime == starttime) {
3117 thread_lock(td);
3118 sched_prio(td, PPAUSE);
3119 thread_unlock(td);
3120 }
3121 if (syncer_state != SYNCER_RUNNING)
3122 cv_timedwait(&sync_wakeup, &sync_mtx,
3123 hz / SYNCER_SHUTDOWN_SPEEDUP);
3124 else if (time_uptime == starttime)
3125 cv_timedwait(&sync_wakeup, &sync_mtx, hz);
3126 }
3127 }
3128
3129 /*
3130 * Request the syncer daemon to speed up its work.
3131 * We never push it to speed up more than half of its
3132 * normal turn time; otherwise it could take over the CPU.
3133 */
3134 int
3135 speedup_syncer(void)
3136 {
3137 int ret = 0;
3138
3139 mtx_lock(&sync_mtx);
3140 if (rushjob < syncdelay / 2) {
3141 rushjob += 1;
3142 stat_rush_requests += 1;
3143 ret = 1;
3144 }
3145 mtx_unlock(&sync_mtx);
3146 cv_broadcast(&sync_wakeup);
3147 return (ret);
3148 }
3149
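/*
 * Each successful call bumps rushjob by one; the "rushjob > 0" check in
 * sched_sync() then skips one sleep per unit, so N queued requests make
 * the syncer process N seconds worth of its wheel back to back.
 */
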
3150 /*
3151 * Tell the syncer to speed up its work and run through its work
3152 * list several times, then tell it to shut down.
3153 */
3154 static void
3155 syncer_shutdown(void *arg, int howto)
3156 {
3157
3158 if (howto & RB_NOSYNC)
3159 return;
3160 mtx_lock(&sync_mtx);
3161 syncer_state = SYNCER_SHUTTING_DOWN;
3162 rushjob = 0;
3163 mtx_unlock(&sync_mtx);
3164 cv_broadcast(&sync_wakeup);
3165 kproc_shutdown(arg, howto);
3166 }
3167
3168 void
3169 syncer_suspend(void)
3170 {
3171
3172 syncer_shutdown(updateproc, 0);
3173 }
3174
3175 void
3176 syncer_resume(void)
3177 {
3178
3179 mtx_lock(&sync_mtx);
3180 first_printf = 1;
3181 syncer_state = SYNCER_RUNNING;
3182 mtx_unlock(&sync_mtx);
3183 cv_broadcast(&sync_wakeup);
3184 kproc_resume(updateproc);
3185 }
3186
3187 /*
3188 * Move the buffer between the clean and dirty lists of its vnode.
3189 */
3190 void
3191 reassignbuf(struct buf *bp)
3192 {
3193 struct vnode *vp;
3194 struct bufobj *bo;
3195 int delay;
3196 #ifdef INVARIANTS
3197 struct bufv *bv;
3198 #endif
3199
3200 vp = bp->b_vp;
3201 bo = bp->b_bufobj;
3202
3203 KASSERT((bp->b_flags & B_PAGING) == 0,
3204 ("%s: cannot reassign paging buffer %p", __func__, bp));
3205
3206 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
3207 bp, bp->b_vp, bp->b_flags);
3208
3209 BO_LOCK(bo);
3210 buf_vlist_remove(bp);
3211
3212 /*
3213 * If dirty, put on list of dirty buffers; otherwise insert onto list
3214 * of clean buffers.
3215 */
3216 if (bp->b_flags & B_DELWRI) {
3217 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
3218 switch (vp->v_type) {
3219 case VDIR:
3220 delay = dirdelay;
3221 break;
3222 case VCHR:
3223 delay = metadelay;
3224 break;
3225 default:
3226 delay = filedelay;
3227 }
3228 vn_syncer_add_to_worklist(bo, delay);
3229 }
3230 buf_vlist_add(bp, bo, BX_VNDIRTY);
3231 } else {
3232 buf_vlist_add(bp, bo, BX_VNCLEAN);
3233
3234 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
3235 mtx_lock(&sync_mtx);
3236 LIST_REMOVE(bo, bo_synclist);
3237 syncer_worklist_len--;
3238 mtx_unlock(&sync_mtx);
3239 bo->bo_flag &= ~BO_ONWORKLST;
3240 }
3241 }
3242 #ifdef INVARIANTS
3243 bv = &bo->bo_clean;
3244 bp = TAILQ_FIRST(&bv->bv_hd);
3245 KASSERT(bp == NULL || bp->b_bufobj == bo,
3246 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3247 bp = TAILQ_LAST(&bv->bv_hd, buflists);
3248 KASSERT(bp == NULL || bp->b_bufobj == bo,
3249 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3250 bv = &bo->bo_dirty;
3251 bp = TAILQ_FIRST(&bv->bv_hd);
3252 KASSERT(bp == NULL || bp->b_bufobj == bo,
3253 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3254 bp = TAILQ_LAST(&bv->bv_hd, buflists);
3255 KASSERT(bp == NULL || bp->b_bufobj == bo,
3256 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
3257 #endif
3258 BO_UNLOCK(bo);
3259 }
3260
3261 static void
3262 v_init_counters(struct vnode *vp)
3263 {
3264
3265 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
3266 vp, ("%s called for an initialized vnode", __FUNCTION__));
3267 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
3268
3269 refcount_init(&vp->v_holdcnt, 1);
3270 refcount_init(&vp->v_usecount, 1);
3271 }
3272
3273 /*
3274 * Get a usecount on a vnode.
3275 *
3276 * vget and vget_finish may fail to lock the vnode if they lose a race against
3277 * it being doomed. LK_RETRY can be passed in flags to lock it anyway.
3278 *
3279 * Consumers which don't guarantee liveness of the vnode can use SMR to
3280 * try to get a reference. Note this operation can fail since the vnode
3281 * may be on its way to being freed by the time they get to it.
3282 */
3283 enum vgetstate
3284 vget_prep_smr(struct vnode *vp)
3285 {
3286 enum vgetstate vs;
3287
3288 VFS_SMR_ASSERT_ENTERED();
3289
3290 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3291 vs = VGET_USECOUNT;
3292 } else {
3293 if (vhold_smr(vp))
3294 vs = VGET_HOLDCNT;
3295 else
3296 vs = VGET_NONE;
3297 }
3298 return (vs);
3299 }
3300
3301 enum vgetstate
3302 vget_prep(struct vnode *vp)
3303 {
3304 enum vgetstate vs;
3305
3306 if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
3307 vs = VGET_USECOUNT;
3308 } else {
3309 vhold(vp);
3310 vs = VGET_HOLDCNT;
3311 }
3312 return (vs);
3313 }
3314
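/*
 * A hedged sketch of the SMR-protected acquisition pattern served by
 * vget_prep_smr() (as used by lockless lookups such as the name cache):
 *
 *	vfs_smr_enter();
 *	vp = (result of a lockless lookup);
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		(the vnode was being freed, fall back to a locked lookup)
 *	error = vget_finish(vp, LK_SHARED, vs);
 */
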
3315 void
3316 vget_abort(struct vnode *vp, enum vgetstate vs)
3317 {
3318
3319 switch (vs) {
3320 case VGET_USECOUNT:
3321 vrele(vp);
3322 break;
3323 case VGET_HOLDCNT:
3324 vdrop(vp);
3325 break;
3326 default:
3327 __assert_unreachable();
3328 }
3329 }
3330
3331 int
3332 vget(struct vnode *vp, int flags)
3333 {
3334 enum vgetstate vs;
3335
3336 vs = vget_prep(vp);
3337 return (vget_finish(vp, flags, vs));
3338 }
3339
3340 int
3341 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3342 {
3343 int error;
3344
3345 if ((flags & LK_INTERLOCK) != 0)
3346 ASSERT_VI_LOCKED(vp, __func__);
3347 else
3348 ASSERT_VI_UNLOCKED(vp, __func__);
3349 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3350 VNPASS(vp->v_holdcnt > 0, vp);
3351 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3352
3353 error = vn_lock(vp, flags);
3354 if (__predict_false(error != 0)) {
3355 vget_abort(vp, vs);
3356 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
3357 vp);
3358 return (error);
3359 }
3360
3361 vget_finish_ref(vp, vs);
3362 return (0);
3363 }
3364
3365 void
3366 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3367 {
3368 int old;
3369
3370 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3371 VNPASS(vp->v_holdcnt > 0, vp);
3372 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3373
3374 if (vs == VGET_USECOUNT)
3375 return;
3376
3377 /*
3378 * We hold the vnode. If the usecount is 0 it will be utilized to keep
3379 * the vnode around. Otherwise someone else lent their hold count and
3380 * we have to drop ours.
3381 */
3382 old = atomic_fetchadd_int(&vp->v_usecount, 1);
3383 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3384 if (old != 0) {
3385 #ifdef INVARIANTS
3386 old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3387 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3388 #else
3389 refcount_release(&vp->v_holdcnt);
3390 #endif
3391 }
3392 }
3393
3394 void
3395 vref(struct vnode *vp)
3396 {
3397 enum vgetstate vs;
3398
3399 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3400 vs = vget_prep(vp);
3401 vget_finish_ref(vp, vs);
3402 }
3403
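/*
 * Like vref(), but may only be used when the caller already guarantees
 * the vnode has a live use count, as the assertion below enforces.
 */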
3404 void
3405 vrefact(struct vnode *vp)
3406 {
3407 int old __diagused;
3408
3409 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3410 old = refcount_acquire(&vp->v_usecount);
3411 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3412 }
3413
3414 void
3415 vlazy(struct vnode *vp)
3416 {
3417 struct mount *mp;
3418
3419 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3420
3421 if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3422 return;
3423 /*
3424 * We may get here for inactive routines after the vnode got doomed.
3425 */
3426 if (VN_IS_DOOMED(vp))
3427 return;
3428 mp = vp->v_mount;
3429 mtx_lock(&mp->mnt_listmtx);
3430 if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
3431 vp->v_mflag |= VMP_LAZYLIST;
3432 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3433 mp->mnt_lazyvnodelistsize++;
3434 }
3435 mtx_unlock(&mp->mnt_listmtx);
3436 }
3437
3438 static void
3439 vunlazy(struct vnode *vp)
3440 {
3441 struct mount *mp;
3442
3443 ASSERT_VI_LOCKED(vp, __func__);
3444 VNPASS(!VN_IS_DOOMED(vp), vp);
3445
3446 mp = vp->v_mount;
3447 mtx_lock(&mp->mnt_listmtx);
3448 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3449 /*
3450 * Don't remove the vnode from the lazy list if another thread
3451 * has increased the hold count. It may have re-enqueued the
3452 * vnode to the lazy list and is now responsible for its
3453 * removal.
3454 */
3455 if (vp->v_holdcnt == 0) {
3456 vp->v_mflag &= ~VMP_LAZYLIST;
3457 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3458 mp->mnt_lazyvnodelistsize--;
3459 }
3460 mtx_unlock(&mp->mnt_listmtx);
3461 }
3462
3463 /*
3464 * This routine is only meant to be called from vgonel prior to dooming
3465 * the vnode.
3466 */
3467 static void
3468 vunlazy_gone(struct vnode *vp)
3469 {
3470 struct mount *mp;
3471
3472 ASSERT_VOP_ELOCKED(vp, __func__);
3473 ASSERT_VI_LOCKED(vp, __func__);
3474 VNPASS(!VN_IS_DOOMED(vp), vp);
3475
3476 if (vp->v_mflag & VMP_LAZYLIST) {
3477 mp = vp->v_mount;
3478 mtx_lock(&mp->mnt_listmtx);
3479 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3480 vp->v_mflag &= ~VMP_LAZYLIST;
3481 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3482 mp->mnt_lazyvnodelistsize--;
3483 mtx_unlock(&mp->mnt_listmtx);
3484 }
3485 }
3486
3487 static void
3488 vdefer_inactive(struct vnode *vp)
3489 {
3490
3491 ASSERT_VI_LOCKED(vp, __func__);
3492 VNPASS(vp->v_holdcnt > 0, vp);
3493 if (VN_IS_DOOMED(vp)) {
3494 vdropl(vp);
3495 return;
3496 }
3497 if (vp->v_iflag & VI_DEFINACT) {
3498 VNPASS(vp->v_holdcnt > 1, vp);
3499 vdropl(vp);
3500 return;
3501 }
3502 if (vp->v_usecount > 0) {
3503 vp->v_iflag &= ~VI_OWEINACT;
3504 vdropl(vp);
3505 return;
3506 }
3507 vlazy(vp);
3508 vp->v_iflag |= VI_DEFINACT;
3509 VI_UNLOCK(vp);
3510 atomic_add_long(&deferred_inact, 1);
3511 }
3512
3513 static void
3514 vdefer_inactive_unlocked(struct vnode *vp)
3515 {
3516
3517 VI_LOCK(vp);
3518 if ((vp->v_iflag & VI_OWEINACT) == 0) {
3519 vdropl(vp);
3520 return;
3521 }
3522 vdefer_inactive(vp);
3523 }
3524
3525 enum vput_op { VRELE, VPUT, VUNREF };
3526
3527 /*
3528 * Handle ->v_usecount transitioning to 0.
3529 *
3530 * By releasing the last usecount we take ownership of the hold count which
3531 * provides liveness of the vnode, meaning we have to vdrop.
3532 *
3533 * For all vnodes we may need to perform inactive processing. It requires an
3534 * exclusive lock on the vnode, while it is legal to call here with only a
3535 * shared lock (or no locks). If locking the vnode in an expected manner fails,
3536 * inactive processing gets deferred to the syncer.
3537 *
3538 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
3539 * on the lock being held all the way until VOP_INACTIVE. This in particular
3540 * happens with UFS which adds half-constructed vnodes to the hash, where they
3541 * can be found by other code.
3542 */
3543 static void
3544 vput_final(struct vnode *vp, enum vput_op func)
3545 {
3546 int error;
3547 bool want_unlock;
3548
3549 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3550 VNPASS(vp->v_holdcnt > 0, vp);
3551
3552 VI_LOCK(vp);
3553
3554 /*
3555 * By the time we got here someone else might have transitioned
3556 * the count back to > 0.
3557 */
3558 if (vp->v_usecount > 0)
3559 goto out;
3560
3561 /*
3562 * If the vnode is doomed vgone already performed inactive processing
3563 * (if needed).
3564 */
3565 if (VN_IS_DOOMED(vp))
3566 goto out;
3567
3568 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0))
3569 goto out;
3570
3571 if (vp->v_iflag & VI_DOINGINACT)
3572 goto out;
3573
3574 /*
3575 * Locking operations here will drop the interlock and possibly the
3576 * vnode lock, opening a window where the vnode can get doomed all the
3577 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to
3578 * perform inactive.
3579 */
3580 vp->v_iflag |= VI_OWEINACT;
3581 want_unlock = false;
3582 error = 0;
3583 switch (func) {
3584 case VRELE:
3585 switch (VOP_ISLOCKED(vp)) {
3586 case LK_EXCLUSIVE:
3587 break;
3588 case LK_EXCLOTHER:
3589 case 0:
3590 want_unlock = true;
3591 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3592 VI_LOCK(vp);
3593 break;
3594 default:
3595 /*
3596 * The lock has at least one sharer, but we have no way
3597 * to conclude whether this is us. Play it safe and
3598 * defer processing.
3599 */
3600 error = EAGAIN;
3601 break;
3602 }
3603 break;
3604 case VPUT:
3605 want_unlock = true;
3606 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3607 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
3608 LK_NOWAIT);
3609 VI_LOCK(vp);
3610 }
3611 break;
3612 case VUNREF:
3613 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3614 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3615 VI_LOCK(vp);
3616 }
3617 break;
3618 }
3619 if (error == 0) {
3620 if (func == VUNREF) {
3621 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
3622 ("recursive vunref"));
3623 vp->v_vflag |= VV_UNREF;
3624 }
3625 for (;;) {
3626 error = vinactive(vp);
3627 if (want_unlock)
3628 VOP_UNLOCK(vp);
3629 if (error != ERELOOKUP || !want_unlock)
3630 break;
3631 VOP_LOCK(vp, LK_EXCLUSIVE);
3632 }
3633 if (func == VUNREF)
3634 vp->v_vflag &= ~VV_UNREF;
3635 vdropl(vp);
3636 } else {
3637 vdefer_inactive(vp);
3638 }
3639 return;
3640 out:
3641 if (func == VPUT)
3642 VOP_UNLOCK(vp);
3643 vdropl(vp);
3644 }
3645
3646 /*
3647 * Decrement ->v_usecount for a vnode.
3648 *
3649 * Releasing the last use count requires additional processing, see vput_final
3650 * above for details.
3651 *
3652 * Comment above each variant denotes lock state on entry and exit.
3653 */
3654
3655 /*
3656 * in: any
3657 * out: same as passed in
3658 */
3659 void
3660 vrele(struct vnode *vp)
3661 {
3662
3663 ASSERT_VI_UNLOCKED(vp, __func__);
3664 if (!refcount_release(&vp->v_usecount))
3665 return;
3666 vput_final(vp, VRELE);
3667 }
3668
3669 /*
3670 * in: locked
3671 * out: unlocked
3672 */
3673 void
3674 vput(struct vnode *vp)
3675 {
3676
3677 ASSERT_VOP_LOCKED(vp, __func__);
3678 ASSERT_VI_UNLOCKED(vp, __func__);
3679 if (!refcount_release(&vp->v_usecount)) {
3680 VOP_UNLOCK(vp);
3681 return;
3682 }
3683 vput_final(vp, VPUT);
3684 }
3685
3686 /*
3687 * in: locked
3688 * out: locked
3689 */
3690 void
3691 vunref(struct vnode *vp)
3692 {
3693
3694 ASSERT_VOP_LOCKED(vp, __func__);
3695 ASSERT_VI_UNLOCKED(vp, __func__);
3696 if (!refcount_release(&vp->v_usecount))
3697 return;
3698 vput_final(vp, VUNREF);
3699 }
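
/*
 * Usage sketch (illustrative, not part of the original file): with the
 * vnode locked, e.g. after a successful vget(), vput() is the natural
 * way to drop both the lock and the use count:
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		... operate on the locked vnode ...
 *		vput(vp);	// unlocks and releases the use count
 *	}
 *
 * With no vnode lock held only vrele() is legal; inactive processing
 * may then get deferred to the syncer as described in vput_final:
 *
 *	vref(vp);
 *	... stash vp, use it later without the vnode lock ...
 *	vrele(vp);
 */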
3700
3701 void
3702 vhold(struct vnode *vp)
3703 {
3704 int old;
3705
3706 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3707 old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3708 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3709 ("%s: wrong hold count %d", __func__, old));
3710 if (old == 0)
3711 vfs_freevnodes_dec();
3712 }
3713
3714 void
3715 vholdnz(struct vnode *vp)
3716 {
3717
3718 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3719 #ifdef INVARIANTS
3720 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3721 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3722 ("%s: wrong hold count %d", __func__, old));
3723 #else
3724 atomic_add_int(&vp->v_holdcnt, 1);
3725 #endif
3726 }
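
/*
 * Sketch (added for illustration): a hold count keeps the vnode memory
 * from being freed, but does not prevent it from being doomed. A caller
 * which must drop its locks yet keep the pointer valid would do:
 *
 *	vhold(vp);
 *	... drop locks, sleep; vp may get reclaimed here, but not freed ...
 *	vdrop(vp);
 *
 * vholdnz() is the variant for callers which know the count is already
 * non-zero, skipping the 0->1 transition accounting.
 */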
3727
3728 /*
3729 * Grab a hold count unless the vnode is freed.
3730 *
3731 * Only use this routine if vfs smr is the only protection you have against
3732 * freeing the vnode.
3733 *
3734 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag
3735 * is not set. After the flag is set the vnode becomes immutable to anyone but
3736 * the thread which managed to set the flag.
3737 *
3738 * It may be tempting to replace the loop with:
3739 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3740 * if (count & VHOLD_NO_SMR) {
3741 * backpedal and error out;
3742 * }
3743 *
3744 * However, while this is more performant, it hinders debugging by eliminating
3745 * the previously mentioned invariant.
3746 */
3747 bool
3748 vhold_smr(struct vnode *vp)
3749 {
3750 int count;
3751
3752 VFS_SMR_ASSERT_ENTERED();
3753
3754 count = atomic_load_int(&vp->v_holdcnt);
3755 for (;;) {
3756 if (count & VHOLD_NO_SMR) {
3757 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3758 ("non-zero hold count with flags %d\n", count));
3759 return (false);
3760 }
3761 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3762 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3763 if (count == 0)
3764 vfs_freevnodes_dec();
3765 return (true);
3766 }
3767 }
3768 }
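
/*
 * Usage sketch (illustrative, modelled on lockless lookup): the caller
 * must be inside an SMR section and must treat failure as "fall back to
 * the locked path":
 *
 *	vfs_smr_enter();
 *	vp = ... lockless lookup ...;
 *	if (vp == NULL || !vhold_smr(vp)) {
 *		vfs_smr_exit();
 *		return (EAGAIN);
 *	}
 *	vfs_smr_exit();
 *	... vp is now held and cannot be freed ...
 */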
3769
3770 /*
3771 * Hold a free vnode for recycling.
3772 *
3773 * Note: vnode_init references this comment.
3774 *
3775 * Attempts to recycle only need the global vnode list lock and have no use for
3776 * SMR.
3777 *
3778 * However, vnodes get inserted into the global list before they get fully
3779 * initialized and stay there until UMA decides to free the memory. This in
3780 * particular means the target can be found before it becomes usable and after
3781 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to
3782 * VHOLD_NO_SMR.
3783 *
3784 * Note: the vnode may gain more references after we transition the count 0->1.
3785 */
3786 static bool
3787 vhold_recycle_free(struct vnode *vp)
3788 {
3789 int count;
3790
3791 mtx_assert(&vnode_list_mtx, MA_OWNED);
3792
3793 count = atomic_load_int(&vp->v_holdcnt);
3794 for (;;) {
3795 if (count & VHOLD_NO_SMR) {
3796 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3797 ("non-zero hold count with flags %d\n", count));
3798 return (false);
3799 }
3800 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3801 if (count > 0) {
3802 return (false);
3803 }
3804 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3805 vfs_freevnodes_dec();
3806 return (true);
3807 }
3808 }
3809 }
3810
3811 static void __noinline
3812 vdbatch_process(struct vdbatch *vd)
3813 {
3814 struct vnode *vp;
3815 int i;
3816
3817 mtx_assert(&vd->lock, MA_OWNED);
3818 MPASS(curthread->td_pinned > 0);
3819 MPASS(vd->index == VDBATCH_SIZE);
3820
3821 /*
3822 * Attempt to requeue the passed batch, but give up easily.
3823 *
3824 * Despite batching the mechanism is prone to transient *significant*
3825 * lock contention, where vnode_list_mtx becomes the primary bottleneck
3826 * if multiple CPUs get here (one real-world example is highly parallel
3827 * do-nothing make, which will stat *tons* of vnodes). Since it is
3828 * quasi-LRU (read: not that great even if fully honoured) just dodge
3829 * the problem. Parties which don't like it are welcome to implement
3830 * something better.
3831 */
3832 critical_enter();
3833 if (mtx_trylock(&vnode_list_mtx)) {
3834 for (i = 0; i < VDBATCH_SIZE; i++) {
3835 vp = vd->tab[i];
3836 vd->tab[i] = NULL;
3837 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3838 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3839 MPASS(vp->v_dbatchcpu != NOCPU);
3840 vp->v_dbatchcpu = NOCPU;
3841 }
3842 mtx_unlock(&vnode_list_mtx);
3843 } else {
3844 counter_u64_add(vnode_skipped_requeues, 1);
3845
3846 for (i = 0; i < VDBATCH_SIZE; i++) {
3847 vp = vd->tab[i];
3848 vd->tab[i] = NULL;
3849 MPASS(vp->v_dbatchcpu != NOCPU);
3850 vp->v_dbatchcpu = NOCPU;
3851 }
3852 }
3853 vd->index = 0;
3854 critical_exit();
3855 }
3856
3857 static void
3858 vdbatch_enqueue(struct vnode *vp)
3859 {
3860 struct vdbatch *vd;
3861
3862 ASSERT_VI_LOCKED(vp, __func__);
3863 VNPASS(!VN_IS_DOOMED(vp), vp);
3864
3865 if (vp->v_dbatchcpu != NOCPU) {
3866 VI_UNLOCK(vp);
3867 return;
3868 }
3869
3870 sched_pin();
3871 vd = DPCPU_PTR(vd);
3872 mtx_lock(&vd->lock);
3873 MPASS(vd->index < VDBATCH_SIZE);
3874 MPASS(vd->tab[vd->index] == NULL);
3875 /*
3876 * A hack: we depend on being pinned so that we know what to put in
3877 * ->v_dbatchcpu.
3878 */
3879 vp->v_dbatchcpu = curcpu;
3880 vd->tab[vd->index] = vp;
3881 vd->index++;
3882 VI_UNLOCK(vp);
3883 if (vd->index == VDBATCH_SIZE)
3884 vdbatch_process(vd);
3885 mtx_unlock(&vd->lock);
3886 sched_unpin();
3887 }
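
/*
 * Flow sketch (added for illustration) of the batching mechanism above:
 *
 *	vdropl(vp)			// hold count drops to 0
 *	    -> vdbatch_enqueue(vp)	// stash vp in the per-CPU buffer
 *	        -> vdbatch_process(vd)	// once VDBATCH_SIZE entries have
 *					// accumulated, requeue them all at
 *					// the tail of vnode_list in one go
 */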
3888
3889 /*
3890 * This routine must only be called for vnodes which are about to be
3891 * deallocated. Supporting dequeue for arbitrary vnodes would require
3892 * validating that the locked batch matches.
3893 */
3894 static void
3895 vdbatch_dequeue(struct vnode *vp)
3896 {
3897 struct vdbatch *vd;
3898 int i;
3899 short cpu;
3900
3901 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp);
3902
3903 cpu = vp->v_dbatchcpu;
3904 if (cpu == NOCPU)
3905 return;
3906
3907 vd = DPCPU_ID_PTR(cpu, vd);
3908 mtx_lock(&vd->lock);
3909 for (i = 0; i < vd->index; i++) {
3910 if (vd->tab[i] != vp)
3911 continue;
3912 vp->v_dbatchcpu = NOCPU;
3913 vd->index--;
3914 vd->tab[i] = vd->tab[vd->index];
3915 vd->tab[vd->index] = NULL;
3916 break;
3917 }
3918 mtx_unlock(&vd->lock);
3919 /*
3920 * Either we dequeued the vnode above or the target CPU beat us to it.
3921 */
3922 MPASS(vp->v_dbatchcpu == NOCPU);
3923 }
3924
3925 /*
3926 * Drop the hold count of the vnode.
3927 *
3928 * It will only get freed if this is the last hold *and* it has been vgone'd.
3929 *
3930 * Because the vnode vm object keeps a hold reference on the vnode if
3931 * there is at least one resident non-cached page, the vnode cannot
3932 * leave the active list without the page cleanup done.
3933 */
3934 static void __noinline
3935 vdropl_final(struct vnode *vp)
3936 {
3937
3938 ASSERT_VI_LOCKED(vp, __func__);
3939 VNPASS(VN_IS_DOOMED(vp), vp);
3940 /*
3941 * Set the VHOLD_NO_SMR flag.
3942 *
3943 * We may be racing against vhold_smr. If they win we can just pretend
3944 * we never got this far; they will vdrop later.
3945 */
3946 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3947 vfs_freevnodes_inc();
3948 VI_UNLOCK(vp);
3949 /*
3950 * We lost the aforementioned race. Any subsequent access is
3951 * invalid as they might have managed to vdropl on their own.
3952 */
3953 return;
3954 }
3955 /*
3956 * Don't bump freevnodes as this one is going away.
3957 */
3958 freevnode(vp);
3959 }
3960
3961 void
3962 vdrop(struct vnode *vp)
3963 {
3964
3965 ASSERT_VI_UNLOCKED(vp, __func__);
3966 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3967 if (refcount_release_if_not_last(&vp->v_holdcnt))
3968 return;
3969 VI_LOCK(vp);
3970 vdropl(vp);
3971 }
3972
3973 static void __always_inline
3974 vdropl_impl(struct vnode *vp, bool enqueue)
3975 {
3976
3977 ASSERT_VI_LOCKED(vp, __func__);
3978 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3979 if (!refcount_release(&vp->v_holdcnt)) {
3980 VI_UNLOCK(vp);
3981 return;
3982 }
3983 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp);
3984 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
3985 if (VN_IS_DOOMED(vp)) {
3986 vdropl_final(vp);
3987 return;
3988 }
3989
3990 vfs_freevnodes_inc();
3991 if (vp->v_mflag & VMP_LAZYLIST) {
3992 vunlazy(vp);
3993 }
3994
3995 if (!enqueue) {
3996 VI_UNLOCK(vp);
3997 return;
3998 }
3999
4000 /*
4001 * Also unlocks the interlock. We can't assert on it as we
4002 * released our hold and by now the vnode might have been
4003 * freed.
4004 */
4005 vdbatch_enqueue(vp);
4006 }
4007
4008 void
4009 vdropl(struct vnode *vp)
4010 {
4011
4012 vdropl_impl(vp, true);
4013 }
4014
4015 /*
4016 * vdrop a vnode when recycling
4017 *
4018 * This is a special case routine only to be used when recycling. It differs
4019 * from regular vdrop by not requeueing the vnode on the LRU list.
4020 *
4021 * Consider a case where vtryrecycle continuously fails with all vnodes (due to
4022 * e.g., frozen writes on the filesystem), filling the batch and causing it to
4023 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a
4024 * loop which can last for as long as writes are frozen.
4025 */
4026 static void
4027 vdropl_recycle(struct vnode *vp)
4028 {
4029
4030 vdropl_impl(vp, false);
4031 }
4032
4033 static void
4034 vdrop_recycle(struct vnode *vp)
4035 {
4036
4037 VI_LOCK(vp);
4038 vdropl_recycle(vp);
4039 }
4040
4041 /*
4042 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
4043 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
4044 */
4045 static int
4046 vinactivef(struct vnode *vp)
4047 {
4048 int error;
4049
4050 ASSERT_VOP_ELOCKED(vp, "vinactive");
4051 ASSERT_VI_LOCKED(vp, "vinactive");
4052 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp);
4053 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4054 vp->v_iflag |= VI_DOINGINACT;
4055 vp->v_iflag &= ~VI_OWEINACT;
4056 VI_UNLOCK(vp);
4057
4058 /*
4059 * Before moving off the active list, we must be sure that any
4060 * modified pages are converted into the vnode's dirty
4061 * buffers, since these will no longer be checked once the
4062 * vnode is on the inactive list.
4063 *
4064 * The write-out of the dirty pages is asynchronous. At the
4065 * point that VOP_INACTIVE() is called, there could still be
4066 * pending I/O and dirty pages in the object.
4067 */
4068 if ((vp->v_vflag & VV_NOSYNC) == 0)
4069 vnode_pager_clean_async(vp);
4070
4071 error = VOP_INACTIVE(vp);
4072 VI_LOCK(vp);
4073 VNPASS(vp->v_iflag & VI_DOINGINACT, vp);
4074 vp->v_iflag &= ~VI_DOINGINACT;
4075 return (error);
4076 }
4077
4078 int
4079 vinactive(struct vnode *vp)
4080 {
4081
4082 ASSERT_VOP_ELOCKED(vp, "vinactive");
4083 ASSERT_VI_LOCKED(vp, "vinactive");
4084 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4085
4086 if ((vp->v_iflag & VI_OWEINACT) == 0)
4087 return (0);
4088 if (vp->v_iflag & VI_DOINGINACT)
4089 return (0);
4090 if (vp->v_usecount > 0) {
4091 vp->v_iflag &= ~VI_OWEINACT;
4092 return (0);
4093 }
4094 return (vinactivef(vp));
4095 }
4096
4097 /*
4098 * Remove any vnodes in the vnode table belonging to mount point mp.
4099 *
4100 * If FORCECLOSE is not specified, there should not be any active ones;
4101 * an error is returned if any are found (nb: this is a user error, not a
4102 * system error). If FORCECLOSE is specified, detach any active vnodes
4103 * that are found.
4104 *
4105 * If WRITECLOSE is set, only flush out regular file vnodes open for
4106 * writing.
4107 *
4108 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
4109 *
4110 * `rootrefs' specifies the base reference count for the root vnode
4111 * of this filesystem. The root vnode is considered busy if its
4112 * v_usecount exceeds this value. On a successful return, vflush()
4113 * will call vrele() on the root vnode exactly rootrefs times.
4114 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
4115 * be zero.
4116 */
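
/*
 * Usage sketch (hypothetical caller; the exact flags are filesystem
 * policy): an unmount path which holds one reference on the root vnode
 * might do:
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *	if (error != 0)
 *		return (error);
 */
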
4117 #ifdef DIAGNOSTIC
4118 static int busyprt = 0; /* print out busy vnodes */
4119 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
4120 #endif
4121
4122 int
4123 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
4124 {
4125 struct vnode *vp, *mvp, *rootvp = NULL;
4126 struct vattr vattr;
4127 int busy = 0, error;
4128
4129 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
4130 rootrefs, flags);
4131 if (rootrefs > 0) {
4132 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
4133 ("vflush: bad args"));
4134 /*
4135 * Get the filesystem root vnode. We can vput() it
4136 * immediately, since with rootrefs > 0, it won't go away.
4137 */
4138 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
4139 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
4140 __func__, error);
4141 return (error);
4142 }
4143 vput(rootvp);
4144 }
4145 loop:
4146 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4147 vholdl(vp);
4148 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4149 if (error) {
4150 vdrop(vp);
4151 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4152 goto loop;
4153 }
4154 /*
4155 * Skip over vnodes marked VV_SYSTEM.
4156 */
4157 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4158 VOP_UNLOCK(vp);
4159 vdrop(vp);
4160 continue;
4161 }
4162 /*
4163 * If WRITECLOSE is set, flush out unlinked but still open
4164 * files (even if open only for reading) and regular file
4165 * vnodes open for writing.
4166 */
4167 if (flags & WRITECLOSE) {
4168 vnode_pager_clean_async(vp);
4169 do {
4170 error = VOP_FSYNC(vp, MNT_WAIT, td);
4171 } while (error == ERELOOKUP);
4172 if (error != 0) {
4173 VOP_UNLOCK(vp);
4174 vdrop(vp);
4175 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4176 return (error);
4177 }
4178 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4179 VI_LOCK(vp);
4180
4181 if ((vp->v_type == VNON ||
4182 (error == 0 && vattr.va_nlink > 0)) &&
4183 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4184 VOP_UNLOCK(vp);
4185 vdropl(vp);
4186 continue;
4187 }
4188 } else
4189 VI_LOCK(vp);
4190 /*
4191 * With v_usecount == 0, all we need to do is clear out the
4192 * vnode data structures and we are done.
4193 *
4194 * If FORCECLOSE is set, forcibly close the vnode.
4195 */
4196 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4197 vgonel(vp);
4198 } else {
4199 busy++;
4200 #ifdef DIAGNOSTIC
4201 if (busyprt)
4202 vn_printf(vp, "vflush: busy vnode ");
4203 #endif
4204 }
4205 VOP_UNLOCK(vp);
4206 vdropl(vp);
4207 }
4208 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
4209 /*
4210 * If just the root vnode is busy, and if its refcount
4211 * is equal to `rootrefs', then go ahead and kill it.
4212 */
4213 VI_LOCK(rootvp);
4214 KASSERT(busy > 0, ("vflush: not busy"));
4215 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
4216 ("vflush: usecount %d < rootrefs %d",
4217 rootvp->v_usecount, rootrefs));
4218 if (busy == 1 && rootvp->v_usecount == rootrefs) {
4219 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
4220 vgone(rootvp);
4221 VOP_UNLOCK(rootvp);
4222 busy = 0;
4223 } else
4224 VI_UNLOCK(rootvp);
4225 }
4226 if (busy) {
4227 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
4228 busy);
4229 return (EBUSY);
4230 }
4231 for (; rootrefs > 0; rootrefs--)
4232 vrele(rootvp);
4233 return (0);
4234 }
4235
4236 /*
4237 * Recycle an unused vnode.
4238 */
4239 int
4240 vrecycle(struct vnode *vp)
4241 {
4242 int recycled;
4243
4244 VI_LOCK(vp);
4245 recycled = vrecyclel(vp);
4246 VI_UNLOCK(vp);
4247 return (recycled);
4248 }
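
/*
 * Usage sketch (hedged; modelled on inactive processing in disk
 * filesystems): once the backing object is known to be dead, the vnode
 * can be recycled right away instead of lingering:
 *
 *	if (inode_is_deallocated(ip))	// hypothetical predicate
 *		(void)vrecycle(vp);	// needs the exclusive vnode lock
 */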
4249
4250 /*
4251 * vrecycle, with the vp interlock held.
4252 */
4253 int
4254 vrecyclel(struct vnode *vp)
4255 {
4256 int recycled;
4257
4258 ASSERT_VOP_ELOCKED(vp, __func__);
4259 ASSERT_VI_LOCKED(vp, __func__);
4260 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4261 recycled = 0;
4262 if (vp->v_usecount == 0) {
4263 recycled = 1;
4264 vgonel(vp);
4265 }
4266 return (recycled);
4267 }
4268
4269 /*
4270 * Eliminate all activity associated with a vnode
4271 * in preparation for reuse.
4272 */
4273 void
4274 vgone(struct vnode *vp)
4275 {
4276 VI_LOCK(vp);
4277 vgonel(vp);
4278 VI_UNLOCK(vp);
4279 }
4280
4281 /*
4282 * Notify upper mounts about reclaimed or unlinked vnode.
4283 */
4284 void
4285 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event)
4286 {
4287 struct mount *mp;
4288 struct mount_upper_node *ump;
4289
4290 mp = atomic_load_ptr(&vp->v_mount);
4291 if (mp == NULL)
4292 return;
4293 if (TAILQ_EMPTY(&mp->mnt_notify))
4294 return;
4295
4296 MNT_ILOCK(mp);
4297 mp->mnt_upper_pending++;
4298 KASSERT(mp->mnt_upper_pending > 0,
4299 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending));
4300 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) {
4301 MNT_IUNLOCK(mp);
4302 switch (event) {
4303 case VFS_NOTIFY_UPPER_RECLAIM:
4304 VFS_RECLAIM_LOWERVP(ump->mp, vp);
4305 break;
4306 case VFS_NOTIFY_UPPER_UNLINK:
4307 VFS_UNLINK_LOWERVP(ump->mp, vp);
4308 break;
4309 }
4310 MNT_ILOCK(mp);
4311 }
4312 mp->mnt_upper_pending--;
4313 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 &&
4314 mp->mnt_upper_pending == 0) {
4315 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER;
4316 wakeup(&mp->mnt_uppers);
4317 }
4318 MNT_IUNLOCK(mp);
4319 }
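
/*
 * Note (added): stacked filesystems such as nullfs register themselves
 * on mnt_notify so they can react when the lower vnode they alias is
 * reclaimed or unlinked underneath them.
 */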
4320
4321 /*
4322 * vgone, with the vp interlock held.
4323 */
4324 static void
4325 vgonel(struct vnode *vp)
4326 {
4327 struct thread *td;
4328 struct mount *mp;
4329 vm_object_t object;
4330 bool active, doinginact, oweinact;
4331
4332 ASSERT_VOP_ELOCKED(vp, "vgonel");
4333 ASSERT_VI_LOCKED(vp, "vgonel");
4334 VNASSERT(vp->v_holdcnt, vp,
4335 ("vgonel: vp %p has no reference.", vp));
4336 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4337 td = curthread;
4338
4339 /*
4340 * Don't vgonel if we're already doomed.
4341 */
4342 if (VN_IS_DOOMED(vp)) {
4343 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \
4344 vn_get_state(vp) == VSTATE_DEAD, vp);
4345 return;
4346 }
4347 /*
4348 * Paired with freevnode.
4349 */
4350 vn_seqc_write_begin_locked(vp);
4351 vunlazy_gone(vp);
4352 vn_irflag_set_locked(vp, VIRF_DOOMED);
4353 vn_set_state(vp, VSTATE_DESTROYING);
4354
4355 /*
4356 * Check to see if the vnode is in use. If so, we have to
4357 * call VOP_CLOSE() and VOP_INACTIVE().
4358 *
4359 * It could be that VOP_INACTIVE() requested reclamation, in
4360 * which case we should avoid recursion, so check
4361 * VI_DOINGINACT. This is not precise but good enough.
4362 */
4363 active = vp->v_usecount > 0;
4364 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4365 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0;
4366
4367 /*
4368 * If we need to do inactive, VI_OWEINACT will be set.
4369 */
4370 if (vp->v_iflag & VI_DEFINACT) {
4371 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
4372 vp->v_iflag &= ~VI_DEFINACT;
4373 vdropl(vp);
4374 } else {
4375 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4376 VI_UNLOCK(vp);
4377 }
4378 cache_purge_vgone(vp);
4379 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
4380
4381 /*
4382 * If purging an active vnode, it must be closed and
4383 * deactivated before being reclaimed.
4384 */
4385 if (active)
4386 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
4387 if (!doinginact) {
4388 do {
4389 if (oweinact || active) {
4390 VI_LOCK(vp);
4391 vinactivef(vp);
4392 oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4393 VI_UNLOCK(vp);
4394 }
4395 } while (oweinact);
4396 }
4397 if (vp->v_type == VSOCK)
4398 vfs_unp_reclaim(vp);
4399
4400 /*
4401 * Clean out any buffers associated with the vnode.
4402 * If the flush fails, just toss the buffers.
4403 */
4404 mp = NULL;
4405 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
4406 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
4407 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
4408 while (vinvalbuf(vp, 0, 0, 0) != 0)
4409 ;
4410 }
4411
4412 BO_LOCK(&vp->v_bufobj);
4413 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
4414 vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
4415 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
4416 vp->v_bufobj.bo_clean.bv_cnt == 0,
4417 ("vp %p bufobj not invalidated", vp));
4418
4419 /*
4420 * For VMIO bufobj, BO_DEAD is set later, or in
4421 * vm_object_terminate() after the object's page queue is
4422 * flushed.
4423 */
4424 object = vp->v_bufobj.bo_object;
4425 if (object == NULL)
4426 vp->v_bufobj.bo_flag |= BO_DEAD;
4427 BO_UNLOCK(&vp->v_bufobj);
4428
4429 /*
4430 * Handle the VM part. Tmpfs handles v_object on its own (the
4431 * OBJT_VNODE check). Nullfs or other bypassing filesystems
4432 * should not touch the object borrowed from the lower vnode
4433 * (the handle check).
4434 */
4435 if (object != NULL && object->type == OBJT_VNODE &&
4436 object->handle == vp)
4437 vnode_destroy_vobject(vp);
4438
4439 /*
4440 * Reclaim the vnode.
4441 */
4442 if (VOP_RECLAIM(vp))
4443 panic("vgone: cannot reclaim");
4444 if (mp != NULL)
4445 vn_finished_secondary_write(mp);
4446 VNASSERT(vp->v_object == NULL, vp,
4447 ("vop_reclaim left v_object vp=%p", vp));
4448 /*
4449 * Clear the advisory locks and wake up waiting threads.
4450 */
4451 if (vp->v_lockf != NULL) {
4452 (void)VOP_ADVLOCKPURGE(vp);
4453 vp->v_lockf = NULL;
4454 }
4455 /*
4456 * Delete from old mount point vnode list.
4457 */
4458 if (vp->v_mount == NULL) {
4459 VI_LOCK(vp);
4460 } else {
4461 delmntque(vp);
4462 ASSERT_VI_LOCKED(vp, "vgonel 2");
4463 }
4464 /*
4465 * Done with purge, reset to the standard lock and invalidate
4466 * the vnode.
4467 */
4468 vp->v_vnlock = &vp->v_lock;
4469 vp->v_op = &dead_vnodeops;
4470 vp->v_type = VBAD;
4471 vn_set_state(vp, VSTATE_DEAD);
4472 }
4473
4474 /*
4475 * Print out a description of a vnode.
4476 */
4477 static const char *const vtypename[] = {
4478 [VNON] = "VNON",
4479 [VREG] = "VREG",
4480 [VDIR] = "VDIR",
4481 [VBLK] = "VBLK",
4482 [VCHR] = "VCHR",
4483 [VLNK] = "VLNK",
4484 [VSOCK] = "VSOCK",
4485 [VFIFO] = "VFIFO",
4486 [VBAD] = "VBAD",
4487 [VMARKER] = "VMARKER",
4488 };
4489 _Static_assert(nitems(vtypename) == VLASTTYPE + 1,
4490 "vnode type name not added to vtypename");
4491
4492 static const char *const vstatename[] = {
4493 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED",
4494 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED",
4495 [VSTATE_DESTROYING] = "VSTATE_DESTROYING",
4496 [VSTATE_DEAD] = "VSTATE_DEAD",
4497 };
4498 _Static_assert(nitems(vstatename) == VLASTSTATE + 1,
4499 "vnode state name not added to vstatename");
4500
4501 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0,
4502 "new hold count flag not added to vn_printf");
4503
4504 void
4505 vn_printf(struct vnode *vp, const char *fmt, ...)
4506 {
4507 va_list ap;
4508 char buf[256], buf2[16];
4509 u_long flags;
4510 u_int holdcnt;
4511 short irflag;
4512
4513 va_start(ap, fmt);
4514 vprintf(fmt, ap);
4515 va_end(ap);
4516 printf("%p: ", (void *)vp);
4517 printf("type %s state %s op %p\n", vtypename[vp->v_type],
4518 vstatename[vp->v_state], vp->v_op);
4519 holdcnt = atomic_load_int(&vp->v_holdcnt);
4520 printf(" usecount %d, writecount %d, refcount %d seqc users %d",
4521 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4522 vp->v_seqc_users);
4523 switch (vp->v_type) {
4524 case VDIR:
4525 printf(" mountedhere %p\n", vp->v_mountedhere);
4526 break;
4527 case VCHR:
4528 printf(" rdev %p\n", vp->v_rdev);
4529 break;
4530 case VSOCK:
4531 printf(" socket %p\n", vp->v_unpcb);
4532 break;
4533 case VFIFO:
4534 printf(" fifoinfo %p\n", vp->v_fifoinfo);
4535 break;
4536 default:
4537 printf("\n");
4538 break;
4539 }
4540 buf[0] = '\0';
4541 buf[1] = '\0';
4542 if (holdcnt & VHOLD_NO_SMR)
4543 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf));
4544 printf(" hold count flags (%s)\n", buf + 1);
4545
4546 buf[0] = '\0';
4547 buf[1] = '\0';
4548 irflag = vn_irflag_read(vp);
4549 if (irflag & VIRF_DOOMED)
4550 strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
4551 if (irflag & VIRF_PGREAD)
4552 strlcat(buf, "|VIRF_PGREAD", sizeof(buf));
4553 if (irflag & VIRF_MOUNTPOINT)
4554 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf));
4555 if (irflag & VIRF_TEXT_REF)
4556 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf));
4557 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF);
4558 if (flags != 0) {
4559 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
4560 strlcat(buf, buf2, sizeof(buf));
4561 }
4562 if (vp->v_vflag & VV_ROOT)
4563 strlcat(buf, "|VV_ROOT", sizeof(buf));
4564 if (vp->v_vflag & VV_ISTTY)
4565 strlcat(buf, "|VV_ISTTY", sizeof(buf));
4566 if (vp->v_vflag & VV_NOSYNC)
4567 strlcat(buf, "|VV_NOSYNC", sizeof(buf));
4568 if (vp->v_vflag & VV_ETERNALDEV)
4569 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
4570 if (vp->v_vflag & VV_CACHEDLABEL)
4571 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
4572 if (vp->v_vflag & VV_VMSIZEVNLOCK)
4573 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf));
4574 if (vp->v_vflag & VV_COPYONWRITE)
4575 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
4576 if (vp->v_vflag & VV_SYSTEM)
4577 strlcat(buf, "|VV_SYSTEM", sizeof(buf));
4578 if (vp->v_vflag & VV_PROCDEP)
4579 strlcat(buf, "|VV_PROCDEP", sizeof(buf));
4580 if (vp->v_vflag & VV_DELETED)
4581 strlcat(buf, "|VV_DELETED", sizeof(buf));
4582 if (vp->v_vflag & VV_MD)
4583 strlcat(buf, "|VV_MD", sizeof(buf));
4584 if (vp->v_vflag & VV_FORCEINSMQ)
4585 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
4586 if (vp->v_vflag & VV_READLINK)
4587 strlcat(buf, "|VV_READLINK", sizeof(buf));
4588 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4589 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM |
4590 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK);
4591 if (flags != 0) {
4592 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
4593 strlcat(buf, buf2, sizeof(buf));
4594 }
4595 if (vp->v_iflag & VI_MOUNT)
4596 strlcat(buf, "|VI_MOUNT", sizeof(buf));
4597 if (vp->v_iflag & VI_DOINGINACT)
4598 strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
4599 if (vp->v_iflag & VI_OWEINACT)
4600 strlcat(buf, "|VI_OWEINACT", sizeof(buf));
4601 if (vp->v_iflag & VI_DEFINACT)
4602 strlcat(buf, "|VI_DEFINACT", sizeof(buf));
4603 if (vp->v_iflag & VI_FOPENING)
4604 strlcat(buf, "|VI_FOPENING", sizeof(buf));
4605 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT |
4606 VI_OWEINACT | VI_DEFINACT | VI_FOPENING);
4607 if (flags != 0) {
4608 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
4609 strlcat(buf, buf2, sizeof(buf));
4610 }
4611 if (vp->v_mflag & VMP_LAZYLIST)
4612 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf));
4613 flags = vp->v_mflag & ~(VMP_LAZYLIST);
4614 if (flags != 0) {
4615 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags);
4616 strlcat(buf, buf2, sizeof(buf));
4617 }
4618 printf(" flags (%s)", buf + 1);
4619 if (mtx_owned(VI_MTX(vp)))
4620 printf(" VI_LOCKed");
4621 printf("\n");
4622 if (vp->v_object != NULL)
4623 printf(" v_object %p ref %d pages %d "
4624 "cleanbuf %d dirtybuf %d\n",
4625 vp->v_object, vp->v_object->ref_count,
4626 vp->v_object->resident_page_count,
4627 vp->v_bufobj.bo_clean.bv_cnt,
4628 vp->v_bufobj.bo_dirty.bv_cnt);
4629 printf(" ");
4630 lockmgr_printinfo(vp->v_vnlock);
4631 if (vp->v_data != NULL)
4632 VOP_PRINT(vp);
4633 }
4634
4635 #ifdef DDB
4636 /*
4637 * List all of the locked vnodes in the system.
4638 * Called when debugging the kernel.
4639 */
4640 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE)
4641 {
4642 struct mount *mp;
4643 struct vnode *vp;
4644
4645 /*
4646 * Note: because this is DDB, we can't obey the locking semantics
4647 * for these structures, which means we could catch an inconsistent
4648 * state and dereference a nasty pointer. Not much to be done
4649 * about that.
4650 */
4651 db_printf("Locked vnodes\n");
4652 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4653 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4654 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
4655 vn_printf(vp, "vnode ");
4656 }
4657 }
4658 }
4659
4660 /*
4661 * Show details about the given vnode.
4662 */
4663 DB_SHOW_COMMAND(vnode, db_show_vnode)
4664 {
4665 struct vnode *vp;
4666
4667 if (!have_addr)
4668 return;
4669 vp = (struct vnode *)addr;
4670 vn_printf(vp, "vnode ");
4671 }
4672
4673 /*
4674 * Show details about the given mount point.
4675 */
4676 DB_SHOW_COMMAND(mount, db_show_mount)
4677 {
4678 struct mount *mp;
4679 struct vfsopt *opt;
4680 struct statfs *sp;
4681 struct vnode *vp;
4682 char buf[512];
4683 uint64_t mflags;
4684 u_int flags;
4685
4686 if (!have_addr) {
4687 /* No address given, print short info about all mount points. */
4688 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4689 db_printf("%p %s on %s (%s)\n", mp,
4690 mp->mnt_stat.f_mntfromname,
4691 mp->mnt_stat.f_mntonname,
4692 mp->mnt_stat.f_fstypename);
4693 if (db_pager_quit)
4694 break;
4695 }
4696 db_printf("\nMore info: show mount <addr>\n");
4697 return;
4698 }
4699
4700 mp = (struct mount *)addr;
4701 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
4702 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
4703
4704 buf[0] = '\0';
4705 mflags = mp->mnt_flag;
4706 #define MNT_FLAG(flag) do { \
4707 if (mflags & (flag)) { \
4708 if (buf[0] != '\0') \
4709 strlcat(buf, ", ", sizeof(buf)); \
4710 strlcat(buf, (#flag) + 4, sizeof(buf)); \
4711 mflags &= ~(flag); \
4712 } \
4713 } while (0)
4714 MNT_FLAG(MNT_RDONLY);
4715 MNT_FLAG(MNT_SYNCHRONOUS);
4716 MNT_FLAG(MNT_NOEXEC);
4717 MNT_FLAG(MNT_NOSUID);
4718 MNT_FLAG(MNT_NFS4ACLS);
4719 MNT_FLAG(MNT_UNION);
4720 MNT_FLAG(MNT_ASYNC);
4721 MNT_FLAG(MNT_SUIDDIR);
4722 MNT_FLAG(MNT_SOFTDEP);
4723 MNT_FLAG(MNT_NOSYMFOLLOW);
4724 MNT_FLAG(MNT_GJOURNAL);
4725 MNT_FLAG(MNT_MULTILABEL);
4726 MNT_FLAG(MNT_ACLS);
4727 MNT_FLAG(MNT_NOATIME);
4728 MNT_FLAG(MNT_NOCLUSTERR);
4729 MNT_FLAG(MNT_NOCLUSTERW);
4730 MNT_FLAG(MNT_SUJ);
4731 MNT_FLAG(MNT_EXRDONLY);
4732 MNT_FLAG(MNT_EXPORTED);
4733 MNT_FLAG(MNT_DEFEXPORTED);
4734 MNT_FLAG(MNT_EXPORTANON);
4735 MNT_FLAG(MNT_EXKERB);
4736 MNT_FLAG(MNT_EXPUBLIC);
4737 MNT_FLAG(MNT_LOCAL);
4738 MNT_FLAG(MNT_QUOTA);
4739 MNT_FLAG(MNT_ROOTFS);
4740 MNT_FLAG(MNT_USER);
4741 MNT_FLAG(MNT_IGNORE);
4742 MNT_FLAG(MNT_UPDATE);
4743 MNT_FLAG(MNT_DELEXPORT);
4744 MNT_FLAG(MNT_RELOAD);
4745 MNT_FLAG(MNT_FORCE);
4746 MNT_FLAG(MNT_SNAPSHOT);
4747 MNT_FLAG(MNT_BYFSID);
4748 #undef MNT_FLAG
4749 if (mflags != 0) {
4750 if (buf[0] != '\0')
4751 strlcat(buf, ", ", sizeof(buf));
4752 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4753 "0x%016jx", mflags);
4754 }
4755 db_printf(" mnt_flag = %s\n", buf);
4756
4757 buf[0] = '\0';
4758 flags = mp->mnt_kern_flag;
4759 #define MNT_KERN_FLAG(flag) do { \
4760 if (flags & (flag)) { \
4761 if (buf[0] != '\0') \
4762 strlcat(buf, ", ", sizeof(buf)); \
4763 strlcat(buf, (#flag) + 5, sizeof(buf)); \
4764 flags &= ~(flag); \
4765 } \
4766 } while (0)
4767 MNT_KERN_FLAG(MNTK_UNMOUNTF);
4768 MNT_KERN_FLAG(MNTK_ASYNC);
4769 MNT_KERN_FLAG(MNTK_SOFTDEP);
4770 MNT_KERN_FLAG(MNTK_NOMSYNC);
4771 MNT_KERN_FLAG(MNTK_DRAINING);
4772 MNT_KERN_FLAG(MNTK_REFEXPIRE);
4773 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
4774 MNT_KERN_FLAG(MNTK_SHARED_WRITES);
4775 MNT_KERN_FLAG(MNTK_NO_IOPF);
4776 MNT_KERN_FLAG(MNTK_RECURSE);
4777 MNT_KERN_FLAG(MNTK_UPPER_WAITER);
4778 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE);
4779 MNT_KERN_FLAG(MNTK_USES_BCACHE);
4780 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG);
4781 MNT_KERN_FLAG(MNTK_FPLOOKUP);
4782 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER);
4783 MNT_KERN_FLAG(MNTK_NOASYNC);
4784 MNT_KERN_FLAG(MNTK_UNMOUNT);
4785 MNT_KERN_FLAG(MNTK_MWAIT);
4786 MNT_KERN_FLAG(MNTK_SUSPEND);
4787 MNT_KERN_FLAG(MNTK_SUSPEND2);
4788 MNT_KERN_FLAG(MNTK_SUSPENDED);
4789 MNT_KERN_FLAG(MNTK_NULL_NOCACHE);
4790 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
4791 #undef MNT_KERN_FLAG
4792 if (flags != 0) {
4793 if (buf[0] != '\0')
4794 strlcat(buf, ", ", sizeof(buf));
4795 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4796 "0x%08x", flags);
4797 }
4798 db_printf(" mnt_kern_flag = %s\n", buf);
4799
4800 db_printf(" mnt_opt = ");
4801 opt = TAILQ_FIRST(mp->mnt_opt);
4802 if (opt != NULL) {
4803 db_printf("%s", opt->name);
4804 opt = TAILQ_NEXT(opt, link);
4805 while (opt != NULL) {
4806 db_printf(", %s", opt->name);
4807 opt = TAILQ_NEXT(opt, link);
4808 }
4809 }
4810 db_printf("\n");
4811
4812 sp = &mp->mnt_stat;
4813 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx "
4814 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
4815 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
4816 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
4817 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
4818 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
4819 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
4820 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
4821 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
4822 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
4823 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
4824 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
4825
4826 db_printf(" mnt_cred = { uid=%u ruid=%u",
4827 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
4828 if (jailed(mp->mnt_cred))
4829 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
4830 db_printf(" }\n");
4831 db_printf(" mnt_ref = %d (with %d in the struct)\n",
4832 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref);
4833 db_printf(" mnt_gen = %d\n", mp->mnt_gen);
4834 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
4835 db_printf(" mnt_lazyvnodelistsize = %d\n",
4836 mp->mnt_lazyvnodelistsize);
4837 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n",
4838 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount);
4839 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max);
4840 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed);
4841 db_printf(" mnt_lockref = %d (with %d in the struct)\n",
4842 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref);
4843 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
4844 db_printf(" mnt_secondary_accwrites = %d\n",
4845 mp->mnt_secondary_accwrites);
4846 db_printf(" mnt_gjprovider = %s\n",
4847 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
4848 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops);
4849
4850 db_printf("\n\nList of active vnodes\n");
4851 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4852 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) {
4853 vn_printf(vp, "vnode ");
4854 if (db_pager_quit)
4855 break;
4856 }
4857 }
4858 db_printf("\n\nList of inactive vnodes\n");
4859 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4860 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) {
4861 vn_printf(vp, "vnode ");
4862 if (db_pager_quit)
4863 break;
4864 }
4865 }
4866 }
4867 #endif /* DDB */
4868
4869 /*
4870 * Fill in a struct xvfsconf based on a struct vfsconf.
4871 */
4872 static int
4873 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
4874 {
4875 struct xvfsconf xvfsp;
4876
4877 bzero(&xvfsp, sizeof(xvfsp));
4878 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4879 xvfsp.vfc_typenum = vfsp->vfc_typenum;
4880 xvfsp.vfc_refcount = vfsp->vfc_refcount;
4881 xvfsp.vfc_flags = vfsp->vfc_flags;
4882 /*
4883 * These are unused in userland; we keep them
4884 * to preserve binary compatibility.
4885 */
4886 xvfsp.vfc_vfsops = NULL;
4887 xvfsp.vfc_next = NULL;
4888 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4889 }
4890
4891 #ifdef COMPAT_FREEBSD32
4892 struct xvfsconf32 {
4893 uint32_t vfc_vfsops;
4894 char vfc_name[MFSNAMELEN];
4895 int32_t vfc_typenum;
4896 int32_t vfc_refcount;
4897 int32_t vfc_flags;
4898 uint32_t vfc_next;
4899 };
4900
4901 static int
4902 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
4903 {
4904 struct xvfsconf32 xvfsp;
4905
4906 bzero(&xvfsp, sizeof(xvfsp));
4907 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4908 xvfsp.vfc_typenum = vfsp->vfc_typenum;
4909 xvfsp.vfc_refcount = vfsp->vfc_refcount;
4910 xvfsp.vfc_flags = vfsp->vfc_flags;
4911 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4912 }
4913 #endif
4914
4915 /*
4916 * Top level filesystem related information gathering.
4917 */
4918 static int
4919 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
4920 {
4921 struct vfsconf *vfsp;
4922 int error;
4923
4924 error = 0;
4925 vfsconf_slock();
4926 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4927 #ifdef COMPAT_FREEBSD32
4928 if (req->flags & SCTL_MASK32)
4929 error = vfsconf2x32(req, vfsp);
4930 else
4931 #endif
4932 error = vfsconf2x(req, vfsp);
4933 if (error)
4934 break;
4935 }
4936 vfsconf_sunlock();
4937 return (error);
4938 }
4939
4940 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
4941 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
4942 "S,xvfsconf", "List of all configured filesystems");
4943
4944 #ifndef BURN_BRIDGES
4945 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
4946
4947 static int
4948 vfs_sysctl(SYSCTL_HANDLER_ARGS)
4949 {
4950 int *name = (int *)arg1 - 1; /* XXX */
4951 u_int namelen = arg2 + 1; /* XXX */
4952 struct vfsconf *vfsp;
4953
4954 log(LOG_WARNING, "userland calling deprecated sysctl, "
4955 "please rebuild world\n");
4956
4957 #if 1 || defined(COMPAT_PRELITE2)
4958 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
4959 if (namelen == 1)
4960 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
4961 #endif
4962
4963 switch (name[1]) {
4964 case VFS_MAXTYPENUM:
4965 if (namelen != 2)
4966 return (ENOTDIR);
4967 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
4968 case VFS_CONF:
4969 if (namelen != 3)
4970 return (ENOTDIR); /* overloaded */
4971 vfsconf_slock();
4972 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4973 if (vfsp->vfc_typenum == name[2])
4974 break;
4975 }
4976 vfsconf_sunlock();
4977 if (vfsp == NULL)
4978 return (EOPNOTSUPP);
4979 #ifdef COMPAT_FREEBSD32
4980 if (req->flags & SCTL_MASK32)
4981 return (vfsconf2x32(req, vfsp));
4982 else
4983 #endif
4984 return (vfsconf2x(req, vfsp));
4985 }
4986 return (EOPNOTSUPP);
4987 }
4988
4989 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
4990 CTLFLAG_MPSAFE, vfs_sysctl,
4991 "Generic filesystem");
4992
4993 #if 1 || defined(COMPAT_PRELITE2)
4994
4995 static int
4996 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
4997 {
4998 int error;
4999 struct vfsconf *vfsp;
5000 struct ovfsconf ovfs;
5001
5002 vfsconf_slock();
5003 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
5004 bzero(&ovfs, sizeof(ovfs));
5005 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
5006 strcpy(ovfs.vfc_name, vfsp->vfc_name);
5007 ovfs.vfc_index = vfsp->vfc_typenum;
5008 ovfs.vfc_refcount = vfsp->vfc_refcount;
5009 ovfs.vfc_flags = vfsp->vfc_flags;
5010 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
5011 if (error != 0) {
5012 vfsconf_sunlock();
5013 return (error);
5014 }
5015 }
5016 vfsconf_sunlock();
5017 return (0);
5018 }
5019
5020 #endif /* 1 || COMPAT_PRELITE2 */
5021 #endif /* !BURN_BRIDGES */
5022
5023 static void
5024 unmount_or_warn(struct mount *mp)
5025 {
5026 int error;
5027
5028 error = dounmount(mp, MNT_FORCE, curthread);
5029 if (error != 0) {
5030 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
5031 if (error == EBUSY)
5032 printf("BUSY)\n");
5033 else
5034 printf("%d)\n", error);
5035 }
5036 }
5037
5038 /*
5039 * Unmount all filesystems. The list is traversed in reverse order
5040 * of mounting to avoid dependencies.
5041 */
5042 void
5043 vfs_unmountall(void)
5044 {
5045 struct mount *mp, *tmp;
5046
5047 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
5048
5049 /*
5050 * Since this only runs when rebooting, it is not interlocked.
5051 */
5052 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
5053 vfs_ref(mp);
5054
5055 /*
5056 * Forcibly unmounting "/dev" before "/" would prevent clean
5057 * unmount of the latter.
5058 */
5059 if (mp == rootdevmp)
5060 continue;
5061
5062 unmount_or_warn(mp);
5063 }
5064
5065 if (rootdevmp != NULL)
5066 unmount_or_warn(rootdevmp);
5067 }
5068
5069 static void
5070 vfs_deferred_inactive(struct vnode *vp, int lkflags)
5071 {
5072
5073 ASSERT_VI_LOCKED(vp, __func__);
5074 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp);
5075 if ((vp->v_iflag & VI_OWEINACT) == 0) {
5076 vdropl(vp);
5077 return;
5078 }
5079 if (vn_lock(vp, lkflags) == 0) {
5080 VI_LOCK(vp);
5081 vinactive(vp);
5082 VOP_UNLOCK(vp);
5083 vdropl(vp);
5084 return;
5085 }
5086 vdefer_inactive_unlocked(vp);
5087 }
5088
5089 static int
5090 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
5091 {
5092
5093 return (vp->v_iflag & VI_DEFINACT);
5094 }
5095
5096 static void __noinline
5097 vfs_periodic_inactive(struct mount *mp, int flags)
5098 {
5099 struct vnode *vp, *mvp;
5100 int lkflags;
5101
5102 lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
5103 if (flags != MNT_WAIT)
5104 lkflags |= LK_NOWAIT;
5105
5106 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
5107 if ((vp->v_iflag & VI_DEFINACT) == 0) {
5108 VI_UNLOCK(vp);
5109 continue;
5110 }
5111 vp->v_iflag &= ~VI_DEFINACT;
5112 vfs_deferred_inactive(vp, lkflags);
5113 }
5114 }
5115
5116 static inline bool
5117 vfs_want_msync(struct vnode *vp)
5118 {
5119 struct vm_object *obj;
5120
5121 /*
5122 * This test may be performed without any locks held.
5123 * We rely on vm_object's type stability.
5124 */
5125 if (vp->v_vflag & VV_NOSYNC)
5126 return (false);
5127 obj = vp->v_object;
5128 return (obj != NULL && vm_object_mightbedirty(obj));
5129 }
5130
5131 static int
5132 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
5133 {
5134
5135 if (vp->v_vflag & VV_NOSYNC)
5136 return (false);
5137 if (vp->v_iflag & VI_DEFINACT)
5138 return (true);
5139 return (vfs_want_msync(vp));
5140 }
5141
5142 static void __noinline
5143 vfs_periodic_msync_inactive(struct mount *mp, int flags)
5144 {
5145 struct vnode *vp, *mvp;
5146 int lkflags;
5147 bool seen_defer;
5148
5149 lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
5150 if (flags != MNT_WAIT)
5151 lkflags |= LK_NOWAIT;
5152
5153 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
5154 seen_defer = false;
5155 if (vp->v_iflag & VI_DEFINACT) {
5156 vp->v_iflag &= ~VI_DEFINACT;
5157 seen_defer = true;
5158 }
5159 if (!vfs_want_msync(vp)) {
5160 if (seen_defer)
5161 vfs_deferred_inactive(vp, lkflags);
5162 else
5163 VI_UNLOCK(vp);
5164 continue;
5165 }
5166 if (vget(vp, lkflags) == 0) {
5167 if ((vp->v_vflag & VV_NOSYNC) == 0) {
5168 if (flags == MNT_WAIT)
5169 vnode_pager_clean_sync(vp);
5170 else
5171 vnode_pager_clean_async(vp);
5172 }
5173 vput(vp);
5174 if (seen_defer)
5175 vdrop(vp);
5176 } else {
5177 if (seen_defer)
5178 vdefer_inactive_unlocked(vp);
5179 }
5180 }
5181 }
5182
5183 void
5184 vfs_periodic(struct mount *mp, int flags)
5185 {
5186
5187 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
5188
5189 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0)
5190 vfs_periodic_inactive(mp, flags);
5191 else
5192 vfs_periodic_msync_inactive(mp, flags);
5193 }
5194
5195 static void
5196 destroy_vpollinfo_free(struct vpollinfo *vi)
5197 {
5198
5199 knlist_destroy(&vi->vpi_selinfo.si_note);
5200 mtx_destroy(&vi->vpi_lock);
5201 free(vi, M_VNODEPOLL);
5202 }
5203
5204 static void
5205 destroy_vpollinfo(struct vpollinfo *vi)
5206 {
5207
5208 knlist_clear(&vi->vpi_selinfo.si_note, 1);
5209 seldrain(&vi->vpi_selinfo);
5210 destroy_vpollinfo_free(vi);
5211 }
5212
5213 /*
5214 * Initialize per-vnode helper structure to hold poll-related state.
5215 */
5216 void
5217 v_addpollinfo(struct vnode *vp)
5218 {
5219 struct vpollinfo *vi;
5220
5221 if (vp->v_pollinfo != NULL)
5222 return;
5223 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5224 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5225 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5226 vfs_knlunlock, vfs_knl_assert_lock);
5227 VI_LOCK(vp);
5228 if (vp->v_pollinfo != NULL) {
5229 VI_UNLOCK(vp);
5230 destroy_vpollinfo_free(vi);
5231 return;
5232 }
5233 vp->v_pollinfo = vi;
5234 VI_UNLOCK(vp);
5235 }
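
/*
 * Note (added): the allocate-then-recheck sequence above is the usual
 * optimistic pattern for lazily attached state: the allocation is done
 * without the interlock held, and losing the race to another initializer
 * is handled by discarding our copy after the second VI_LOCK check.
 */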
5236
5237 /*
5238 * Record a process's interest in events which might happen to
5239 * a vnode. Because poll uses the historic select-style interface
5240 * internally, this routine serves as both the ``check for any
5241 * pending events'' and the ``record my interest in future events''
5242 * functions. (These are done together, while the lock is held,
5243 * to avoid race conditions.)
5244 */
5245 int
5246 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5247 {
5248
5249 v_addpollinfo(vp);
5250 mtx_lock(&vp->v_pollinfo->vpi_lock);
5251 if (vp->v_pollinfo->vpi_revents & events) {
5252 /*
5253 * This leaves events we are not interested
5254 * in available for the other process which
5255 * presumably had requested them
5256 * (otherwise they would never have been
5257 * recorded).
5258 */
5259 events &= vp->v_pollinfo->vpi_revents;
5260 vp->v_pollinfo->vpi_revents &= ~events;
5261
5262 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5263 return (events);
5264 }
5265 vp->v_pollinfo->vpi_events |= events;
5266 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5267 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5268 return (0);
5269 }
5270
5271 /*
5272 * Routine to create and manage a filesystem syncer vnode.
5273 */
5274 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5275 static int sync_fsync(struct vop_fsync_args *);
5276 static int sync_inactive(struct vop_inactive_args *);
5277 static int sync_reclaim(struct vop_reclaim_args *);
5278
5279 static struct vop_vector sync_vnodeops = {
5280 .vop_bypass = VOP_EOPNOTSUPP,
5281 .vop_close = sync_close,
5282 .vop_fsync = sync_fsync,
5283 .vop_getwritemount = vop_stdgetwritemount,
5284 .vop_inactive = sync_inactive,
5285 .vop_need_inactive = vop_stdneed_inactive,
5286 .vop_reclaim = sync_reclaim,
5287 .vop_lock1 = vop_stdlock,
5288 .vop_unlock = vop_stdunlock,
5289 .vop_islocked = vop_stdislocked,
5290 .vop_fplookup_vexec = VOP_EAGAIN,
5291 .vop_fplookup_symlink = VOP_EAGAIN,
5292 };
5293 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5294
5295 /*
5296 * Create a new filesystem syncer vnode for the specified mount point.
5297 */
5298 void
5299 vfs_allocate_syncvnode(struct mount *mp)
5300 {
5301 struct vnode *vp;
5302 struct bufobj *bo;
5303 static long start, incr, next;
5304 int error;
5305
5306 /* Allocate a new vnode */
5307 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5308 if (error != 0)
5309 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5310 vp->v_type = VNON;
5311 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5312 vp->v_vflag |= VV_FORCEINSMQ;
5313 error = insmntque1(vp, mp);
5314 if (error != 0)
5315 panic("vfs_allocate_syncvnode: insmntque() failed");
5316 vp->v_vflag &= ~VV_FORCEINSMQ;
5317 vn_set_state(vp, VSTATE_CONSTRUCTED);
5318 VOP_UNLOCK(vp);
5319 /*
5320 * Place the vnode onto the syncer worklist. We attempt to
5321 * scatter them about on the list so that they will go off
5322 * at evenly distributed times even if all the filesystems
5323 * are mounted at once.
5324 */
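	/*
	 * Worked example (added): with syncer_maxdelay == 32 the
	 * successive values of "next" are 16, 8, 24, 4, 12, 20, 28,
	 * 2, 6, ... - a binary subdivision of the delay range (the
	 * slot actually used below is next % syncdelay).
	 */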
5325 next += incr;
5326 if (next == 0 || next > syncer_maxdelay) {
5327 start /= 2;
5328 incr /= 2;
5329 if (start == 0) {
5330 start = syncer_maxdelay / 2;
5331 incr = syncer_maxdelay;
5332 }
5333 next = start;
5334 }
5335 bo = &vp->v_bufobj;
5336 BO_LOCK(bo);
5337 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5338 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5339 mtx_lock(&sync_mtx);
5340 sync_vnode_count++;
5341 if (mp->mnt_syncer == NULL) {
5342 mp->mnt_syncer = vp;
5343 vp = NULL;
5344 }
5345 mtx_unlock(&sync_mtx);
5346 BO_UNLOCK(bo);
5347 if (vp != NULL) {
5348 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5349 vgone(vp);
5350 vput(vp);
5351 }
5352 }
5353
5354 void
5355 vfs_deallocate_syncvnode(struct mount *mp)
5356 {
5357 struct vnode *vp;
5358
5359 mtx_lock(&sync_mtx);
5360 vp = mp->mnt_syncer;
5361 if (vp != NULL)
5362 mp->mnt_syncer = NULL;
5363 mtx_unlock(&sync_mtx);
5364 if (vp != NULL)
5365 vrele(vp);
5366 }
5367
5368 /*
5369 * Do a lazy sync of the filesystem.
5370 */
5371 static int
5372 sync_fsync(struct vop_fsync_args *ap)
5373 {
5374 struct vnode *syncvp = ap->a_vp;
5375 struct mount *mp = syncvp->v_mount;
5376 int error, save;
5377 struct bufobj *bo;
5378
5379 /*
5380 * We only need to do something if this is a lazy evaluation.
5381 */
5382 if (ap->a_waitfor != MNT_LAZY)
5383 return (0);
5384
5385 /*
5386 * Move ourselves to the back of the sync list.
5387 */
5388 bo = &syncvp->v_bufobj;
5389 BO_LOCK(bo);
5390 vn_syncer_add_to_worklist(bo, syncdelay);
5391 BO_UNLOCK(bo);
5392
5393 /*
5394 * Walk the list of vnodes pushing all that are dirty and
5395 * not already on the sync list.
5396 */
5397 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5398 return (0);
5399 VOP_UNLOCK(syncvp);
5400 save = curthread_pflags_set(TDP_SYNCIO);
5401 /*
5402 * The filesystem at hand may be idle with free vnodes stored in the
5403 * batch. Return them instead of letting them stay there indefinitely.
5404 */
5405 vfs_periodic(mp, MNT_NOWAIT);
5406 error = VFS_SYNC(mp, MNT_LAZY);
5407 curthread_pflags_restore(save);
5408 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5409 vfs_unbusy(mp);
5410 return (error);
5411 }
5412
5413 /*
5414 * The syncer vnode is no longer referenced.
5415 */
5416 static int
5417 sync_inactive(struct vop_inactive_args *ap)
5418 {
5419
5420 vgone(ap->a_vp);
5421 return (0);
5422 }
5423
5424 /*
5425 * The syncer vnode is no longer needed and is being decommissioned.
5426 *
5427 * Modifications to the worklist must be protected by sync_mtx.
5428 */
5429 static int
5430 sync_reclaim(struct vop_reclaim_args *ap)
5431 {
5432 struct vnode *vp = ap->a_vp;
5433 struct bufobj *bo;
5434
5435 bo = &vp->v_bufobj;
5436 BO_LOCK(bo);
5437 mtx_lock(&sync_mtx);
5438 if (vp->v_mount->mnt_syncer == vp)
5439 vp->v_mount->mnt_syncer = NULL;
5440 if (bo->bo_flag & BO_ONWORKLST) {
5441 LIST_REMOVE(bo, bo_synclist);
5442 syncer_worklist_len--;
5443 sync_vnode_count--;
5444 bo->bo_flag &= ~BO_ONWORKLST;
5445 }
5446 mtx_unlock(&sync_mtx);
5447 BO_UNLOCK(bo);
5448
5449 return (0);
5450 }
5451
5452 int
5453 vn_need_pageq_flush(struct vnode *vp)
5454 {
5455 struct vm_object *obj;
5456
5457 obj = vp->v_object;
5458 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5459 vm_object_mightbedirty(obj));
5460 }
5461
5462 /*
5463 * Check if vnode represents a disk device
5464 */
5465 bool
5466 vn_isdisk_error(struct vnode *vp, int *errp)
5467 {
5468 int error;
5469
5470 if (vp->v_type != VCHR) {
5471 error = ENOTBLK;
5472 goto out;
5473 }
5474 error = 0;
5475 dev_lock();
5476 if (vp->v_rdev == NULL)
5477 error = ENXIO;
5478 else if (vp->v_rdev->si_devsw == NULL)
5479 error = ENXIO;
5480 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5481 error = ENOTBLK;
5482 dev_unlock();
5483 out:
5484 *errp = error;
5485 return (error == 0);
5486 }
5487
5488 bool
5489 vn_isdisk(struct vnode *vp)
5490 {
5491 int error;
5492
5493 return (vn_isdisk_error(vp, &error));
5494 }
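
/*
 * Usage sketch (illustrative): callers which need to report a precise
 * errno use the _error variant:
 *
 *	if (!vn_isdisk_error(vp, &error))
 *		return (error);		// ENOTBLK or ENXIO
 */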
5495
5496 /*
5497 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5498 * the comment above cache_fplookup for details.
5499 */
5500 int
5501 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5502 {
5503 int error;
5504
5505 VFS_SMR_ASSERT_ENTERED();
5506
5507 /* Check the owner. */
5508 if (cred->cr_uid == file_uid) {
5509 if (file_mode & S_IXUSR)
5510 return (0);
5511 goto out_error;
5512 }
5513
5514 /* Otherwise, check the groups (first match) */
5515 if (groupmember(file_gid, cred)) {
5516 if (file_mode & S_IXGRP)
5517 return (0);
5518 goto out_error;
5519 }
5520
5521 /* Otherwise, check everyone else. */
5522 if (file_mode & S_IXOTH)
5523 return (0);
5524 out_error:
5525 /*
5526 * Permission check failed, but it is possible the denial will be
5527 * overridden (e.g., when root is traversing a 0700 directory owned
5528 * by someone else).
5529 *
5530 * vaccess() calls priv_check_cred() which in turn can descend into MAC
5531 * modules overriding this result. It is unclear what semantics they
5532 * are allowed to operate with, thus for safety we don't call them
5533 * from within the SMR section. This also means if any such modules
5534 * are present, we have to let the regular lookup decide.
5535 */
5536 error = priv_check_cred_vfs_lookup_nomac(cred);
5537 switch (error) {
5538 case 0:
5539 return (0);
5540 case EAGAIN:
5541 /*
5542 * MAC modules present.
5543 */
5544 return (EAGAIN);
5545 case EPERM:
5546 return (EACCES);
5547 default:
5548 return (error);
5549 }
5550 }
5551
5552 /*
5553 * Common filesystem object access control check routine. Accepts a
5554 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5555 * Returns 0 on success, or an errno on failure.
5556 */
5557 int
5558 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
5559 accmode_t accmode, struct ucred *cred)
5560 {
5561 accmode_t dac_granted;
5562 accmode_t priv_granted;
5563
5564 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
5565 ("invalid bit in accmode"));
5566 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
5567 ("VAPPEND without VWRITE"));
5568
5569 /*
5570 * Look for a normal, non-privileged way to access the file/directory
5571 * as requested. If it exists, go with that.
5572 */
5573
5574 dac_granted = 0;
5575
5576 /* Check the owner. */
5577 if (cred->cr_uid == file_uid) {
5578 dac_granted |= VADMIN;
5579 if (file_mode & S_IXUSR)
5580 dac_granted |= VEXEC;
5581 if (file_mode & S_IRUSR)
5582 dac_granted |= VREAD;
5583 if (file_mode & S_IWUSR)
5584 dac_granted |= (VWRITE | VAPPEND);
5585
5586 if ((accmode & dac_granted) == accmode)
5587 return (0);
5588
5589 goto privcheck;
5590 }
5591
5592 /* Otherwise, check the groups (first match) */
5593 if (groupmember(file_gid, cred)) {
5594 if (file_mode & S_IXGRP)
5595 dac_granted |= VEXEC;
5596 if (file_mode & S_IRGRP)
5597 dac_granted |= VREAD;
5598 if (file_mode & S_IWGRP)
5599 dac_granted |= (VWRITE | VAPPEND);
5600
5601 if ((accmode & dac_granted) == accmode)
5602 return (0);
5603
5604 goto privcheck;
5605 }
5606
5607 /* Otherwise, check everyone else. */
5608 if (file_mode & S_IXOTH)
5609 dac_granted |= VEXEC;
5610 if (file_mode & S_IROTH)
5611 dac_granted |= VREAD;
5612 if (file_mode & S_IWOTH)
5613 dac_granted |= (VWRITE | VAPPEND);
5614 if ((accmode & dac_granted) == accmode)
5615 return (0);
5616
5617 privcheck:
5618 /*
5619 * Build a privilege mask to determine if the set of privileges
5620 * satisfies the requirements when combined with the granted mask
5621 * from above. For each privilege, if the privilege is required,
5622 * bitwise or the request type onto the priv_granted mask.
5623 */
5624 priv_granted = 0;
5625
5626 if (type == VDIR) {
5627 /*
5628 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
5629 * requests, instead of PRIV_VFS_EXEC.
5630 */
5631 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5632 !priv_check_cred(cred, PRIV_VFS_LOOKUP))
5633 priv_granted |= VEXEC;
5634 } else {
5635 /*
5636 * Ensure that at least one execute bit is on. Otherwise,
5637 * a privileged user will always succeed, and we don't want
5638 * this to happen unless the file really is executable.
5639 */
5640 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5641 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
5642 !priv_check_cred(cred, PRIV_VFS_EXEC))
5643 priv_granted |= VEXEC;
5644 }
5645
5646 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
5647 !priv_check_cred(cred, PRIV_VFS_READ))
5648 priv_granted |= VREAD;
5649
5650 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
5651 !priv_check_cred(cred, PRIV_VFS_WRITE))
5652 priv_granted |= (VWRITE | VAPPEND);
5653
5654 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
5655 !priv_check_cred(cred, PRIV_VFS_ADMIN))
5656 priv_granted |= VADMIN;
5657
5658 if ((accmode & (priv_granted | dac_granted)) == accmode) {
5659 return (0);
5660 }
5661
5662 return ((accmode & VADMIN) ? EPERM : EACCES);
5663 }
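
/*
 * Illustrative sketch (not part of this interface): a minimal VOP_ACCESS
 * implementation typically defers to vaccess() with the attributes kept in
 * its private node; "xxfs" and the xn_* fields are hypothetical:
 *
 *	static int
 *	xxfs_access(struct vop_access_args *ap)
 *	{
 *		struct xxfs_node *node = VTOXXFS(ap->a_vp);
 *
 *		return (vaccess(ap->a_vp->v_type, node->xn_mode,
 *		    node->xn_uid, node->xn_gid, ap->a_accmode, ap->a_cred));
 *	}
 */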

/*
 * Credential check based on process requesting service, and per-attribute
 * permissions.
 */
int
extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
    struct thread *td, accmode_t accmode)
{

	/*
	 * Kernel-invoked requests always succeed.
	 */
	if (cred == NOCRED)
		return (0);

	/*
	 * Do not allow privileged processes in jail to directly manipulate
	 * system attributes.
	 */
	switch (attrnamespace) {
	case EXTATTR_NAMESPACE_SYSTEM:
		/* Potentially should be: return (EPERM); */
		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM));
	case EXTATTR_NAMESPACE_USER:
		return (VOP_ACCESS(vp, accmode, cred, td));
	default:
		return (EPERM);
	}
}
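
/*
 * Illustrative sketch: extended attribute VOPs are expected to route their
 * permission checks through this helper, e.g. a read-side check (assuming
 * the usual vop_getextattr_args fields):
 *
 *	error = extattr_check_cred(vp, ap->a_attrnamespace, ap->a_cred,
 *	    ap->a_td, VREAD);
 *	if (error != 0)
 *		return (error);
 */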

#ifdef DEBUG_VFS_LOCKS
int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
    "Drop into debugger on lock violation");

int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
    0, "Check for interlock across VOPs");

int vfs_badlock_print = 1;	/* Print lock violations. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
    0, "Print lock violations");

int vfs_badlock_vnode = 1;	/* Print vnode details on lock violations. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode,
    0, "Print vnode details on lock violations");

#ifdef KDB
int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
    &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
#endif

static void
vfs_badlock(const char *msg, const char *str, struct vnode *vp)
{

#ifdef KDB
	if (vfs_badlock_backtrace)
		kdb_backtrace();
#endif
	if (vfs_badlock_vnode)
		vn_printf(vp, "vnode ");
	if (vfs_badlock_print)
		printf("%s: %p %s\n", str, (void *)vp, msg);
	if (vfs_badlock_ddb)
		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
}

void
assert_vi_locked(struct vnode *vp, const char *str)
{

	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is not locked but should be", str, vp);
}

void
assert_vi_unlocked(struct vnode *vp, const char *str)
{

	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is locked but should not be", str, vp);
}

void
assert_vop_locked(struct vnode *vp, const char *str)
{
	if (KERNEL_PANICKED() || vp == NULL)
		return;

#ifdef WITNESS
	if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
	    witness_is_owned(&vp->v_vnlock->lock_object) == -1)
#else
	int locked = VOP_ISLOCKED(vp);
	if (locked == 0 || locked == LK_EXCLOTHER)
#endif
		vfs_badlock("is not locked but should be", str, vp);
}

void
assert_vop_unlocked(struct vnode *vp, const char *str)
{
	if (KERNEL_PANICKED() || vp == NULL)
		return;

#ifdef WITNESS
	if ((vp->v_irflag & VIRF_CROSSMP) == 0 &&
	    witness_is_owned(&vp->v_vnlock->lock_object) == 1)
#else
	if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
#endif
		vfs_badlock("is locked but should not be", str, vp);
}

void
assert_vop_elocked(struct vnode *vp, const char *str)
{
	if (KERNEL_PANICKED() || vp == NULL)
		return;

	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
		vfs_badlock("is not exclusive locked but should be", str, vp);
}
#endif /* DEBUG_VFS_LOCKS */
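
/*
 * Illustrative sketch: the assert_v*_locked helpers above are normally
 * reached through the ASSERT_VI_*() and ASSERT_VOP_*() macros from
 * sys/vnode.h, which compile to nothing without DEBUG_VFS_LOCKS, e.g.:
 *
 *	ASSERT_VOP_ELOCKED(vp, "xxfs_setattr");
 *	ASSERT_VI_UNLOCKED(vp, __func__);
 */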

void
vop_rename_fail(struct vop_rename_args *ap)
{

	if (ap->a_tvp != NULL)
		vput(ap->a_tvp);
	if (ap->a_tdvp == ap->a_tvp)
		vrele(ap->a_tdvp);
	else
		vput(ap->a_tdvp);
	vrele(ap->a_fdvp);
	vrele(ap->a_fvp);
}

void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

#ifdef DEBUG_VFS_LOCKS
	if (a->a_tvp)
		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");

	/* Check the source (from). */
	if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
	    (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
	if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");

	/* Check the target. */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
#endif
	/*
	 * It may be tempting to add vn_seqc_write_begin/end calls here and
	 * in vop_rename_post but that's not going to work out since some
	 * filesystems relookup vnodes mid-rename.  This is probably a bug.
	 *
	 * For now filesystems are expected to do the relevant calls after they
	 * decide what vnodes to operate on.
	 */
	if (a->a_tdvp != a->a_fdvp)
		vhold(a->a_fdvp);
	if (a->a_tvp != a->a_fvp)
		vhold(a->a_fvp);
	vhold(a->a_tdvp);
	if (a->a_tvp)
		vhold(a->a_tvp);
}

#ifdef DEBUG_VFS_LOCKS
void
vop_fplookup_vexec_debugpre(void *ap __unused)
{

	VFS_SMR_ASSERT_ENTERED();
}

void
vop_fplookup_vexec_debugpost(void *ap, int rc)
{
	struct vop_fplookup_vexec_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;

	VFS_SMR_ASSERT_ENTERED();
	if (rc == EOPNOTSUPP)
		VNPASS(VN_IS_DOOMED(vp), vp);
}

void
vop_fplookup_symlink_debugpre(void *ap __unused)
{

	VFS_SMR_ASSERT_ENTERED();
}

void
vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused)
{

	VFS_SMR_ASSERT_ENTERED();
}

static void
vop_fsync_debugprepost(struct vnode *vp, const char *name)
{
	if (vp->v_type == VCHR)
		;
	/*
	 * The shared vs. exclusive locking policy for fsync()
	 * is actually determined by vp's write mount as indicated
	 * by VOP_GETWRITEMOUNT(), which for stacked filesystems
	 * may not be the same as vp->v_mount.  However, if the
	 * underlying filesystem which really handles the fsync()
	 * supports shared locking, the stacked filesystem must also
	 * be prepared for its VOP_FSYNC() operation to be called
	 * with only a shared lock.  On the other hand, if the
	 * stacked filesystem claims support for shared write
	 * locking but the underlying filesystem does not, and the
	 * caller incorrectly uses a shared lock, this condition
	 * should still be caught when the stacked filesystem
	 * invokes VOP_FSYNC() on the underlying filesystem.
	 */
	else if (MNT_SHARED_WRITES(vp->v_mount))
		ASSERT_VOP_LOCKED(vp, name);
	else
		ASSERT_VOP_ELOCKED(vp, name);
}

void
vop_fsync_debugpre(void *a)
{
	struct vop_fsync_args *ap;

	ap = a;
	vop_fsync_debugprepost(ap->a_vp, "fsync");
}

void
vop_fsync_debugpost(void *a, int rc __unused)
{
	struct vop_fsync_args *ap;

	ap = a;
	vop_fsync_debugprepost(ap->a_vp, "fsync");
}

void
vop_fdatasync_debugpre(void *a)
{
	struct vop_fdatasync_args *ap;

	ap = a;
	vop_fsync_debugprepost(ap->a_vp, "fsync");
}

void
vop_fdatasync_debugpost(void *a, int rc __unused)
{
	struct vop_fdatasync_args *ap;

	ap = a;
	vop_fsync_debugprepost(ap->a_vp, "fsync");
}

void
vop_strategy_debugpre(void *ap)
{
	struct vop_strategy_args *a;
	struct buf *bp;

	a = ap;
	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) {
		if (vfs_badlock_print)
			printf(
			    "VOP_STRATEGY: bp is not locked but should be\n");
		if (vfs_badlock_ddb)
			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
	}
}

void
vop_lock_debugpre(void *ap)
{
	struct vop_lock1_args *a = ap;

	if ((a->a_flags & LK_INTERLOCK) == 0)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	else
		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
}

void
vop_lock_debugpost(void *ap, int rc)
{
	struct vop_lock1_args *a = ap;

	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
}

void
vop_unlock_debugpre(void *ap)
{
	struct vop_unlock_args *a = ap;
	struct vnode *vp = a->a_vp;

	VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp);
	ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK");
}

void
vop_need_inactive_debugpre(void *ap)
{
	struct vop_need_inactive_args *a = ap;

	ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
}

void
vop_need_inactive_debugpost(void *ap, int rc)
{
	struct vop_need_inactive_args *a = ap;

	ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
}
#endif

void
vop_create_pre(void *ap)
{
	struct vop_create_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_create_post(void *ap, int rc)
{
	struct vop_create_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_whiteout_pre(void *ap)
{
	struct vop_whiteout_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_whiteout_post(void *ap, int rc)
{
	struct vop_whiteout_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
}

void
vop_deleteextattr_pre(void *ap)
{
	struct vop_deleteextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_deleteextattr_post(void *ap, int rc)
{
	struct vop_deleteextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
}

void
vop_link_pre(void *ap)
{
	struct vop_link_args *a;
	struct vnode *vp, *tdvp;

	a = ap;
	vp = a->a_vp;
	tdvp = a->a_tdvp;
	vn_seqc_write_begin(vp);
	vn_seqc_write_begin(tdvp);
}

void
vop_link_post(void *ap, int rc)
{
	struct vop_link_args *a;
	struct vnode *vp, *tdvp;

	a = ap;
	vp = a->a_vp;
	tdvp = a->a_tdvp;
	vn_seqc_write_end(vp);
	vn_seqc_write_end(tdvp);
	if (!rc) {
		VFS_KNOTE_LOCKED(vp, NOTE_LINK);
		VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE);
	}
}

void
vop_mkdir_pre(void *ap)
{
	struct vop_mkdir_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_mkdir_post(void *ap, int rc)
{
	struct vop_mkdir_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
}

#ifdef DEBUG_VFS_LOCKS
void
vop_mkdir_debugpost(void *ap, int rc)
{
	struct vop_mkdir_args *a;

	a = ap;
	if (!rc)
		cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp);
}
#endif

void
vop_mknod_pre(void *ap)
{
	struct vop_mknod_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_mknod_post(void *ap, int rc)
{
	struct vop_mknod_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_reclaim_post(void *ap, int rc)
{
	struct vop_reclaim_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	ASSERT_VOP_IN_SEQC(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
}

void
vop_remove_pre(void *ap)
{
	struct vop_remove_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_begin(dvp);
	vn_seqc_write_begin(vp);
}

void
vop_remove_post(void *ap, int rc)
{
	struct vop_remove_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_end(dvp);
	vn_seqc_write_end(vp);
	if (!rc) {
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
	}
}

void
vop_rename_post(void *ap, int rc)
{
	struct vop_rename_args *a = ap;
	long hint;

	if (!rc) {
		hint = NOTE_WRITE;
		if (a->a_fdvp == a->a_tdvp) {
			if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
				hint |= NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
		} else {
			hint |= NOTE_EXTEND;
			if (a->a_fvp->v_type == VDIR)
				hint |= NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);

			if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
			    a->a_tvp->v_type == VDIR)
				hint &= ~NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
		}

		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
		if (a->a_tvp)
			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
	}
	if (a->a_tdvp != a->a_fdvp)
		vdrop(a->a_fdvp);
	if (a->a_tvp != a->a_fvp)
		vdrop(a->a_fvp);
	vdrop(a->a_tdvp);
	if (a->a_tvp)
		vdrop(a->a_tvp);
}

void
vop_rmdir_pre(void *ap)
{
	struct vop_rmdir_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_begin(dvp);
	vn_seqc_write_begin(vp);
}

void
vop_rmdir_post(void *ap, int rc)
{
	struct vop_rmdir_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_end(dvp);
	vn_seqc_write_end(vp);
	if (!rc) {
		vp->v_vflag |= VV_UNLINKED;
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
	}
}

void
vop_setattr_pre(void *ap)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_setacl_pre(void *ap)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setacl_post(void *ap, int rc __unused)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
}

void
vop_setextattr_pre(void *ap)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_symlink_pre(void *ap)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_open_post(void *ap, int rc)
{
	struct vop_open_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
}

void
vop_close_post(void *ap, int rc)
{
	struct vop_close_args *a = ap;

	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
	    !VN_IS_DOOMED(a->a_vp))) {
		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
	}
}

void
vop_read_post(void *ap, int rc)
{
	struct vop_read_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

void
vop_read_pgcache_post(void *ap, int rc)
{
	struct vop_read_pgcache_args *a = ap;

	if (!rc)
		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
}

void
vop_readdir_post(void *ap, int rc)
{
	struct vop_readdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};
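
/*
 * Illustrative sketch (an assumption about typical usage, not an interface
 * contract): userspace attaches EVFILT_FS knotes to this global list and
 * selects events of interest via fflags, e.g. with an existing kqueue kq:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD, VQ_MOUNT | VQ_UNMOUNT, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_fsattach() below forces EV_CLEAR, so the fflags accumulated by
 * filt_fsevent() are reset once the event is retrieved.
 */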

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* Ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int	filt_vfsread(struct knote *kn, long hint);
static int	filt_vfswrite(struct knote *kn, long hint);
static int	filt_vfsvnode(struct knote *kn, long hint);
static void	filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}
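
/*
 * Illustrative sketch: these filters back EVFILT_READ/EVFILT_WRITE/
 * EVFILT_VNODE registrations on vnode-backed file descriptors.  Watching
 * an open file fd for deletion or rename from userspace (kq being an
 * existing kqueue) would look roughly like:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */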

/*
 * Detach knote from vnode.
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	off_t size;
	int res;

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0)
		return (0);

	VI_LOCK(vp);
	kn->kn_data = size - kn->kn_fp->f_offset;
	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
	VI_UNLOCK(vp);
	return (res);
}

/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}
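
/*
 * Illustrative sketch: a synthetic filesystem's VOP_READDIR would typically
 * fill one struct dirent per entry and feed it to vfs_read_dirent(),
 * treating ENAMETOOLONG as "no room left in the uio".  Here "fileno" and
 * "name" are hypothetical per-entry values and "next_off" is the seek
 * cookie recorded for resuming after this entry:
 *
 *	dp->d_fileno = fileno;
 *	dp->d_type = DT_REG;
 *	dp->d_namlen = strlen(name);
 *	memcpy(dp->d_name, name, dp->d_namlen + 1);
 *	dp->d_reclen = GENERIC_DIRSIZ(dp);
 *	error = vfs_read_dirent(ap, dp, next_off);
 *	if (error == ENAMETOOLONG)
 *		break;
 */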

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify an explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  Caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}
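
/*
 * Illustrative sketch of the calling convention described above, as a
 * VOP_ACCESSX-style handler might apply it (a sketch, not the only valid
 * arrangement):
 *
 *	accmode_t accmode = ap->a_accmode;
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);
 *	if (accmode == 0)
 *		return (0);
 *	(continue with e.g. vaccess() using the reduced accmode)
 */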

/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted.  Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode returned"
					    " by VFS_CACHEDROOT and the one cached"
					    " (%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct mount_pcpu *mpcpu;
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp, mpcpu))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp, mpcpu);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp, mpcpu);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}
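
/*
 * Illustrative sketch: callers wanting the root vnode of a mounted
 * filesystem can prefer this over VFS_ROOT() to ride the cache, e.g.:
 *
 *	error = vfs_cache_root(mp, LK_SHARED, &vp);
 *	if (error != 0)
 *		return (error);
 *	(use vp, then vput(vp))
 */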

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * ops > 0 guarantees there is nobody who can see this vnode.
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h,
 * and the usage sketch below.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */
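
/*
 * Illustrative sketch of the canonical loop (see sys/mount.h for the
 * authoritative macro definitions).  Each vnode is returned with its
 * interlock held; a premature break must go through the abort macro so
 * the marker is cleaned up:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		(process vp, dropping the interlock as appropriate)
 *	}
 *
 * with MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp) on any early exit.
 */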

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	maybe_yield();
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h,
 * and the usage sketch below.
 */
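
/*
 * Illustrative sketch: lazy iteration takes a callback, invoked without
 * the vnode interlock, to cheaply skip uninteresting vnodes; matches are
 * returned with the interlock held.  The predicate below is a hypothetical
 * example:
 *
 *	static int
 *	xx_want_vnode(struct vnode *vp, void *arg __unused)
 *	{
 *		return (vp->v_type == VREG);
 *	}
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, xx_want_vnode, NULL) {
 *		(process vp)
 *		VI_UNLOCK(vp);
 *	}
 */
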
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note that we may be racing against vdrop which transitioned the
	 * hold count to 0 and now waits for the ->mnt_listmtx lock.  This
	 * is fine; if we are the only user after we get the interlock, we
	 * will just vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the
		 * list as a result.  Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as being in modify for as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}

int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d\n", vp->v_state, state);
}
#endif
