/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD,
    &vnodes_created, "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;
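
/*
 * Worked example of the watermarks above (numbers illustrative, taken
 * from the formulas in vnlru_recalc() below rather than measured):
 *
 *	desiredvnodes  = 1000000
 *	wantfreevnodes = desiredvnodes / 4 = 250000
 *	gapvnodes      = imax(desiredvnodes - wantfreevnodes, 100) = 750000
 *	vhiwat         = gapvnodes / 11 ~= 68181 (about 9% of the gap)
 *	vlowat         = vhiwat / 2 ~= 34090 (about 4.5%)
 */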

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD,
    &recycles_count, "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD,
    &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD,
    &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD,
    &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are only
 * delayed about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at a
 * rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
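
/*
 * Illustrative sketch of how an entry is aimed "delay" seconds into the
 * future on the wheel described above; this mirrors the index arithmetic
 * used by vn_syncer_add_to_worklist(), but the helper itself is
 * hypothetical and not part of this file:
 *
 *	static struct synclist *
 *	syncer_slot(int delay)
 *	{
 *		if (delay > syncer_maxdelay - 2)
 *			delay = syncer_maxdelay - 2;
 *		return (&syncer_workitem_pending[(syncer_delayno + delay) &
 *		    syncer_mask]);
 *	}
 */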

/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;		/* enough extras after expansion */
static u_long vlowat;		/* minimal extras before expansion */
static bool vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");
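
/*
 * Usage sketch: both knobs above map to sysctl_maxvnodes(), so the
 * following are equivalent from the command line (value illustrative):
 *
 *	# sysctl kern.maxvnodes=2000000
 *	# sysctl vfs.vnode.param.limit=2000000
 */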

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
    sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");
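
/*
 * Usage sketch for the two debug knobs above (the path and descriptor
 * are illustrative):
 *
 *	# sysctl debug.try_reclaim_vnode=/var/tmp/somefile
 *
 * or, from userspace C code, by file descriptor:
 *
 *	int fd = open("/var/tmp/somefile", O_RDONLY);
 *	sysctlbyname("debug.ftry_reclaim_vnode", NULL, NULL, &fd, sizeof(fd));
 */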

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
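
/*
 * Worked example of the sizing above (illustrative): on a 16GB machine
 * with 4KB pages, pgtok(vm_cnt.v_page_count) is about 16777216 KB, so,
 * ignoring the comparatively small maxproc term:
 *
 *	physvnodes ~= 16777216 / 64 + 3 * min(98304 * 16, 16777216) / 64
 *		   ~= 262144 + 73728 ~= 335872
 *
 * virtvnodes independently caps this by the kernel heap; the smaller of
 * the two values becomes desiredvnodes (kern.maxvnodes).
 */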

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}
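
/*
 * Typical usage sketch for the pair above (error handling abbreviated,
 * surrounding context hypothetical):
 *
 *	if (vfs_busy(mp, 0) != 0)
 *		return (ENOENT);
 *	error = VFS_ROOT(mp, LK_EXCLUSIVE, &vp);
 *	vfs_unbusy(mp);
 */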

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and go the slow way if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
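
/*
 * Usage sketch: selecting full nanosecond precision from the command
 * line, per the table above:
 *
 *	# sysctl vfs.timestamp_precision=3
 */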

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *	entries if this argument is true
 * @param trigger Only reclaim vnodes with fewer than this many resident
 *	pages.
 * @param target How many vnodes to reclaim.
 * @return The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled but it claims otherwise to the caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In the presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out about if
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
 */
static int
vnlru_free_locked(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when a counter local to a
 * CPU diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are
 * conditionally walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);

	if (rfreevnodes > freevnodes_old)
		slop = rfreevnodes - freevnodes_old;
	else
		slop = freevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	freevnodes_old = rfreevnodes;
	CPU_FOREACH(cpu) {
		freevnodes_old += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}
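
/*
 * Worked example of the slop (illustrative): with 8 CPUs, each per-CPU
 * delta can sit anywhere in (-126, 126) without being rolled up, so the
 * global counter can lag the true total by up to 8 * 125 = 1000 vnodes
 * in either direction.  vnlru_read_freevnodes() walks the CPUs only once
 * the global counter itself has drifted by at least VNLRU_FREEVNODES_SLOP
 * since the last walk.
 */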

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes);
			mtx_lock(&vnode_list_mtx);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or by not excessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = false;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);
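
/*
 * Worked example of the recycling parameters in vnlru_proc() above
 * (numbers illustrative): with v_page_count = 4194304 (16GB of 4KB
 * pages), numvnodes = 500000 of which 100000 are free:
 *
 *	usevnodes = 500000 - 100000 = 400000
 *	trigger   = 4194304 * 2 / 400000 ~= 20 pages
 *	target    = 500000 * gapvnodes / desiredvnodes / 10 + 1
 *
 * That is, a pass aims to reclaim roughly 10% of the gap-scaled count.
 * The computed ~20-page trigger only applies once force >= 2; milder
 * passes use the stricter vsmalltrigger (8 pages) instead.
 */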

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNPASS(vp->v_holdcnt > 0, vp);
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		vdrop_recycle(vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		vdrop_recycle(vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	vdropl_recycle(vp);
	vn_finished_write(vnmp);
	return (0);
}

/*
 * Allocate a new vnode.
 *
 * The operation never returns an error.  Returning an error was disabled
 * in r145385 (dated 2005) with the following comment:
 *
 * XXX Not all VFS_VGET/ffs_vget callers check returns.
 *
 * Given the age of this commit (almost 15 years at the time of writing this
 * comment) restoring the ability to fail requires a significant audit of
 * all codepaths.
 *
 * The routine can try to free a vnode or stall for up to 1 second waiting
 * for vnlru to clear things up, but ultimately always performs an M_WAITOK
 * allocation.
 */
static u_long vn_alloc_cyclecount;
static u_long vn_alloc_sleeps;

SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD,
    &vn_alloc_sleeps, 0,
    "Number of times vnode allocation blocked waiting on vnlru");

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		mtx_unlock(&vnode_list_mtx);
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = true;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1) > 0)
		goto alloc;
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		mtx_lock(&vnode_list_mtx);
		vnlru_kick_locked();
		vn_alloc_sleeps++;
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1);
		else
			mtx_unlock(&vnode_list_mtx);
	}
alloc:
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick_cond();
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}
1820 */ 1821 mtx_lock(&vnode_list_mtx); 1822 vnlru_kick_locked(); 1823 vn_alloc_sleeps++; 1824 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1825 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1826 vnlru_read_freevnodes() > 1) 1827 vnlru_free_locked(1); 1828 else 1829 mtx_unlock(&vnode_list_mtx); 1830 } 1831 alloc: 1832 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1833 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1834 if (vnlru_under(rnumvnodes, vlowat)) 1835 vnlru_kick_cond(); 1836 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1837 } 1838 1839 static struct vnode * 1840 vn_alloc(struct mount *mp) 1841 { 1842 u_long rnumvnodes; 1843 1844 if (__predict_false(vn_alloc_cyclecount != 0)) 1845 return (vn_alloc_hard(mp)); 1846 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1847 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 1848 atomic_subtract_long(&numvnodes, 1); 1849 return (vn_alloc_hard(mp)); 1850 } 1851 1852 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1853 } 1854 1855 static void 1856 vn_free(struct vnode *vp) 1857 { 1858 1859 atomic_subtract_long(&numvnodes, 1); 1860 uma_zfree_smr(vnode_zone, vp); 1861 } 1862 1863 /* 1864 * Return the next vnode from the free list. 1865 */ 1866 int 1867 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1868 struct vnode **vpp) 1869 { 1870 struct vnode *vp; 1871 struct thread *td; 1872 struct lock_object *lo; 1873 1874 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1875 1876 KASSERT(vops->registered, 1877 ("%s: not registered vector op %p\n", __func__, vops)); 1878 cache_validate_vop_vector(mp, vops); 1879 1880 td = curthread; 1881 if (td->td_vp_reserved != NULL) { 1882 vp = td->td_vp_reserved; 1883 td->td_vp_reserved = NULL; 1884 } else { 1885 vp = vn_alloc(mp); 1886 } 1887 counter_u64_add(vnodes_created, 1); 1888 1889 vn_set_state(vp, VSTATE_UNINITIALIZED); 1890 1891 /* 1892 * Locks are given the generic name "vnode" when created. 1893 * Follow the historic practice of using the filesystem 1894 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc. 1895 * 1896 * Locks live in a witness group keyed on their name. Thus, 1897 * when a lock is renamed, it must also move from the witness 1898 * group of its old name to the witness group of its new name. 1899 * 1900 * The change only needs to be made when the vnode moves 1901 * from one filesystem type to another. We ensure that each 1902 * filesystem uses a single static name pointer for its tag so 1903 * that we can compare pointers rather than doing a strcmp(). 1904 */ 1905 lo = &vp->v_vnlock->lock_object; 1906 #ifdef WITNESS 1907 if (lo->lo_name != tag) { 1908 #endif 1909 lo->lo_name = tag; 1910 #ifdef WITNESS 1911 WITNESS_DESTROY(lo); 1912 WITNESS_INIT(lo, tag); 1913 } 1914 #endif 1915 /* 1916 * By default, don't allow shared locks unless filesystems opt-in. 1917 */ 1918 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1919 /* 1920 * Finalize various vnode identity bits.
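 * The asserts that follow check that the zone-cached vnode carries no
 * stale state (v_object, v_lockf, v_pollinfo) left over from its
 * previous identity; freevnode() performs the matching teardown.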
1921 */ 1922 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1923 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1924 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1925 vp->v_type = VNON; 1926 vp->v_op = vops; 1927 vp->v_irflag = 0; 1928 v_init_counters(vp); 1929 vn_seqc_init(vp); 1930 vp->v_bufobj.bo_ops = &buf_ops_bio; 1931 #ifdef DIAGNOSTIC 1932 if (mp == NULL && vops != &dead_vnodeops) 1933 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1934 #endif 1935 #ifdef MAC 1936 mac_vnode_init(vp); 1937 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1938 mac_vnode_associate_singlelabel(mp, vp); 1939 #endif 1940 if (mp != NULL) { 1941 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1942 } 1943 1944 /* 1945 * For the filesystems which do not use vfs_hash_insert(), 1946 * still initialize v_hash to have vfs_hash_index() useful. 1947 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1948 * its own hashing. 1949 */ 1950 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1951 1952 *vpp = vp; 1953 return (0); 1954 } 1955 1956 void 1957 getnewvnode_reserve(void) 1958 { 1959 struct thread *td; 1960 1961 td = curthread; 1962 MPASS(td->td_vp_reserved == NULL); 1963 td->td_vp_reserved = vn_alloc(NULL); 1964 } 1965 1966 void 1967 getnewvnode_drop_reserve(void) 1968 { 1969 struct thread *td; 1970 1971 td = curthread; 1972 if (td->td_vp_reserved != NULL) { 1973 vn_free(td->td_vp_reserved); 1974 td->td_vp_reserved = NULL; 1975 } 1976 } 1977 1978 static void __noinline 1979 freevnode(struct vnode *vp) 1980 { 1981 struct bufobj *bo; 1982 1983 /* 1984 * The vnode has been marked for destruction, so free it. 1985 * 1986 * The vnode will be returned to the zone where it will 1987 * normally remain until it is needed for another vnode. We 1988 * need to cleanup (or verify that the cleanup has already 1989 * been done) any residual data left from its current use 1990 * so as not to contaminate the freshly allocated vnode. 1991 */ 1992 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1993 /* 1994 * Paired with vgone. 1995 */ 1996 vn_seqc_write_end_free(vp); 1997 1998 bo = &vp->v_bufobj; 1999 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2000 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2001 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2002 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2003 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2004 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2005 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2006 ("clean blk trie not empty")); 2007 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2008 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2009 ("dirty blk trie not empty")); 2010 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2011 ("Dangling rangelock waiters")); 2012 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2013 ("Leaked inactivation")); 2014 VI_UNLOCK(vp); 2015 cache_assert_no_entries(vp); 2016 2017 #ifdef MAC 2018 mac_vnode_destroy(vp); 2019 #endif 2020 if (vp->v_pollinfo != NULL) { 2021 /* 2022 * Use LK_NOWAIT to shut up witness about the lock. We may get 2023 * here while having another vnode locked when trying to 2024 * satisfy a lookup and needing to recycle. 
2025 */ 2026 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2027 destroy_vpollinfo(vp->v_pollinfo); 2028 VOP_UNLOCK(vp); 2029 vp->v_pollinfo = NULL; 2030 } 2031 vp->v_mountedhere = NULL; 2032 vp->v_unpcb = NULL; 2033 vp->v_rdev = NULL; 2034 vp->v_fifoinfo = NULL; 2035 vp->v_iflag = 0; 2036 vp->v_vflag = 0; 2037 bo->bo_flag = 0; 2038 vn_free(vp); 2039 } 2040 2041 /* 2042 * Delete from old mount point vnode list, if on one. 2043 */ 2044 static void 2045 delmntque(struct vnode *vp) 2046 { 2047 struct mount *mp; 2048 2049 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2050 2051 mp = vp->v_mount; 2052 MNT_ILOCK(mp); 2053 VI_LOCK(vp); 2054 vp->v_mount = NULL; 2055 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2056 ("bad mount point vnode list size")); 2057 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2058 mp->mnt_nvnodelistsize--; 2059 MNT_REL(mp); 2060 MNT_IUNLOCK(mp); 2061 /* 2062 * The caller expects the interlock to be still held. 2063 */ 2064 ASSERT_VI_LOCKED(vp, __func__); 2065 } 2066 2067 static int 2068 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2069 { 2070 2071 KASSERT(vp->v_mount == NULL, 2072 ("insmntque: vnode already on per mount vnode list")); 2073 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2074 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2075 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2076 } else { 2077 KASSERT(!dtr, 2078 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2079 __func__)); 2080 } 2081 2082 /* 2083 * We acquire the vnode interlock early to ensure that the 2084 * vnode cannot be recycled by another process releasing a 2085 * holdcnt on it before we get it on both the vnode list 2086 * and the active vnode list. The mount mutex protects only 2087 * manipulation of the vnode list and the vnode freelist 2088 * mutex protects only manipulation of the active vnode list. 2089 * Hence the need to hold the vnode interlock throughout. 2090 */ 2091 MNT_ILOCK(mp); 2092 VI_LOCK(vp); 2093 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2094 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2095 mp->mnt_nvnodelistsize == 0)) && 2096 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2097 VI_UNLOCK(vp); 2098 MNT_IUNLOCK(mp); 2099 if (dtr) { 2100 vp->v_data = NULL; 2101 vp->v_op = &dead_vnodeops; 2102 vgone(vp); 2103 vput(vp); 2104 } 2105 return (EBUSY); 2106 } 2107 vp->v_mount = mp; 2108 MNT_REF(mp); 2109 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2110 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2111 ("neg mount point vnode list size")); 2112 mp->mnt_nvnodelistsize++; 2113 VI_UNLOCK(vp); 2114 MNT_IUNLOCK(mp); 2115 return (0); 2116 } 2117 2118 /* 2119 * Insert into list of vnodes for the new mount point, if available. 2120 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2121 * leaves handling of the vnode to the caller. 2122 */ 2123 int 2124 insmntque(struct vnode *vp, struct mount *mp) 2125 { 2126 return (insmntque1_int(vp, mp, true)); 2127 } 2128 2129 int 2130 insmntque1(struct vnode *vp, struct mount *mp) 2131 { 2132 return (insmntque1_int(vp, mp, false)); 2133 } 2134 2135 /* 2136 * Flush out and invalidate all buffers associated with a bufobj 2137 * Called with the underlying object locked. 
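 *
 * A hedged usage sketch via the vnode-level wrapper below (the flags
 * are real, the call site is illustrative only):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * V_SAVE first writes out dirty buffers, then invalidates everything.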
2138 */ 2139 int 2140 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2141 { 2142 int error; 2143 2144 BO_LOCK(bo); 2145 if (flags & V_SAVE) { 2146 error = bufobj_wwait(bo, slpflag, slptimeo); 2147 if (error) { 2148 BO_UNLOCK(bo); 2149 return (error); 2150 } 2151 if (bo->bo_dirty.bv_cnt > 0) { 2152 BO_UNLOCK(bo); 2153 do { 2154 error = BO_SYNC(bo, MNT_WAIT); 2155 } while (error == ERELOOKUP); 2156 if (error != 0) 2157 return (error); 2158 BO_LOCK(bo); 2159 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2160 BO_UNLOCK(bo); 2161 return (EBUSY); 2162 } 2163 } 2164 } 2165 /* 2166 * If you alter this loop please notice that interlock is dropped and 2167 * reacquired in flushbuflist. Special care is needed to ensure that 2168 * no race conditions occur from this. 2169 */ 2170 do { 2171 error = flushbuflist(&bo->bo_clean, 2172 flags, bo, slpflag, slptimeo); 2173 if (error == 0 && !(flags & V_CLEANONLY)) 2174 error = flushbuflist(&bo->bo_dirty, 2175 flags, bo, slpflag, slptimeo); 2176 if (error != 0 && error != EAGAIN) { 2177 BO_UNLOCK(bo); 2178 return (error); 2179 } 2180 } while (error != 0); 2181 2182 /* 2183 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2184 * have write I/O in-progress but if there is a VM object then the 2185 * VM object can also have read-I/O in-progress. 2186 */ 2187 do { 2188 bufobj_wwait(bo, 0, 0); 2189 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2190 BO_UNLOCK(bo); 2191 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2192 BO_LOCK(bo); 2193 } 2194 } while (bo->bo_numoutput > 0); 2195 BO_UNLOCK(bo); 2196 2197 /* 2198 * Destroy the copy in the VM cache, too. 2199 */ 2200 if (bo->bo_object != NULL && 2201 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2202 VM_OBJECT_WLOCK(bo->bo_object); 2203 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2204 OBJPR_CLEANONLY : 0); 2205 VM_OBJECT_WUNLOCK(bo->bo_object); 2206 } 2207 2208 #ifdef INVARIANTS 2209 BO_LOCK(bo); 2210 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2211 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2212 bo->bo_clean.bv_cnt > 0)) 2213 panic("vinvalbuf: flush failed"); 2214 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2215 bo->bo_dirty.bv_cnt > 0) 2216 panic("vinvalbuf: flush dirty failed"); 2217 BO_UNLOCK(bo); 2218 #endif 2219 return (0); 2220 } 2221 2222 /* 2223 * Flush out and invalidate all buffers associated with a vnode. 2224 * Called with the underlying object locked. 2225 */ 2226 int 2227 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2228 { 2229 2230 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2231 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2232 if (vp->v_object != NULL && vp->v_object->handle != vp) 2233 return (0); 2234 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2235 } 2236 2237 /* 2238 * Flush out buffers on the specified list. 2239 * 2240 */ 2241 static int 2242 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2243 int slptimeo) 2244 { 2245 struct buf *bp, *nbp; 2246 int retval, error; 2247 daddr_t lblkno; 2248 b_xflags_t xflags; 2249 2250 ASSERT_BO_WLOCKED(bo); 2251 2252 retval = 0; 2253 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2254 /* 2255 * If we are flushing both V_NORMAL and V_ALT buffers then 2256 * do not skip any buffers. If we are flushing only V_NORMAL 2257 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2258 * flushing only V_ALT buffers then skip buffers not marked 2259 * as BX_ALTDATA. 2260 */ 2261 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2262 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2263 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2264 continue; 2265 } 2266 if (nbp != NULL) { 2267 lblkno = nbp->b_lblkno; 2268 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2269 } 2270 retval = EAGAIN; 2271 error = BUF_TIMELOCK(bp, 2272 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2273 "flushbuf", slpflag, slptimeo); 2274 if (error) { 2275 BO_LOCK(bo); 2276 return (error != ENOLCK ? error : EAGAIN); 2277 } 2278 KASSERT(bp->b_bufobj == bo, 2279 ("bp %p wrong b_bufobj %p should be %p", 2280 bp, bp->b_bufobj, bo)); 2281 /* 2282 * XXX Since there are no node locks for NFS, I 2283 * believe there is a slight chance that a delayed 2284 * write will occur while sleeping just above, so 2285 * check for it. 2286 */ 2287 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2288 (flags & V_SAVE)) { 2289 bremfree(bp); 2290 bp->b_flags |= B_ASYNC; 2291 bwrite(bp); 2292 BO_LOCK(bo); 2293 return (EAGAIN); /* XXX: why not loop ? */ 2294 } 2295 bremfree(bp); 2296 bp->b_flags |= (B_INVAL | B_RELBUF); 2297 bp->b_flags &= ~B_ASYNC; 2298 brelse(bp); 2299 BO_LOCK(bo); 2300 if (nbp == NULL) 2301 break; 2302 nbp = gbincore(bo, lblkno); 2303 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2304 != xflags) 2305 break; /* nbp invalid */ 2306 } 2307 return (retval); 2308 } 2309 2310 int 2311 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2312 { 2313 struct buf *bp; 2314 int error; 2315 daddr_t lblkno; 2316 2317 ASSERT_BO_LOCKED(bo); 2318 2319 for (lblkno = startn;;) { 2320 again: 2321 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2322 if (bp == NULL || bp->b_lblkno >= endn || 2323 bp->b_lblkno < startn) 2324 break; 2325 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2326 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2327 if (error != 0) { 2328 BO_RLOCK(bo); 2329 if (error == ENOLCK) 2330 goto again; 2331 return (error); 2332 } 2333 KASSERT(bp->b_bufobj == bo, 2334 ("bp %p wrong b_bufobj %p should be %p", 2335 bp, bp->b_bufobj, bo)); 2336 lblkno = bp->b_lblkno + 1; 2337 if ((bp->b_flags & B_MANAGED) == 0) 2338 bremfree(bp); 2339 bp->b_flags |= B_RELBUF; 2340 /* 2341 * In the VMIO case, use the B_NOREUSE flag to hint that the 2342 * pages backing each buffer in the range are unlikely to be 2343 * reused. Dirty buffers will have the hint applied once 2344 * they've been written. 2345 */ 2346 if ((bp->b_flags & B_VMIO) != 0) 2347 bp->b_flags |= B_NOREUSE; 2348 brelse(bp); 2349 BO_RLOCK(bo); 2350 } 2351 return (0); 2352 } 2353 2354 /* 2355 * Truncate a file's buffer and pages to a specified length. This 2356 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2357 * sync activity. 2358 */ 2359 int 2360 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2361 { 2362 struct buf *bp, *nbp; 2363 struct bufobj *bo; 2364 daddr_t startlbn; 2365 2366 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2367 vp, blksize, (uintmax_t)length); 2368 2369 /* 2370 * Round up to the *next* lbn. 
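 * For example, with blksize 16384: length 16384 yields startlbn 1
 * (block 0 is kept in full), while length 16385 yields startlbn 2
 * (block 1 still holds one valid byte). howmany() is a ceiling
 * division.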
2371 */ 2372 startlbn = howmany(length, blksize); 2373 2374 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2375 2376 bo = &vp->v_bufobj; 2377 restart_unlocked: 2378 BO_LOCK(bo); 2379 2380 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2381 ; 2382 2383 if (length > 0) { 2384 restartsync: 2385 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2386 if (bp->b_lblkno > 0) 2387 continue; 2388 /* 2389 * Since we hold the vnode lock this should only 2390 * fail if we're racing with the buf daemon. 2391 */ 2392 if (BUF_LOCK(bp, 2393 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2394 BO_LOCKPTR(bo)) == ENOLCK) 2395 goto restart_unlocked; 2396 2397 VNASSERT((bp->b_flags & B_DELWRI), vp, 2398 ("buf(%p) on dirty queue without DELWRI", bp)); 2399 2400 bremfree(bp); 2401 bawrite(bp); 2402 BO_LOCK(bo); 2403 goto restartsync; 2404 } 2405 } 2406 2407 bufobj_wwait(bo, 0, 0); 2408 BO_UNLOCK(bo); 2409 vnode_pager_setsize(vp, length); 2410 2411 return (0); 2412 } 2413 2414 /* 2415 * Invalidate the cached pages of a file's buffer within the range of block 2416 * numbers [startlbn, endlbn). 2417 */ 2418 void 2419 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2420 int blksize) 2421 { 2422 struct bufobj *bo; 2423 off_t start, end; 2424 2425 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2426 2427 start = blksize * startlbn; 2428 end = blksize * endlbn; 2429 2430 bo = &vp->v_bufobj; 2431 BO_LOCK(bo); 2432 MPASS(blksize == bo->bo_bsize); 2433 2434 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2435 ; 2436 2437 BO_UNLOCK(bo); 2438 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2439 } 2440 2441 static int 2442 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2443 daddr_t startlbn, daddr_t endlbn) 2444 { 2445 struct buf *bp, *nbp; 2446 bool anyfreed; 2447 2448 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2449 ASSERT_BO_LOCKED(bo); 2450 2451 do { 2452 anyfreed = false; 2453 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2454 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2455 continue; 2456 if (BUF_LOCK(bp, 2457 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2458 BO_LOCKPTR(bo)) == ENOLCK) { 2459 BO_LOCK(bo); 2460 return (EAGAIN); 2461 } 2462 2463 bremfree(bp); 2464 bp->b_flags |= B_INVAL | B_RELBUF; 2465 bp->b_flags &= ~B_ASYNC; 2466 brelse(bp); 2467 anyfreed = true; 2468 2469 BO_LOCK(bo); 2470 if (nbp != NULL && 2471 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2472 nbp->b_vp != vp || 2473 (nbp->b_flags & B_DELWRI) != 0)) 2474 return (EAGAIN); 2475 } 2476 2477 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2478 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2479 continue; 2480 if (BUF_LOCK(bp, 2481 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2482 BO_LOCKPTR(bo)) == ENOLCK) { 2483 BO_LOCK(bo); 2484 return (EAGAIN); 2485 } 2486 bremfree(bp); 2487 bp->b_flags |= B_INVAL | B_RELBUF; 2488 bp->b_flags &= ~B_ASYNC; 2489 brelse(bp); 2490 anyfreed = true; 2491 2492 BO_LOCK(bo); 2493 if (nbp != NULL && 2494 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2495 (nbp->b_vp != vp) || 2496 (nbp->b_flags & B_DELWRI) == 0)) 2497 return (EAGAIN); 2498 } 2499 } while (anyfreed); 2500 return (0); 2501 } 2502 2503 static void 2504 buf_vlist_remove(struct buf *bp) 2505 { 2506 struct bufv *bv; 2507 b_xflags_t flags; 2508 2509 flags = bp->b_xflags; 2510 2511 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2512 ASSERT_BO_WLOCKED(bp->b_bufobj); 2513 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2514 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2515 ("%s: buffer %p has invalid queue state", __func__, bp)); 2516 2517 if ((flags & BX_VNDIRTY) != 0) 2518 bv = &bp->b_bufobj->bo_dirty; 2519 else 2520 bv = &bp->b_bufobj->bo_clean; 2521 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2522 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2523 bv->bv_cnt--; 2524 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2525 } 2526 2527 /* 2528 * Add the buffer to the sorted clean or dirty block list. 2529 * 2530 * NOTE: xflags is passed as a constant, optimizing this inline function! 2531 */ 2532 static void 2533 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2534 { 2535 struct bufv *bv; 2536 struct buf *n; 2537 int error; 2538 2539 ASSERT_BO_WLOCKED(bo); 2540 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2541 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2542 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2543 ("dead bo %p", bo)); 2544 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2545 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2546 bp->b_xflags |= xflags; 2547 if (xflags & BX_VNDIRTY) 2548 bv = &bo->bo_dirty; 2549 else 2550 bv = &bo->bo_clean; 2551 2552 /* 2553 * Keep the list ordered. Optimize empty list insertion. Assume 2554 * we tend to grow at the tail so lookup_le should usually be cheaper 2555 * than _ge. 2556 */ 2557 if (bv->bv_cnt == 0 || 2558 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2559 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2560 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2561 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2562 else 2563 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2564 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2565 if (error) 2566 panic("buf_vlist_add: Preallocated nodes insufficient."); 2567 bv->bv_cnt++; 2568 } 2569 2570 /* 2571 * Look up a buffer using the buffer tries. 2572 */ 2573 struct buf * 2574 gbincore(struct bufobj *bo, daddr_t lblkno) 2575 { 2576 struct buf *bp; 2577 2578 ASSERT_BO_LOCKED(bo); 2579 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2580 if (bp != NULL) 2581 return (bp); 2582 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2583 } 2584 2585 /* 2586 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2587 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2588 * stability of the result. Like other lockless lookups, the found buf may 2589 * already be invalid by the time this function returns. 2590 */ 2591 struct buf * 2592 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2593 { 2594 struct buf *bp; 2595 2596 ASSERT_BO_UNLOCKED(bo); 2597 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2598 if (bp != NULL) 2599 return (bp); 2600 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2601 } 2602 2603 /* 2604 * Associate a buffer with a vnode. 2605 */ 2606 void 2607 bgetvp(struct vnode *vp, struct buf *bp) 2608 { 2609 struct bufobj *bo; 2610 2611 bo = &vp->v_bufobj; 2612 ASSERT_BO_WLOCKED(bo); 2613 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2614 2615 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2616 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2617 ("bgetvp: bp already attached! %p", bp)); 2618 2619 vhold(vp); 2620 bp->b_vp = vp; 2621 bp->b_bufobj = bo; 2622 /* 2623 * Insert onto list for new vnode. 
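 * Freshly associated buffers always start on the clean list;
 * reassignbuf() migrates them to the dirty list once B_DELWRI is set.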
2624 */ 2625 buf_vlist_add(bp, bo, BX_VNCLEAN); 2626 } 2627 2628 /* 2629 * Disassociate a buffer from a vnode. 2630 */ 2631 void 2632 brelvp(struct buf *bp) 2633 { 2634 struct bufobj *bo; 2635 struct vnode *vp; 2636 2637 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2638 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2639 2640 /* 2641 * Delete from old vnode list, if on one. 2642 */ 2643 vp = bp->b_vp; /* XXX */ 2644 bo = bp->b_bufobj; 2645 BO_LOCK(bo); 2646 buf_vlist_remove(bp); 2647 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2648 bo->bo_flag &= ~BO_ONWORKLST; 2649 mtx_lock(&sync_mtx); 2650 LIST_REMOVE(bo, bo_synclist); 2651 syncer_worklist_len--; 2652 mtx_unlock(&sync_mtx); 2653 } 2654 bp->b_vp = NULL; 2655 bp->b_bufobj = NULL; 2656 BO_UNLOCK(bo); 2657 vdrop(vp); 2658 } 2659 2660 /* 2661 * Add an item to the syncer work queue. 2662 */ 2663 static void 2664 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2665 { 2666 int slot; 2667 2668 ASSERT_BO_WLOCKED(bo); 2669 2670 mtx_lock(&sync_mtx); 2671 if (bo->bo_flag & BO_ONWORKLST) 2672 LIST_REMOVE(bo, bo_synclist); 2673 else { 2674 bo->bo_flag |= BO_ONWORKLST; 2675 syncer_worklist_len++; 2676 } 2677 2678 if (delay > syncer_maxdelay - 2) 2679 delay = syncer_maxdelay - 2; 2680 slot = (syncer_delayno + delay) & syncer_mask; 2681 2682 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2683 mtx_unlock(&sync_mtx); 2684 } 2685 2686 static int 2687 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2688 { 2689 int error, len; 2690 2691 mtx_lock(&sync_mtx); 2692 len = syncer_worklist_len - sync_vnode_count; 2693 mtx_unlock(&sync_mtx); 2694 error = SYSCTL_OUT(req, &len, sizeof(len)); 2695 return (error); 2696 } 2697 2698 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2699 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2700 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2701 2702 static struct proc *updateproc; 2703 static void sched_sync(void); 2704 static struct kproc_desc up_kp = { 2705 "syncer", 2706 sched_sync, 2707 &updateproc 2708 }; 2709 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2710 2711 static int 2712 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2713 { 2714 struct vnode *vp; 2715 struct mount *mp; 2716 2717 *bo = LIST_FIRST(slp); 2718 if (*bo == NULL) 2719 return (0); 2720 vp = bo2vnode(*bo); 2721 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2722 return (1); 2723 /* 2724 * We use vhold in case the vnode does not 2725 * successfully sync. vhold prevents the vnode from 2726 * going away when we unlock the sync_mtx so that 2727 * we can acquire the vnode interlock. 2728 */ 2729 vholdl(vp); 2730 mtx_unlock(&sync_mtx); 2731 VI_UNLOCK(vp); 2732 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2733 vdrop(vp); 2734 mtx_lock(&sync_mtx); 2735 return (*bo == LIST_FIRST(slp)); 2736 } 2737 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2738 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2739 ("suspended mp syncing vp %p", vp)); 2740 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2741 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2742 VOP_UNLOCK(vp); 2743 vn_finished_write(mp); 2744 BO_LOCK(*bo); 2745 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2746 /* 2747 * Put us back on the worklist. The worklist 2748 * routine will remove us from our current 2749 * position and then add us back in at a later 2750 * position. 
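 * Requeueing with syncdelay means a bufobj which stays dirty is
 * revisited roughly every syncdelay seconds until it syncs clean.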
2751 */ 2752 vn_syncer_add_to_worklist(*bo, syncdelay); 2753 } 2754 BO_UNLOCK(*bo); 2755 vdrop(vp); 2756 mtx_lock(&sync_mtx); 2757 return (0); 2758 } 2759 2760 static int first_printf = 1; 2761 2762 /* 2763 * System filesystem synchronizer daemon. 2764 */ 2765 static void 2766 sched_sync(void) 2767 { 2768 struct synclist *next, *slp; 2769 struct bufobj *bo; 2770 long starttime; 2771 struct thread *td = curthread; 2772 int last_work_seen; 2773 int net_worklist_len; 2774 int syncer_final_iter; 2775 int error; 2776 2777 last_work_seen = 0; 2778 syncer_final_iter = 0; 2779 syncer_state = SYNCER_RUNNING; 2780 starttime = time_uptime; 2781 td->td_pflags |= TDP_NORUNNINGBUF; 2782 2783 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2784 SHUTDOWN_PRI_LAST); 2785 2786 mtx_lock(&sync_mtx); 2787 for (;;) { 2788 if (syncer_state == SYNCER_FINAL_DELAY && 2789 syncer_final_iter == 0) { 2790 mtx_unlock(&sync_mtx); 2791 kproc_suspend_check(td->td_proc); 2792 mtx_lock(&sync_mtx); 2793 } 2794 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2795 if (syncer_state != SYNCER_RUNNING && 2796 starttime != time_uptime) { 2797 if (first_printf) { 2798 printf("\nSyncing disks, vnodes remaining... "); 2799 first_printf = 0; 2800 } 2801 printf("%d ", net_worklist_len); 2802 } 2803 starttime = time_uptime; 2804 2805 /* 2806 * Push files whose dirty time has expired. Be careful 2807 * of interrupt race on slp queue. 2808 * 2809 * Skip over empty worklist slots when shutting down. 2810 */ 2811 do { 2812 slp = &syncer_workitem_pending[syncer_delayno]; 2813 syncer_delayno += 1; 2814 if (syncer_delayno == syncer_maxdelay) 2815 syncer_delayno = 0; 2816 next = &syncer_workitem_pending[syncer_delayno]; 2817 /* 2818 * If the worklist has wrapped since the last time 2819 * it was emptied of all but syncer vnodes, 2820 * switch to the FINAL_DELAY state and run 2821 * for one more second. 2822 */ 2823 if (syncer_state == SYNCER_SHUTTING_DOWN && 2824 net_worklist_len == 0 && 2825 last_work_seen == syncer_delayno) { 2826 syncer_state = SYNCER_FINAL_DELAY; 2827 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2828 } 2829 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2830 syncer_worklist_len > 0); 2831 2832 /* 2833 * Keep track of the last time there was anything 2834 * on the worklist other than syncer vnodes. 2835 * Return to the SHUTTING_DOWN state if any 2836 * new work appears. 2837 */ 2838 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2839 last_work_seen = syncer_delayno; 2840 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2841 syncer_state = SYNCER_SHUTTING_DOWN; 2842 while (!LIST_EMPTY(slp)) { 2843 error = sync_vnode(slp, &bo, td); 2844 if (error == 1) { 2845 LIST_REMOVE(bo, bo_synclist); 2846 LIST_INSERT_HEAD(next, bo, bo_synclist); 2847 continue; 2848 } 2849 2850 if (first_printf == 0) { 2851 /* 2852 * Drop the sync mutex, because some watchdog 2853 * drivers need to sleep while patting the watchdog. 2854 */ 2855 mtx_unlock(&sync_mtx); 2856 wdog_kern_pat(WD_LASTVAL); 2857 mtx_lock(&sync_mtx); 2858 } 2859 } 2860 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2861 syncer_final_iter--; 2862 /* 2863 * The variable rushjob allows the kernel to speed up the 2864 * processing of the filesystem syncer process. A rushjob 2865 * value of N tells the filesystem syncer to process the next 2866 * N seconds worth of work on its queue ASAP.
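 * (For example, speedup_syncer() below bumps rushjob by one, which
 * makes this loop skip one sleep and immediately process the next
 * slot.)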
Currently rushjob 2867 * is used by the soft update code to speed up the filesystem 2868 * syncer process when the incore state is getting so far 2869 * ahead of the disk that the kernel memory pool is being 2870 * threatened with exhaustion. 2871 */ 2872 if (rushjob > 0) { 2873 rushjob -= 1; 2874 continue; 2875 } 2876 /* 2877 * Just sleep for a short period of time between 2878 * iterations when shutting down to allow some I/O 2879 * to happen. 2880 * 2881 * If it has taken us less than a second to process the 2882 * current work, then wait. Otherwise start right over 2883 * again. We can still lose time if any single round 2884 * takes more than two seconds, but it does not really 2885 * matter as we are just trying to generally pace the 2886 * filesystem activity. 2887 */ 2888 if (syncer_state != SYNCER_RUNNING || 2889 time_uptime == starttime) { 2890 thread_lock(td); 2891 sched_prio(td, PPAUSE); 2892 thread_unlock(td); 2893 } 2894 if (syncer_state != SYNCER_RUNNING) 2895 cv_timedwait(&sync_wakeup, &sync_mtx, 2896 hz / SYNCER_SHUTDOWN_SPEEDUP); 2897 else if (time_uptime == starttime) 2898 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2899 } 2900 } 2901 2902 /* 2903 * Request the syncer daemon to speed up its work. 2904 * We never push it to speed up more than half of its 2905 * normal turn time, otherwise it could take over the CPU. 2906 */ 2907 int 2908 speedup_syncer(void) 2909 { 2910 int ret = 0; 2911 2912 mtx_lock(&sync_mtx); 2913 if (rushjob < syncdelay / 2) { 2914 rushjob += 1; 2915 stat_rush_requests += 1; 2916 ret = 1; 2917 } 2918 mtx_unlock(&sync_mtx); 2919 cv_broadcast(&sync_wakeup); 2920 return (ret); 2921 } 2922 2923 /* 2924 * Tell the syncer to speed up its work and run through its work 2925 * list several times, then tell it to shut down. 2926 */ 2927 static void 2928 syncer_shutdown(void *arg, int howto) 2929 { 2930 2931 if (howto & RB_NOSYNC) 2932 return; 2933 mtx_lock(&sync_mtx); 2934 syncer_state = SYNCER_SHUTTING_DOWN; 2935 rushjob = 0; 2936 mtx_unlock(&sync_mtx); 2937 cv_broadcast(&sync_wakeup); 2938 kproc_shutdown(arg, howto); 2939 } 2940 2941 void 2942 syncer_suspend(void) 2943 { 2944 2945 syncer_shutdown(updateproc, 0); 2946 } 2947 2948 void 2949 syncer_resume(void) 2950 { 2951 2952 mtx_lock(&sync_mtx); 2953 first_printf = 1; 2954 syncer_state = SYNCER_RUNNING; 2955 mtx_unlock(&sync_mtx); 2956 cv_broadcast(&sync_wakeup); 2957 kproc_resume(updateproc); 2958 } 2959 2960 /* 2961 * Move the buffer between the clean and dirty lists of its vnode. 2962 */ 2963 void 2964 reassignbuf(struct buf *bp) 2965 { 2966 struct vnode *vp; 2967 struct bufobj *bo; 2968 int delay; 2969 #ifdef INVARIANTS 2970 struct bufv *bv; 2971 #endif 2972 2973 vp = bp->b_vp; 2974 bo = bp->b_bufobj; 2975 2976 KASSERT((bp->b_flags & B_PAGING) == 0, 2977 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2978 2979 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2980 bp, bp->b_vp, bp->b_flags); 2981 2982 BO_LOCK(bo); 2983 buf_vlist_remove(bp); 2984 2985 /* 2986 * If dirty, put on list of dirty buffers; otherwise insert onto list 2987 * of clean buffers.
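 * The delay for a newly dirty bufobj is picked per vnode type below:
 * directories use dirdelay, device metadata uses metadelay, and
 * everything else uses filedelay.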
2988 */ 2989 if (bp->b_flags & B_DELWRI) { 2990 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2991 switch (vp->v_type) { 2992 case VDIR: 2993 delay = dirdelay; 2994 break; 2995 case VCHR: 2996 delay = metadelay; 2997 break; 2998 default: 2999 delay = filedelay; 3000 } 3001 vn_syncer_add_to_worklist(bo, delay); 3002 } 3003 buf_vlist_add(bp, bo, BX_VNDIRTY); 3004 } else { 3005 buf_vlist_add(bp, bo, BX_VNCLEAN); 3006 3007 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3008 mtx_lock(&sync_mtx); 3009 LIST_REMOVE(bo, bo_synclist); 3010 syncer_worklist_len--; 3011 mtx_unlock(&sync_mtx); 3012 bo->bo_flag &= ~BO_ONWORKLST; 3013 } 3014 } 3015 #ifdef INVARIANTS 3016 bv = &bo->bo_clean; 3017 bp = TAILQ_FIRST(&bv->bv_hd); 3018 KASSERT(bp == NULL || bp->b_bufobj == bo, 3019 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3020 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3021 KASSERT(bp == NULL || bp->b_bufobj == bo, 3022 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3023 bv = &bo->bo_dirty; 3024 bp = TAILQ_FIRST(&bv->bv_hd); 3025 KASSERT(bp == NULL || bp->b_bufobj == bo, 3026 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3027 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3028 KASSERT(bp == NULL || bp->b_bufobj == bo, 3029 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3030 #endif 3031 BO_UNLOCK(bo); 3032 } 3033 3034 static void 3035 v_init_counters(struct vnode *vp) 3036 { 3037 3038 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3039 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3040 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3041 3042 refcount_init(&vp->v_holdcnt, 1); 3043 refcount_init(&vp->v_usecount, 1); 3044 } 3045 3046 /* 3047 * Grab a particular vnode from the free list, increment its 3048 * reference count and lock it. VIRF_DOOMED is set if the vnode 3049 * is being destroyed. Only callers who specify LK_RETRY will 3050 * see doomed vnodes. If inactive processing was delayed in 3051 * vput, try to do it here. 3052 * 3053 * usecount is manipulated using atomics without holding any locks. 3054 * 3055 * holdcnt can be manipulated using atomics without holding any locks, 3056 * except when transitioning 1<->0, in which case the interlock is held. 3057 * 3058 * Consumers which don't guarantee liveness of the vnode can use SMR to 3059 * try to get a reference. Note this operation can fail since the vnode 3060 * may already be awaiting freeing by the time they get to it.
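 *
 * A hedged sketch of such a consumer (the lookup itself is elided and
 * the vfs_smr_enter()/vfs_smr_exit() spelling is assumed here):
 *
 *	vfs_smr_enter();
 *	vp = lookup a candidate vnode under SMR;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		return (EAGAIN);
 *	error = vget_finish(vp, LK_SHARED, vs);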
3061 */ 3062 enum vgetstate 3063 vget_prep_smr(struct vnode *vp) 3064 { 3065 enum vgetstate vs; 3066 3067 VFS_SMR_ASSERT_ENTERED(); 3068 3069 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3070 vs = VGET_USECOUNT; 3071 } else { 3072 if (vhold_smr(vp)) 3073 vs = VGET_HOLDCNT; 3074 else 3075 vs = VGET_NONE; 3076 } 3077 return (vs); 3078 } 3079 3080 enum vgetstate 3081 vget_prep(struct vnode *vp) 3082 { 3083 enum vgetstate vs; 3084 3085 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3086 vs = VGET_USECOUNT; 3087 } else { 3088 vhold(vp); 3089 vs = VGET_HOLDCNT; 3090 } 3091 return (vs); 3092 } 3093 3094 void 3095 vget_abort(struct vnode *vp, enum vgetstate vs) 3096 { 3097 3098 switch (vs) { 3099 case VGET_USECOUNT: 3100 vrele(vp); 3101 break; 3102 case VGET_HOLDCNT: 3103 vdrop(vp); 3104 break; 3105 default: 3106 __assert_unreachable(); 3107 } 3108 } 3109 3110 int 3111 vget(struct vnode *vp, int flags) 3112 { 3113 enum vgetstate vs; 3114 3115 vs = vget_prep(vp); 3116 return (vget_finish(vp, flags, vs)); 3117 } 3118 3119 int 3120 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3121 { 3122 int error; 3123 3124 if ((flags & LK_INTERLOCK) != 0) 3125 ASSERT_VI_LOCKED(vp, __func__); 3126 else 3127 ASSERT_VI_UNLOCKED(vp, __func__); 3128 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3129 VNPASS(vp->v_holdcnt > 0, vp); 3130 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3131 3132 error = vn_lock(vp, flags); 3133 if (__predict_false(error != 0)) { 3134 vget_abort(vp, vs); 3135 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3136 vp); 3137 return (error); 3138 } 3139 3140 vget_finish_ref(vp, vs); 3141 return (0); 3142 } 3143 3144 void 3145 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3146 { 3147 int old; 3148 3149 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3150 VNPASS(vp->v_holdcnt > 0, vp); 3151 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3152 3153 if (vs == VGET_USECOUNT) 3154 return; 3155 3156 /* 3157 * We hold the vnode. If the usecount was 0 our hold is what keeps 3158 * the vnode around. Otherwise someone else lent their hold count and 3159 * we have to drop ours. 3160 */ 3161 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3162 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3163 if (old != 0) { 3164 #ifdef INVARIANTS 3165 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3166 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3167 #else 3168 refcount_release(&vp->v_holdcnt); 3169 #endif 3170 } 3171 } 3172 3173 void 3174 vref(struct vnode *vp) 3175 { 3176 enum vgetstate vs; 3177 3178 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3179 vs = vget_prep(vp); 3180 vget_finish_ref(vp, vs); 3181 } 3182 3183 void 3184 vrefact(struct vnode *vp) 3185 { 3186 3187 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3188 #ifdef INVARIANTS 3189 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3190 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3191 #else 3192 refcount_acquire(&vp->v_usecount); 3193 #endif 3194 } 3195 3196 void 3197 vlazy(struct vnode *vp) 3198 { 3199 struct mount *mp; 3200 3201 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3202 3203 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3204 return; 3205 /* 3206 * We may get here for inactive routines after the vnode got doomed.
3207 */ 3208 if (VN_IS_DOOMED(vp)) 3209 return; 3210 mp = vp->v_mount; 3211 mtx_lock(&mp->mnt_listmtx); 3212 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3213 vp->v_mflag |= VMP_LAZYLIST; 3214 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3215 mp->mnt_lazyvnodelistsize++; 3216 } 3217 mtx_unlock(&mp->mnt_listmtx); 3218 } 3219 3220 static void 3221 vunlazy(struct vnode *vp) 3222 { 3223 struct mount *mp; 3224 3225 ASSERT_VI_LOCKED(vp, __func__); 3226 VNPASS(!VN_IS_DOOMED(vp), vp); 3227 3228 mp = vp->v_mount; 3229 mtx_lock(&mp->mnt_listmtx); 3230 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3231 /* 3232 * Don't remove the vnode from the lazy list if another thread 3233 * has increased the hold count. It may have re-enqueued the 3234 * vnode to the lazy list and is now responsible for its 3235 * removal. 3236 */ 3237 if (vp->v_holdcnt == 0) { 3238 vp->v_mflag &= ~VMP_LAZYLIST; 3239 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3240 mp->mnt_lazyvnodelistsize--; 3241 } 3242 mtx_unlock(&mp->mnt_listmtx); 3243 } 3244 3245 /* 3246 * This routine is only meant to be called from vgonel prior to dooming 3247 * the vnode. 3248 */ 3249 static void 3250 vunlazy_gone(struct vnode *vp) 3251 { 3252 struct mount *mp; 3253 3254 ASSERT_VOP_ELOCKED(vp, __func__); 3255 ASSERT_VI_LOCKED(vp, __func__); 3256 VNPASS(!VN_IS_DOOMED(vp), vp); 3257 3258 if (vp->v_mflag & VMP_LAZYLIST) { 3259 mp = vp->v_mount; 3260 mtx_lock(&mp->mnt_listmtx); 3261 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3262 vp->v_mflag &= ~VMP_LAZYLIST; 3263 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3264 mp->mnt_lazyvnodelistsize--; 3265 mtx_unlock(&mp->mnt_listmtx); 3266 } 3267 } 3268 3269 static void 3270 vdefer_inactive(struct vnode *vp) 3271 { 3272 3273 ASSERT_VI_LOCKED(vp, __func__); 3274 VNPASS(vp->v_holdcnt > 0, vp); 3275 if (VN_IS_DOOMED(vp)) { 3276 vdropl(vp); 3277 return; 3278 } 3279 if (vp->v_iflag & VI_DEFINACT) { 3280 VNPASS(vp->v_holdcnt > 1, vp); 3281 vdropl(vp); 3282 return; 3283 } 3284 if (vp->v_usecount > 0) { 3285 vp->v_iflag &= ~VI_OWEINACT; 3286 vdropl(vp); 3287 return; 3288 } 3289 vlazy(vp); 3290 vp->v_iflag |= VI_DEFINACT; 3291 VI_UNLOCK(vp); 3292 atomic_add_long(&deferred_inact, 1); 3293 } 3294 3295 static void 3296 vdefer_inactive_unlocked(struct vnode *vp) 3297 { 3298 3299 VI_LOCK(vp); 3300 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3301 vdropl(vp); 3302 return; 3303 } 3304 vdefer_inactive(vp); 3305 } 3306 3307 enum vput_op { VRELE, VPUT, VUNREF }; 3308 3309 /* 3310 * Handle ->v_usecount transitioning to 0. 3311 * 3312 * By releasing the last usecount we take ownership of the hold count which 3313 * provides liveness of the vnode, meaning we have to vdrop. 3314 * 3315 * For all vnodes we may need to perform inactive processing. It requires an 3316 * exclusive lock on the vnode, while it is legal to call here with only a 3317 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3318 * inactive processing gets deferred to the syncer. 3319 * 3320 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3321 * on the lock being held all the way until VOP_INACTIVE. This in particular 3322 * happens with UFS which adds half-constructed vnodes to the hash, where they 3323 * can be found by other code. 
3324 */ 3325 static void 3326 vput_final(struct vnode *vp, enum vput_op func) 3327 { 3328 int error; 3329 bool want_unlock; 3330 3331 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3332 VNPASS(vp->v_holdcnt > 0, vp); 3333 3334 VI_LOCK(vp); 3335 3336 /* 3337 * By the time we got here someone else might have transitioned 3338 * the count back to > 0. 3339 */ 3340 if (vp->v_usecount > 0) 3341 goto out; 3342 3343 /* 3344 * If the vnode is doomed vgone already performed inactive processing 3345 * (if needed). 3346 */ 3347 if (VN_IS_DOOMED(vp)) 3348 goto out; 3349 3350 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3351 goto out; 3352 3353 if (vp->v_iflag & VI_DOINGINACT) 3354 goto out; 3355 3356 /* 3357 * Locking operations here will drop the interlock and possibly the 3358 * vnode lock, opening a window where the vnode can get doomed all the 3359 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3360 * perform inactive. 3361 */ 3362 vp->v_iflag |= VI_OWEINACT; 3363 want_unlock = false; 3364 error = 0; 3365 switch (func) { 3366 case VRELE: 3367 switch (VOP_ISLOCKED(vp)) { 3368 case LK_EXCLUSIVE: 3369 break; 3370 case LK_EXCLOTHER: 3371 case 0: 3372 want_unlock = true; 3373 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3374 VI_LOCK(vp); 3375 break; 3376 default: 3377 /* 3378 * The lock has at least one sharer, but we have no way 3379 * to conclude whether this is us. Play it safe and 3380 * defer processing. 3381 */ 3382 error = EAGAIN; 3383 break; 3384 } 3385 break; 3386 case VPUT: 3387 want_unlock = true; 3388 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3389 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3390 LK_NOWAIT); 3391 VI_LOCK(vp); 3392 } 3393 break; 3394 case VUNREF: 3395 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3396 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3397 VI_LOCK(vp); 3398 } 3399 break; 3400 } 3401 if (error == 0) { 3402 if (func == VUNREF) { 3403 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3404 ("recursive vunref")); 3405 vp->v_vflag |= VV_UNREF; 3406 } 3407 for (;;) { 3408 error = vinactive(vp); 3409 if (want_unlock) 3410 VOP_UNLOCK(vp); 3411 if (error != ERELOOKUP || !want_unlock) 3412 break; 3413 VOP_LOCK(vp, LK_EXCLUSIVE); 3414 } 3415 if (func == VUNREF) 3416 vp->v_vflag &= ~VV_UNREF; 3417 vdropl(vp); 3418 } else { 3419 vdefer_inactive(vp); 3420 } 3421 return; 3422 out: 3423 if (func == VPUT) 3424 VOP_UNLOCK(vp); 3425 vdropl(vp); 3426 } 3427 3428 /* 3429 * Decrement ->v_usecount for a vnode. 3430 * 3431 * Releasing the last use count requires additional processing, see vput_final 3432 * above for details. 3433 * 3434 * Comment above each variant denotes lock state on entry and exit. 
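 *
 * A hedged illustration of the common pairings:
 *
 *	vref(vp);    use vp unlocked ...    vrele(vp);
 *	vget(vp, LK_EXCLUSIVE);    use vp locked ...    vput(vp);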
3435 */ 3436 3437 /* 3438 * in: any 3439 * out: same as passed in 3440 */ 3441 void 3442 vrele(struct vnode *vp) 3443 { 3444 3445 ASSERT_VI_UNLOCKED(vp, __func__); 3446 if (!refcount_release(&vp->v_usecount)) 3447 return; 3448 vput_final(vp, VRELE); 3449 } 3450 3451 /* 3452 * in: locked 3453 * out: unlocked 3454 */ 3455 void 3456 vput(struct vnode *vp) 3457 { 3458 3459 ASSERT_VOP_LOCKED(vp, __func__); 3460 ASSERT_VI_UNLOCKED(vp, __func__); 3461 if (!refcount_release(&vp->v_usecount)) { 3462 VOP_UNLOCK(vp); 3463 return; 3464 } 3465 vput_final(vp, VPUT); 3466 } 3467 3468 /* 3469 * in: locked 3470 * out: locked 3471 */ 3472 void 3473 vunref(struct vnode *vp) 3474 { 3475 3476 ASSERT_VOP_LOCKED(vp, __func__); 3477 ASSERT_VI_UNLOCKED(vp, __func__); 3478 if (!refcount_release(&vp->v_usecount)) 3479 return; 3480 vput_final(vp, VUNREF); 3481 } 3482 3483 void 3484 vhold(struct vnode *vp) 3485 { 3486 int old; 3487 3488 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3489 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3490 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3491 ("%s: wrong hold count %d", __func__, old)); 3492 if (old == 0) 3493 vfs_freevnodes_dec(); 3494 } 3495 3496 void 3497 vholdnz(struct vnode *vp) 3498 { 3499 3500 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3501 #ifdef INVARIANTS 3502 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3503 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3504 ("%s: wrong hold count %d", __func__, old)); 3505 #else 3506 atomic_add_int(&vp->v_holdcnt, 1); 3507 #endif 3508 } 3509 3510 /* 3511 * Grab a hold count unless the vnode is freed. 3512 * 3513 * Only use this routine if vfs smr is the only protection you have against 3514 * freeing the vnode. 3515 * 3516 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3517 * is not set. After the flag is set the vnode becomes immutable to anyone but 3518 * the thread which managed to set the flag. 3519 * 3520 * It may be tempting to replace the loop with: 3521 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3522 * if (count & VHOLD_NO_SMR) { 3523 * backpedal and error out; 3524 * } 3525 * 3526 * However, while this is more performant, it hinders debugging by eliminating 3527 * the previously mentioned invariant. 3528 */ 3529 bool 3530 vhold_smr(struct vnode *vp) 3531 { 3532 int count; 3533 3534 VFS_SMR_ASSERT_ENTERED(); 3535 3536 count = atomic_load_int(&vp->v_holdcnt); 3537 for (;;) { 3538 if (count & VHOLD_NO_SMR) { 3539 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3540 ("non-zero hold count with flags %d\n", count)); 3541 return (false); 3542 } 3543 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3544 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3545 if (count == 0) 3546 vfs_freevnodes_dec(); 3547 return (true); 3548 } 3549 } 3550 } 3551 3552 /* 3553 * Hold a free vnode for recycling. 3554 * 3555 * Note: vnode_init references this comment. 3556 * 3557 * Attempts to recycle only need the global vnode list lock and have no use for 3558 * SMR. 3559 * 3560 * However, vnodes get inserted into the global list before they get fully 3561 * initialized and stay there until UMA decides to free the memory. This in 3562 * particular means the target can be found before it becomes usable and after 3563 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3564 * VHOLD_NO_SMR. 3565 * 3566 * Note: the vnode may gain more references after we transition the count 0->1. 
3567 */ 3568 static bool 3569 vhold_recycle_free(struct vnode *vp) 3570 { 3571 int count; 3572 3573 mtx_assert(&vnode_list_mtx, MA_OWNED); 3574 3575 count = atomic_load_int(&vp->v_holdcnt); 3576 for (;;) { 3577 if (count & VHOLD_NO_SMR) { 3578 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3579 ("non-zero hold count with flags %d\n", count)); 3580 return (false); 3581 } 3582 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3583 if (count > 0) { 3584 return (false); 3585 } 3586 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3587 vfs_freevnodes_dec(); 3588 return (true); 3589 } 3590 } 3591 } 3592 3593 static void __noinline 3594 vdbatch_process(struct vdbatch *vd) 3595 { 3596 struct vnode *vp; 3597 int i; 3598 3599 mtx_assert(&vd->lock, MA_OWNED); 3600 MPASS(curthread->td_pinned > 0); 3601 MPASS(vd->index == VDBATCH_SIZE); 3602 3603 /* 3604 * Attempt to requeue the passed batch, but give up easily. 3605 * 3606 * Despite batching the mechanism is prone to transient *significant* 3607 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3608 * if multiple CPUs get here (one real-world example is highly parallel 3609 * do-nothing make, which will stat *tons* of vnodes). Since it is 3610 * quasi-LRU (read: not that great even if fully honoured) just dodge 3611 * the problem. Parties which don't like it are welcome to implement 3612 * something better. 3613 */ 3614 critical_enter(); 3615 if (mtx_trylock(&vnode_list_mtx)) { 3616 for (i = 0; i < VDBATCH_SIZE; i++) { 3617 vp = vd->tab[i]; 3618 vd->tab[i] = NULL; 3619 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3620 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3621 MPASS(vp->v_dbatchcpu != NOCPU); 3622 vp->v_dbatchcpu = NOCPU; 3623 } 3624 mtx_unlock(&vnode_list_mtx); 3625 } else { 3626 counter_u64_add(vnode_skipped_requeues, 1); 3627 3628 for (i = 0; i < VDBATCH_SIZE; i++) { 3629 vp = vd->tab[i]; 3630 vd->tab[i] = NULL; 3631 MPASS(vp->v_dbatchcpu != NOCPU); 3632 vp->v_dbatchcpu = NOCPU; 3633 } 3634 } 3635 vd->index = 0; 3636 critical_exit(); 3637 } 3638 3639 static void 3640 vdbatch_enqueue(struct vnode *vp) 3641 { 3642 struct vdbatch *vd; 3643 3644 ASSERT_VI_LOCKED(vp, __func__); 3645 VNPASS(!VN_IS_DOOMED(vp), vp); 3646 3647 if (vp->v_dbatchcpu != NOCPU) { 3648 VI_UNLOCK(vp); 3649 return; 3650 } 3651 3652 sched_pin(); 3653 vd = DPCPU_PTR(vd); 3654 mtx_lock(&vd->lock); 3655 MPASS(vd->index < VDBATCH_SIZE); 3656 MPASS(vd->tab[vd->index] == NULL); 3657 /* 3658 * A hack: we depend on being pinned so that we know what to put in 3659 * ->v_dbatchcpu. 3660 */ 3661 vp->v_dbatchcpu = curcpu; 3662 vd->tab[vd->index] = vp; 3663 vd->index++; 3664 VI_UNLOCK(vp); 3665 if (vd->index == VDBATCH_SIZE) 3666 vdbatch_process(vd); 3667 mtx_unlock(&vd->lock); 3668 sched_unpin(); 3669 } 3670 3671 /* 3672 * This routine must only be called for vnodes which are about to be 3673 * deallocated. Supporting dequeue for arbitrary vnodes would require 3674 * validating that the locked batch matches.
3675 */ 3676 static void 3677 vdbatch_dequeue(struct vnode *vp) 3678 { 3679 struct vdbatch *vd; 3680 int i; 3681 short cpu; 3682 3683 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3684 3685 cpu = vp->v_dbatchcpu; 3686 if (cpu == NOCPU) 3687 return; 3688 3689 vd = DPCPU_ID_PTR(cpu, vd); 3690 mtx_lock(&vd->lock); 3691 for (i = 0; i < vd->index; i++) { 3692 if (vd->tab[i] != vp) 3693 continue; 3694 vp->v_dbatchcpu = NOCPU; 3695 vd->index--; 3696 vd->tab[i] = vd->tab[vd->index]; 3697 vd->tab[vd->index] = NULL; 3698 break; 3699 } 3700 mtx_unlock(&vd->lock); 3701 /* 3702 * Either we dequeued the vnode above or the target CPU beat us to it. 3703 */ 3704 MPASS(vp->v_dbatchcpu == NOCPU); 3705 } 3706 3707 /* 3708 * Drop the hold count of the vnode. If this is the last reference to 3709 * the vnode we place it on the free list unless it has been vgone'd 3710 * (marked VIRF_DOOMED) in which case we will free it. 3711 * 3712 * Because the vnode vm object keeps a hold reference on the vnode if 3713 * there is at least one resident non-cached page, the vnode cannot 3714 * leave the active list without the page cleanup done. 3715 */ 3716 static void __noinline 3717 vdropl_final(struct vnode *vp) 3718 { 3719 3720 ASSERT_VI_LOCKED(vp, __func__); 3721 VNPASS(VN_IS_DOOMED(vp), vp); 3722 /* 3723 * Set the VHOLD_NO_SMR flag. 3724 * 3725 * We may be racing against vhold_smr. If they win we can just pretend 3726 * we never got this far, they will vdrop later. 3727 */ 3728 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3729 vfs_freevnodes_inc(); 3730 VI_UNLOCK(vp); 3731 /* 3732 * We lost the aforementioned race. Any subsequent access is 3733 * invalid as they might have managed to vdropl on their own. 3734 */ 3735 return; 3736 } 3737 /* 3738 * Don't bump freevnodes as this one is going away. 3739 */ 3740 freevnode(vp); 3741 } 3742 3743 void 3744 vdrop(struct vnode *vp) 3745 { 3746 3747 ASSERT_VI_UNLOCKED(vp, __func__); 3748 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3749 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3750 return; 3751 VI_LOCK(vp); 3752 vdropl(vp); 3753 } 3754 3755 static void __always_inline 3756 vdropl_impl(struct vnode *vp, bool enqueue) 3757 { 3758 3759 ASSERT_VI_LOCKED(vp, __func__); 3760 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3761 if (!refcount_release(&vp->v_holdcnt)) { 3762 VI_UNLOCK(vp); 3763 return; 3764 } 3765 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3766 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3767 if (VN_IS_DOOMED(vp)) { 3768 vdropl_final(vp); 3769 return; 3770 } 3771 3772 vfs_freevnodes_inc(); 3773 if (vp->v_mflag & VMP_LAZYLIST) { 3774 vunlazy(vp); 3775 } 3776 3777 if (!enqueue) { 3778 VI_UNLOCK(vp); 3779 return; 3780 } 3781 3782 /* 3783 * Also unlocks the interlock. We can't assert on it as we 3784 * released our hold and by now the vnode might have been 3785 * freed. 3786 */ 3787 vdbatch_enqueue(vp); 3788 } 3789 3790 void 3791 vdropl(struct vnode *vp) 3792 { 3793 3794 vdropl_impl(vp, true); 3795 } 3796 3797 /* 3798 * vdrop a vnode when recycling 3799 * 3800 * This is a special case routine only to be used when recycling; it differs 3801 * from regular vdrop by not requeueing the vnode on the LRU. 3802 * 3803 * Consider a case where vtryrecycle continuously fails with all vnodes (due to, 3804 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3805 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3806 * loop which can last for as long as writes are frozen.
3807 */ 3808 static void 3809 vdropl_recycle(struct vnode *vp) 3810 { 3811 3812 vdropl_impl(vp, false); 3813 } 3814 3815 static void 3816 vdrop_recycle(struct vnode *vp) 3817 { 3818 3819 VI_LOCK(vp); 3820 vdropl_recycle(vp); 3821 } 3822 3823 /* 3824 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3825 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3826 */ 3827 static int 3828 vinactivef(struct vnode *vp) 3829 { 3830 struct vm_object *obj; 3831 int error; 3832 3833 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3834 ASSERT_VI_LOCKED(vp, "vinactive"); 3835 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3836 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3837 vp->v_iflag |= VI_DOINGINACT; 3838 vp->v_iflag &= ~VI_OWEINACT; 3839 VI_UNLOCK(vp); 3840 /* 3841 * Before moving off the active list, we must be sure that any 3842 * modified pages are converted into the vnode's dirty 3843 * buffers, since these will no longer be checked once the 3844 * vnode is on the inactive list. 3845 * 3846 * The write-out of the dirty pages is asynchronous. At the 3847 * point that VOP_INACTIVE() is called, there could still be 3848 * pending I/O and dirty pages in the object. 3849 */ 3850 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3851 vm_object_mightbedirty(obj)) { 3852 VM_OBJECT_WLOCK(obj); 3853 vm_object_page_clean(obj, 0, 0, 0); 3854 VM_OBJECT_WUNLOCK(obj); 3855 } 3856 error = VOP_INACTIVE(vp); 3857 VI_LOCK(vp); 3858 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 3859 vp->v_iflag &= ~VI_DOINGINACT; 3860 return (error); 3861 } 3862 3863 int 3864 vinactive(struct vnode *vp) 3865 { 3866 3867 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3868 ASSERT_VI_LOCKED(vp, "vinactive"); 3869 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3870 3871 if ((vp->v_iflag & VI_OWEINACT) == 0) 3872 return (0); 3873 if (vp->v_iflag & VI_DOINGINACT) 3874 return (0); 3875 if (vp->v_usecount > 0) { 3876 vp->v_iflag &= ~VI_OWEINACT; 3877 return (0); 3878 } 3879 return (vinactivef(vp)); 3880 } 3881 3882 /* 3883 * Remove any vnodes in the vnode table belonging to mount point mp. 3884 * 3885 * If FORCECLOSE is not specified, there should not be any active ones; 3886 * return an error if any are found (nb: this is a user error, not a 3887 * system error). If FORCECLOSE is specified, detach any active vnodes 3888 * that are found. 3889 * 3890 * If WRITECLOSE is set, only flush out regular file vnodes open for 3891 * writing. 3892 * 3893 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3894 * 3895 * `rootrefs' specifies the base reference count for the root vnode 3896 * of this filesystem. The root vnode is considered busy if its 3897 * v_usecount exceeds this value. On a successful return, vflush() 3898 * will call vrele() on the root vnode exactly rootrefs times. 3899 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3900 * be zero. 3901 */ 3902 #ifdef DIAGNOSTIC 3903 static int busyprt = 0; /* print out busy vnodes */ 3904 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3905 #endif 3906 3907 int 3908 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3909 { 3910 struct vnode *vp, *mvp, *rootvp = NULL; 3911 struct vattr vattr; 3912 int busy = 0, error; 3913 3914 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3915 rootrefs, flags); 3916 if (rootrefs > 0) { 3917 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3918 ("vflush: bad args")); 3919 /* 3920 * Get the filesystem root vnode.
We can vput() it 3921 * immediately, since with rootrefs > 0, it won't go away. 3922 */ 3923 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3924 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3925 __func__, error); 3926 return (error); 3927 } 3928 vput(rootvp); 3929 } 3930 loop: 3931 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3932 vholdl(vp); 3933 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3934 if (error) { 3935 vdrop(vp); 3936 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3937 goto loop; 3938 } 3939 /* 3940 * Skip over a vnodes marked VV_SYSTEM. 3941 */ 3942 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3943 VOP_UNLOCK(vp); 3944 vdrop(vp); 3945 continue; 3946 } 3947 /* 3948 * If WRITECLOSE is set, flush out unlinked but still open 3949 * files (even if open only for reading) and regular file 3950 * vnodes open for writing. 3951 */ 3952 if (flags & WRITECLOSE) { 3953 if (vp->v_object != NULL) { 3954 VM_OBJECT_WLOCK(vp->v_object); 3955 vm_object_page_clean(vp->v_object, 0, 0, 0); 3956 VM_OBJECT_WUNLOCK(vp->v_object); 3957 } 3958 do { 3959 error = VOP_FSYNC(vp, MNT_WAIT, td); 3960 } while (error == ERELOOKUP); 3961 if (error != 0) { 3962 VOP_UNLOCK(vp); 3963 vdrop(vp); 3964 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3965 return (error); 3966 } 3967 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3968 VI_LOCK(vp); 3969 3970 if ((vp->v_type == VNON || 3971 (error == 0 && vattr.va_nlink > 0)) && 3972 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3973 VOP_UNLOCK(vp); 3974 vdropl(vp); 3975 continue; 3976 } 3977 } else 3978 VI_LOCK(vp); 3979 /* 3980 * With v_usecount == 0, all we need to do is clear out the 3981 * vnode data structures and we are done. 3982 * 3983 * If FORCECLOSE is set, forcibly close the vnode. 3984 */ 3985 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3986 vgonel(vp); 3987 } else { 3988 busy++; 3989 #ifdef DIAGNOSTIC 3990 if (busyprt) 3991 vn_printf(vp, "vflush: busy vnode "); 3992 #endif 3993 } 3994 VOP_UNLOCK(vp); 3995 vdropl(vp); 3996 } 3997 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3998 /* 3999 * If just the root vnode is busy, and if its refcount 4000 * is equal to `rootrefs', then go ahead and kill it. 4001 */ 4002 VI_LOCK(rootvp); 4003 KASSERT(busy > 0, ("vflush: not busy")); 4004 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 4005 ("vflush: usecount %d < rootrefs %d", 4006 rootvp->v_usecount, rootrefs)); 4007 if (busy == 1 && rootvp->v_usecount == rootrefs) { 4008 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 4009 vgone(rootvp); 4010 VOP_UNLOCK(rootvp); 4011 busy = 0; 4012 } else 4013 VI_UNLOCK(rootvp); 4014 } 4015 if (busy) { 4016 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 4017 busy); 4018 return (EBUSY); 4019 } 4020 for (; rootrefs > 0; rootrefs--) 4021 vrele(rootvp); 4022 return (0); 4023 } 4024 4025 /* 4026 * Recycle an unused vnode to the front of the free list. 4027 */ 4028 int 4029 vrecycle(struct vnode *vp) 4030 { 4031 int recycled; 4032 4033 VI_LOCK(vp); 4034 recycled = vrecyclel(vp); 4035 VI_UNLOCK(vp); 4036 return (recycled); 4037 } 4038 4039 /* 4040 * vrecycle, with the vp interlock held. 4041 */ 4042 int 4043 vrecyclel(struct vnode *vp) 4044 { 4045 int recycled; 4046 4047 ASSERT_VOP_ELOCKED(vp, __func__); 4048 ASSERT_VI_LOCKED(vp, __func__); 4049 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4050 recycled = 0; 4051 if (vp->v_usecount == 0) { 4052 recycled = 1; 4053 vgonel(vp); 4054 } 4055 return (recycled); 4056 } 4057 4058 /* 4059 * Eliminate all activity associated with a vnode 4060 * in preparation for reuse. 
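 *
 * The vnode lock must be held exclusively across the call. A minimal
 * usage sketch, mirroring how this file retires the syncer vnode (the
 * hypothetical caller is assumed to own a use reference):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	vput(vp);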
4061 */ 4062 void 4063 vgone(struct vnode *vp) 4064 { 4065 VI_LOCK(vp); 4066 vgonel(vp); 4067 VI_UNLOCK(vp); 4068 } 4069 4070 /* 4071 * Notify upper mounts about reclaimed or unlinked vnode. 4072 */ 4073 void 4074 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4075 { 4076 struct mount *mp; 4077 struct mount_upper_node *ump; 4078 4079 mp = atomic_load_ptr(&vp->v_mount); 4080 if (mp == NULL) 4081 return; 4082 if (TAILQ_EMPTY(&mp->mnt_notify)) 4083 return; 4084 4085 MNT_ILOCK(mp); 4086 mp->mnt_upper_pending++; 4087 KASSERT(mp->mnt_upper_pending > 0, 4088 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4089 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4090 MNT_IUNLOCK(mp); 4091 switch (event) { 4092 case VFS_NOTIFY_UPPER_RECLAIM: 4093 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4094 break; 4095 case VFS_NOTIFY_UPPER_UNLINK: 4096 VFS_UNLINK_LOWERVP(ump->mp, vp); 4097 break; 4098 } 4099 MNT_ILOCK(mp); 4100 } 4101 mp->mnt_upper_pending--; 4102 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4103 mp->mnt_upper_pending == 0) { 4104 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4105 wakeup(&mp->mnt_uppers); 4106 } 4107 MNT_IUNLOCK(mp); 4108 } 4109 4110 /* 4111 * vgone, with the vp interlock held. 4112 */ 4113 static void 4114 vgonel(struct vnode *vp) 4115 { 4116 struct thread *td; 4117 struct mount *mp; 4118 vm_object_t object; 4119 bool active, doinginact, oweinact; 4120 4121 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4122 ASSERT_VI_LOCKED(vp, "vgonel"); 4123 VNASSERT(vp->v_holdcnt, vp, 4124 ("vgonel: vp %p has no reference.", vp)); 4125 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4126 td = curthread; 4127 4128 /* 4129 * Don't vgonel if we're already doomed. 4130 */ 4131 if (VN_IS_DOOMED(vp)) { 4132 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4133 vn_get_state(vp) == VSTATE_DEAD, vp); 4134 return; 4135 } 4136 /* 4137 * Paired with freevnode. 4138 */ 4139 vn_seqc_write_begin_locked(vp); 4140 vunlazy_gone(vp); 4141 vn_irflag_set_locked(vp, VIRF_DOOMED); 4142 vn_set_state(vp, VSTATE_DESTROYING); 4143 4144 /* 4145 * Check to see if the vnode is in use. If so, we have to 4146 * call VOP_CLOSE() and VOP_INACTIVE(). 4147 * 4148 * It could be that VOP_INACTIVE() requested reclamation, in 4149 * which case we should avoid recursion, so check 4150 * VI_DOINGINACT. This is not precise but good enough. 4151 */ 4152 active = vp->v_usecount > 0; 4153 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4154 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4155 4156 /* 4157 * If we need to do inactive VI_OWEINACT will be set. 4158 */ 4159 if (vp->v_iflag & VI_DEFINACT) { 4160 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4161 vp->v_iflag &= ~VI_DEFINACT; 4162 vdropl(vp); 4163 } else { 4164 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4165 VI_UNLOCK(vp); 4166 } 4167 cache_purge_vgone(vp); 4168 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4169 4170 /* 4171 * If purging an active vnode, it must be closed and 4172 * deactivated before being reclaimed. 4173 */ 4174 if (active) 4175 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4176 if (!doinginact) { 4177 do { 4178 if (oweinact || active) { 4179 VI_LOCK(vp); 4180 vinactivef(vp); 4181 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4182 VI_UNLOCK(vp); 4183 } 4184 } while (oweinact); 4185 } 4186 if (vp->v_type == VSOCK) 4187 vfs_unp_reclaim(vp); 4188 4189 /* 4190 * Clean out any buffers associated with the vnode. 4191 * If the flush fails, just toss the buffers. 
4192 */ 4193 mp = NULL; 4194 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4195 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4196 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4197 while (vinvalbuf(vp, 0, 0, 0) != 0) 4198 ; 4199 } 4200 4201 BO_LOCK(&vp->v_bufobj); 4202 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4203 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4204 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4205 vp->v_bufobj.bo_clean.bv_cnt == 0, 4206 ("vp %p bufobj not invalidated", vp)); 4207 4208 /* 4209 * For VMIO bufobj, BO_DEAD is set later, or in 4210 * vm_object_terminate() after the object's page queue is 4211 * flushed. 4212 */ 4213 object = vp->v_bufobj.bo_object; 4214 if (object == NULL) 4215 vp->v_bufobj.bo_flag |= BO_DEAD; 4216 BO_UNLOCK(&vp->v_bufobj); 4217 4218 /* 4219 * Handle the VM part. Tmpfs handles v_object on its own (the 4220 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4221 * should not touch the object borrowed from the lower vnode 4222 * (the handle check). 4223 */ 4224 if (object != NULL && object->type == OBJT_VNODE && 4225 object->handle == vp) 4226 vnode_destroy_vobject(vp); 4227 4228 /* 4229 * Reclaim the vnode. 4230 */ 4231 if (VOP_RECLAIM(vp)) 4232 panic("vgone: cannot reclaim"); 4233 if (mp != NULL) 4234 vn_finished_secondary_write(mp); 4235 VNASSERT(vp->v_object == NULL, vp, 4236 ("vop_reclaim left v_object vp=%p", vp)); 4237 /* 4238 * Clear the advisory locks and wake up waiting threads. 4239 */ 4240 if (vp->v_lockf != NULL) { 4241 (void)VOP_ADVLOCKPURGE(vp); 4242 vp->v_lockf = NULL; 4243 } 4244 /* 4245 * Delete from old mount point vnode list. 4246 */ 4247 if (vp->v_mount == NULL) { 4248 VI_LOCK(vp); 4249 } else { 4250 delmntque(vp); 4251 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4252 } 4253 /* 4254 * Done with purge, reset to the standard lock and invalidate 4255 * the vnode. 4256 */ 4257 vp->v_vnlock = &vp->v_lock; 4258 vp->v_op = &dead_vnodeops; 4259 vp->v_type = VBAD; 4260 vn_set_state(vp, VSTATE_DEAD); 4261 } 4262 4263 /* 4264 * Print out a description of a vnode. 4265 */ 4266 static const char *const vtypename[] = { 4267 [VNON] = "VNON", 4268 [VREG] = "VREG", 4269 [VDIR] = "VDIR", 4270 [VBLK] = "VBLK", 4271 [VCHR] = "VCHR", 4272 [VLNK] = "VLNK", 4273 [VSOCK] = "VSOCK", 4274 [VFIFO] = "VFIFO", 4275 [VBAD] = "VBAD", 4276 [VMARKER] = "VMARKER", 4277 }; 4278 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4279 "vnode type name not added to vtypename"); 4280 4281 static const char *const vstatename[] = { 4282 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4283 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4284 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4285 [VSTATE_DEAD] = "VSTATE_DEAD", 4286 }; 4287 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4288 "vnode state name not added to vstatename"); 4289 4290 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4291 "new hold count flag not added to vn_printf"); 4292 4293 void 4294 vn_printf(struct vnode *vp, const char *fmt, ...) 
4295 { 4296 va_list ap; 4297 char buf[256], buf2[16]; 4298 u_long flags; 4299 u_int holdcnt; 4300 short irflag; 4301 4302 va_start(ap, fmt); 4303 vprintf(fmt, ap); 4304 va_end(ap); 4305 printf("%p: ", (void *)vp); 4306 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4307 vstatename[vp->v_state], vp->v_op); 4308 holdcnt = atomic_load_int(&vp->v_holdcnt); 4309 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4310 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4311 vp->v_seqc_users); 4312 switch (vp->v_type) { 4313 case VDIR: 4314 printf(" mountedhere %p\n", vp->v_mountedhere); 4315 break; 4316 case VCHR: 4317 printf(" rdev %p\n", vp->v_rdev); 4318 break; 4319 case VSOCK: 4320 printf(" socket %p\n", vp->v_unpcb); 4321 break; 4322 case VFIFO: 4323 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4324 break; 4325 default: 4326 printf("\n"); 4327 break; 4328 } 4329 buf[0] = '\0'; 4330 buf[1] = '\0'; 4331 if (holdcnt & VHOLD_NO_SMR) 4332 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4333 printf(" hold count flags (%s)\n", buf + 1); 4334 4335 buf[0] = '\0'; 4336 buf[1] = '\0'; 4337 irflag = vn_irflag_read(vp); 4338 if (irflag & VIRF_DOOMED) 4339 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4340 if (irflag & VIRF_PGREAD) 4341 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4342 if (irflag & VIRF_MOUNTPOINT) 4343 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4344 if (irflag & VIRF_TEXT_REF) 4345 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4346 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4347 if (flags != 0) { 4348 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4349 strlcat(buf, buf2, sizeof(buf)); 4350 } 4351 if (vp->v_vflag & VV_ROOT) 4352 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4353 if (vp->v_vflag & VV_ISTTY) 4354 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4355 if (vp->v_vflag & VV_NOSYNC) 4356 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4357 if (vp->v_vflag & VV_ETERNALDEV) 4358 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4359 if (vp->v_vflag & VV_CACHEDLABEL) 4360 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4361 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4362 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4363 if (vp->v_vflag & VV_COPYONWRITE) 4364 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4365 if (vp->v_vflag & VV_SYSTEM) 4366 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4367 if (vp->v_vflag & VV_PROCDEP) 4368 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4369 if (vp->v_vflag & VV_DELETED) 4370 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4371 if (vp->v_vflag & VV_MD) 4372 strlcat(buf, "|VV_MD", sizeof(buf)); 4373 if (vp->v_vflag & VV_FORCEINSMQ) 4374 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4375 if (vp->v_vflag & VV_READLINK) 4376 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4377 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4378 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4379 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4380 if (flags != 0) { 4381 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4382 strlcat(buf, buf2, sizeof(buf)); 4383 } 4384 if (vp->v_iflag & VI_MOUNT) 4385 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4386 if (vp->v_iflag & VI_DOINGINACT) 4387 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4388 if (vp->v_iflag & VI_OWEINACT) 4389 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4390 if (vp->v_iflag & VI_DEFINACT) 4391 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4392 if (vp->v_iflag & VI_FOPENING) 4393 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4394 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4395 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4396 if (flags != 0) { 4397 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4398 strlcat(buf, buf2, sizeof(buf)); 4399 } 4400 if (vp->v_mflag & VMP_LAZYLIST) 4401 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4402 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4403 if (flags != 0) { 4404 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4405 strlcat(buf, buf2, sizeof(buf)); 4406 } 4407 printf(" flags (%s)", buf + 1); 4408 if (mtx_owned(VI_MTX(vp))) 4409 printf(" VI_LOCKed"); 4410 printf("\n"); 4411 if (vp->v_object != NULL) 4412 printf(" v_object %p ref %d pages %d " 4413 "cleanbuf %d dirtybuf %d\n", 4414 vp->v_object, vp->v_object->ref_count, 4415 vp->v_object->resident_page_count, 4416 vp->v_bufobj.bo_clean.bv_cnt, 4417 vp->v_bufobj.bo_dirty.bv_cnt); 4418 printf(" "); 4419 lockmgr_printinfo(vp->v_vnlock); 4420 if (vp->v_data != NULL) 4421 VOP_PRINT(vp); 4422 } 4423 4424 #ifdef DDB 4425 /* 4426 * List all of the locked vnodes in the system. 4427 * Called when debugging the kernel. 4428 */ 4429 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4430 { 4431 struct mount *mp; 4432 struct vnode *vp; 4433 4434 /* 4435 * Note: because this is DDB, we can't obey the locking semantics 4436 * for these structures, which means we could catch an inconsistent 4437 * state and dereference a nasty pointer. Not much to be done 4438 * about that. 4439 */ 4440 db_printf("Locked vnodes\n"); 4441 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4442 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4443 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4444 vn_printf(vp, "vnode "); 4445 } 4446 } 4447 } 4448 4449 /* 4450 * Show details about the given vnode. 4451 */ 4452 DB_SHOW_COMMAND(vnode, db_show_vnode) 4453 { 4454 struct vnode *vp; 4455 4456 if (!have_addr) 4457 return; 4458 vp = (struct vnode *)addr; 4459 vn_printf(vp, "vnode "); 4460 } 4461 4462 /* 4463 * Show details about the given mount point. 4464 */ 4465 DB_SHOW_COMMAND(mount, db_show_mount) 4466 { 4467 struct mount *mp; 4468 struct vfsopt *opt; 4469 struct statfs *sp; 4470 struct vnode *vp; 4471 char buf[512]; 4472 uint64_t mflags; 4473 u_int flags; 4474 4475 if (!have_addr) { 4476 /* No address given, print short info about all mount points. 
*/ 4477 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4478 db_printf("%p %s on %s (%s)\n", mp, 4479 mp->mnt_stat.f_mntfromname, 4480 mp->mnt_stat.f_mntonname, 4481 mp->mnt_stat.f_fstypename); 4482 if (db_pager_quit) 4483 break; 4484 } 4485 db_printf("\nMore info: show mount <addr>\n"); 4486 return; 4487 } 4488 4489 mp = (struct mount *)addr; 4490 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4491 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4492 4493 buf[0] = '\0'; 4494 mflags = mp->mnt_flag; 4495 #define MNT_FLAG(flag) do { \ 4496 if (mflags & (flag)) { \ 4497 if (buf[0] != '\0') \ 4498 strlcat(buf, ", ", sizeof(buf)); \ 4499 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4500 mflags &= ~(flag); \ 4501 } \ 4502 } while (0) 4503 MNT_FLAG(MNT_RDONLY); 4504 MNT_FLAG(MNT_SYNCHRONOUS); 4505 MNT_FLAG(MNT_NOEXEC); 4506 MNT_FLAG(MNT_NOSUID); 4507 MNT_FLAG(MNT_NFS4ACLS); 4508 MNT_FLAG(MNT_UNION); 4509 MNT_FLAG(MNT_ASYNC); 4510 MNT_FLAG(MNT_SUIDDIR); 4511 MNT_FLAG(MNT_SOFTDEP); 4512 MNT_FLAG(MNT_NOSYMFOLLOW); 4513 MNT_FLAG(MNT_GJOURNAL); 4514 MNT_FLAG(MNT_MULTILABEL); 4515 MNT_FLAG(MNT_ACLS); 4516 MNT_FLAG(MNT_NOATIME); 4517 MNT_FLAG(MNT_NOCLUSTERR); 4518 MNT_FLAG(MNT_NOCLUSTERW); 4519 MNT_FLAG(MNT_SUJ); 4520 MNT_FLAG(MNT_EXRDONLY); 4521 MNT_FLAG(MNT_EXPORTED); 4522 MNT_FLAG(MNT_DEFEXPORTED); 4523 MNT_FLAG(MNT_EXPORTANON); 4524 MNT_FLAG(MNT_EXKERB); 4525 MNT_FLAG(MNT_EXPUBLIC); 4526 MNT_FLAG(MNT_LOCAL); 4527 MNT_FLAG(MNT_QUOTA); 4528 MNT_FLAG(MNT_ROOTFS); 4529 MNT_FLAG(MNT_USER); 4530 MNT_FLAG(MNT_IGNORE); 4531 MNT_FLAG(MNT_UPDATE); 4532 MNT_FLAG(MNT_DELEXPORT); 4533 MNT_FLAG(MNT_RELOAD); 4534 MNT_FLAG(MNT_FORCE); 4535 MNT_FLAG(MNT_SNAPSHOT); 4536 MNT_FLAG(MNT_BYFSID); 4537 #undef MNT_FLAG 4538 if (mflags != 0) { 4539 if (buf[0] != '\0') 4540 strlcat(buf, ", ", sizeof(buf)); 4541 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4542 "0x%016jx", mflags); 4543 } 4544 db_printf(" mnt_flag = %s\n", buf); 4545 4546 buf[0] = '\0'; 4547 flags = mp->mnt_kern_flag; 4548 #define MNT_KERN_FLAG(flag) do { \ 4549 if (flags & (flag)) { \ 4550 if (buf[0] != '\0') \ 4551 strlcat(buf, ", ", sizeof(buf)); \ 4552 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4553 flags &= ~(flag); \ 4554 } \ 4555 } while (0) 4556 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4557 MNT_KERN_FLAG(MNTK_ASYNC); 4558 MNT_KERN_FLAG(MNTK_SOFTDEP); 4559 MNT_KERN_FLAG(MNTK_NOMSYNC); 4560 MNT_KERN_FLAG(MNTK_DRAINING); 4561 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4562 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4563 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4564 MNT_KERN_FLAG(MNTK_NO_IOPF); 4565 MNT_KERN_FLAG(MNTK_RECURSE); 4566 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4567 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4568 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4569 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4570 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4571 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4572 MNT_KERN_FLAG(MNTK_NOASYNC); 4573 MNT_KERN_FLAG(MNTK_UNMOUNT); 4574 MNT_KERN_FLAG(MNTK_MWAIT); 4575 MNT_KERN_FLAG(MNTK_SUSPEND); 4576 MNT_KERN_FLAG(MNTK_SUSPEND2); 4577 MNT_KERN_FLAG(MNTK_SUSPENDED); 4578 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4579 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4580 #undef MNT_KERN_FLAG 4581 if (flags != 0) { 4582 if (buf[0] != '\0') 4583 strlcat(buf, ", ", sizeof(buf)); 4584 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4585 "0x%08x", flags); 4586 } 4587 db_printf(" mnt_kern_flag = %s\n", buf); 4588 4589 db_printf(" mnt_opt = "); 4590 opt = TAILQ_FIRST(mp->mnt_opt); 4591 if (opt != NULL) { 4592 db_printf("%s", opt->name); 4593 opt = TAILQ_NEXT(opt, link); 4594 while (opt != 
NULL) { 4595 db_printf(", %s", opt->name); 4596 opt = TAILQ_NEXT(opt, link); 4597 } 4598 } 4599 db_printf("\n"); 4600 4601 sp = &mp->mnt_stat; 4602 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4603 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4604 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4605 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4606 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4607 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4608 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4609 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4610 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4611 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4612 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4613 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4614 4615 db_printf(" mnt_cred = { uid=%u ruid=%u", 4616 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4617 if (jailed(mp->mnt_cred)) 4618 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4619 db_printf(" }\n"); 4620 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4621 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4622 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4623 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4624 db_printf(" mnt_lazyvnodelistsize = %d\n", 4625 mp->mnt_lazyvnodelistsize); 4626 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4627 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4628 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4629 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4630 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4631 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4632 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4633 db_printf(" mnt_secondary_accwrites = %d\n", 4634 mp->mnt_secondary_accwrites); 4635 db_printf(" mnt_gjprovider = %s\n", 4636 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4637 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4638 4639 db_printf("\n\nList of active vnodes\n"); 4640 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4641 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4642 vn_printf(vp, "vnode "); 4643 if (db_pager_quit) 4644 break; 4645 } 4646 } 4647 db_printf("\n\nList of inactive vnodes\n"); 4648 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4649 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4650 vn_printf(vp, "vnode "); 4651 if (db_pager_quit) 4652 break; 4653 } 4654 } 4655 } 4656 #endif /* DDB */ 4657 4658 /* 4659 * Fill in a struct xvfsconf based on a struct vfsconf. 4660 */ 4661 static int 4662 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4663 { 4664 struct xvfsconf xvfsp; 4665 4666 bzero(&xvfsp, sizeof(xvfsp)); 4667 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4668 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4669 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4670 xvfsp.vfc_flags = vfsp->vfc_flags; 4671 /* 4672 * These are unused in userland, we keep them 4673 * to not break binary compatibility. 
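 * Userland consumers such as getvfsbyname(3) obtain these records
 * through the vfs.conflist sysctl defined below.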
4674 */ 4675 xvfsp.vfc_vfsops = NULL; 4676 xvfsp.vfc_next = NULL; 4677 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4678 } 4679 4680 #ifdef COMPAT_FREEBSD32 4681 struct xvfsconf32 { 4682 uint32_t vfc_vfsops; 4683 char vfc_name[MFSNAMELEN]; 4684 int32_t vfc_typenum; 4685 int32_t vfc_refcount; 4686 int32_t vfc_flags; 4687 uint32_t vfc_next; 4688 }; 4689 4690 static int 4691 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4692 { 4693 struct xvfsconf32 xvfsp; 4694 4695 bzero(&xvfsp, sizeof(xvfsp)); 4696 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4697 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4698 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4699 xvfsp.vfc_flags = vfsp->vfc_flags; 4700 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4701 } 4702 #endif 4703 4704 /* 4705 * Top level filesystem related information gathering. 4706 */ 4707 static int 4708 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4709 { 4710 struct vfsconf *vfsp; 4711 int error; 4712 4713 error = 0; 4714 vfsconf_slock(); 4715 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4716 #ifdef COMPAT_FREEBSD32 4717 if (req->flags & SCTL_MASK32) 4718 error = vfsconf2x32(req, vfsp); 4719 else 4720 #endif 4721 error = vfsconf2x(req, vfsp); 4722 if (error) 4723 break; 4724 } 4725 vfsconf_sunlock(); 4726 return (error); 4727 } 4728 4729 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4730 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4731 "S,xvfsconf", "List of all configured filesystems"); 4732 4733 #ifndef BURN_BRIDGES 4734 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4735 4736 static int 4737 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4738 { 4739 int *name = (int *)arg1 - 1; /* XXX */ 4740 u_int namelen = arg2 + 1; /* XXX */ 4741 struct vfsconf *vfsp; 4742 4743 log(LOG_WARNING, "userland calling deprecated sysctl, " 4744 "please rebuild world\n"); 4745 4746 #if 1 || defined(COMPAT_PRELITE2) 4747 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4748 if (namelen == 1) 4749 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4750 #endif 4751 4752 switch (name[1]) { 4753 case VFS_MAXTYPENUM: 4754 if (namelen != 2) 4755 return (ENOTDIR); 4756 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4757 case VFS_CONF: 4758 if (namelen != 3) 4759 return (ENOTDIR); /* overloaded */ 4760 vfsconf_slock(); 4761 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4762 if (vfsp->vfc_typenum == name[2]) 4763 break; 4764 } 4765 vfsconf_sunlock(); 4766 if (vfsp == NULL) 4767 return (EOPNOTSUPP); 4768 #ifdef COMPAT_FREEBSD32 4769 if (req->flags & SCTL_MASK32) 4770 return (vfsconf2x32(req, vfsp)); 4771 else 4772 #endif 4773 return (vfsconf2x(req, vfsp)); 4774 } 4775 return (EOPNOTSUPP); 4776 } 4777 4778 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4779 CTLFLAG_MPSAFE, vfs_sysctl, 4780 "Generic filesystem"); 4781 4782 #if 1 || defined(COMPAT_PRELITE2) 4783 4784 static int 4785 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4786 { 4787 int error; 4788 struct vfsconf *vfsp; 4789 struct ovfsconf ovfs; 4790 4791 vfsconf_slock(); 4792 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4793 bzero(&ovfs, sizeof(ovfs)); 4794 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4795 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4796 ovfs.vfc_index = vfsp->vfc_typenum; 4797 ovfs.vfc_refcount = vfsp->vfc_refcount; 4798 ovfs.vfc_flags = vfsp->vfc_flags; 4799 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4800 if (error != 0) { 4801 vfsconf_sunlock(); 4802 return (error); 4803 } 4804 } 4805 vfsconf_sunlock(); 4806 return (0); 4807 } 4808 4809 #endif /* 1 || COMPAT_PRELITE2 */ 4810 #endif /* !BURN_BRIDGES */ 4811 4812 static void 4813 unmount_or_warn(struct mount *mp) 4814 { 4815 int error; 4816 4817 error = dounmount(mp, MNT_FORCE, curthread); 4818 if (error != 0) { 4819 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4820 if (error == EBUSY) 4821 printf("BUSY)\n"); 4822 else 4823 printf("%d)\n", error); 4824 } 4825 } 4826 4827 /* 4828 * Unmount all filesystems. The list is traversed in reverse order 4829 * of mounting to avoid dependencies. 4830 */ 4831 void 4832 vfs_unmountall(void) 4833 { 4834 struct mount *mp, *tmp; 4835 4836 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4837 4838 /* 4839 * Since this only runs when rebooting, it is not interlocked. 4840 */ 4841 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4842 vfs_ref(mp); 4843 4844 /* 4845 * Forcibly unmounting "/dev" before "/" would prevent clean 4846 * unmount of the latter. 
4847 */ 4848 if (mp == rootdevmp) 4849 continue; 4850 4851 unmount_or_warn(mp); 4852 } 4853 4854 if (rootdevmp != NULL) 4855 unmount_or_warn(rootdevmp); 4856 } 4857 4858 static void 4859 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4860 { 4861 4862 ASSERT_VI_LOCKED(vp, __func__); 4863 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4864 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4865 vdropl(vp); 4866 return; 4867 } 4868 if (vn_lock(vp, lkflags) == 0) { 4869 VI_LOCK(vp); 4870 vinactive(vp); 4871 VOP_UNLOCK(vp); 4872 vdropl(vp); 4873 return; 4874 } 4875 vdefer_inactive_unlocked(vp); 4876 } 4877 4878 static int 4879 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4880 { 4881 4882 return (vp->v_iflag & VI_DEFINACT); 4883 } 4884 4885 static void __noinline 4886 vfs_periodic_inactive(struct mount *mp, int flags) 4887 { 4888 struct vnode *vp, *mvp; 4889 int lkflags; 4890 4891 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4892 if (flags != MNT_WAIT) 4893 lkflags |= LK_NOWAIT; 4894 4895 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4896 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4897 VI_UNLOCK(vp); 4898 continue; 4899 } 4900 vp->v_iflag &= ~VI_DEFINACT; 4901 vfs_deferred_inactive(vp, lkflags); 4902 } 4903 } 4904 4905 static inline bool 4906 vfs_want_msync(struct vnode *vp) 4907 { 4908 struct vm_object *obj; 4909 4910 /* 4911 * This test may be performed without any locks held. 4912 * We rely on vm_object's type stability. 4913 */ 4914 if (vp->v_vflag & VV_NOSYNC) 4915 return (false); 4916 obj = vp->v_object; 4917 return (obj != NULL && vm_object_mightbedirty(obj)); 4918 } 4919 4920 static int 4921 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4922 { 4923 4924 if (vp->v_vflag & VV_NOSYNC) 4925 return (false); 4926 if (vp->v_iflag & VI_DEFINACT) 4927 return (true); 4928 return (vfs_want_msync(vp)); 4929 } 4930 4931 static void __noinline 4932 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4933 { 4934 struct vnode *vp, *mvp; 4935 struct vm_object *obj; 4936 int lkflags, objflags; 4937 bool seen_defer; 4938 4939 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4940 if (flags != MNT_WAIT) { 4941 lkflags |= LK_NOWAIT; 4942 objflags = OBJPC_NOSYNC; 4943 } else { 4944 objflags = OBJPC_SYNC; 4945 } 4946 4947 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4948 seen_defer = false; 4949 if (vp->v_iflag & VI_DEFINACT) { 4950 vp->v_iflag &= ~VI_DEFINACT; 4951 seen_defer = true; 4952 } 4953 if (!vfs_want_msync(vp)) { 4954 if (seen_defer) 4955 vfs_deferred_inactive(vp, lkflags); 4956 else 4957 VI_UNLOCK(vp); 4958 continue; 4959 } 4960 if (vget(vp, lkflags) == 0) { 4961 obj = vp->v_object; 4962 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4963 VM_OBJECT_WLOCK(obj); 4964 vm_object_page_clean(obj, 0, 0, objflags); 4965 VM_OBJECT_WUNLOCK(obj); 4966 } 4967 vput(vp); 4968 if (seen_defer) 4969 vdrop(vp); 4970 } else { 4971 if (seen_defer) 4972 vdefer_inactive_unlocked(vp); 4973 } 4974 } 4975 } 4976 4977 void 4978 vfs_periodic(struct mount *mp, int flags) 4979 { 4980 4981 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4982 4983 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4984 vfs_periodic_inactive(mp, flags); 4985 else 4986 vfs_periodic_msync_inactive(mp, flags); 4987 } 4988 4989 static void 4990 destroy_vpollinfo_free(struct vpollinfo *vi) 4991 { 4992 4993 knlist_destroy(&vi->vpi_selinfo.si_note); 4994 mtx_destroy(&vi->vpi_lock); 4995 free(vi, M_VNODEPOLL); 4996 } 4997 4998 static void 4999 destroy_vpollinfo(struct vpollinfo 
*vi)
5000 {
5001 
5002 	knlist_clear(&vi->vpi_selinfo.si_note, 1);
5003 	seldrain(&vi->vpi_selinfo);
5004 	destroy_vpollinfo_free(vi);
5005 }
5006 
5007 /*
5008  * Initialize per-vnode helper structure to hold poll-related state.
5009  */
5010 void
5011 v_addpollinfo(struct vnode *vp)
5012 {
5013 	struct vpollinfo *vi;
5014 
5015 	if (vp->v_pollinfo != NULL)
5016 		return;
5017 	vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5018 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5019 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5020 	    vfs_knlunlock, vfs_knl_assert_lock);
5021 	VI_LOCK(vp);
5022 	if (vp->v_pollinfo != NULL) {
5023 		VI_UNLOCK(vp);
5024 		destroy_vpollinfo_free(vi);
5025 		return;
5026 	}
5027 	vp->v_pollinfo = vi;
5028 	VI_UNLOCK(vp);
5029 }
5030 
5031 /*
5032  * Record a process's interest in events which might happen to
5033  * a vnode. Because poll uses the historic select-style interface
5034  * internally, this routine serves as both the ``check for any
5035  * pending events'' and the ``record my interest in future events''
5036  * functions. (These are done together, while the lock is held,
5037  * to avoid race conditions.)
5038  */
5039 int
5040 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5041 {
5042 
5043 	v_addpollinfo(vp);
5044 	mtx_lock(&vp->v_pollinfo->vpi_lock);
5045 	if (vp->v_pollinfo->vpi_revents & events) {
5046 		/*
5047 		 * This leaves events we are not interested
5048 		 * in available for the other process which
5049 		 * presumably had requested them
5050 		 * (otherwise they would never have been
5051 		 * recorded).
5052 		 */
5053 		events &= vp->v_pollinfo->vpi_revents;
5054 		vp->v_pollinfo->vpi_revents &= ~events;
5055 
5056 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
5057 		return (events);
5058 	}
5059 	vp->v_pollinfo->vpi_events |= events;
5060 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5061 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
5062 	return (0);
5063 }
5064 
5065 /*
5066  * Routine to create and manage a filesystem syncer vnode.
5067  */
5068 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5069 static int sync_fsync(struct vop_fsync_args *);
5070 static int sync_inactive(struct vop_inactive_args *);
5071 static int sync_reclaim(struct vop_reclaim_args *);
5072 
5073 static struct vop_vector sync_vnodeops = {
5074 	.vop_bypass = VOP_EOPNOTSUPP,
5075 	.vop_close = sync_close,
5076 	.vop_fsync = sync_fsync,
5077 	.vop_getwritemount = vop_stdgetwritemount,
5078 	.vop_inactive = sync_inactive,
5079 	.vop_need_inactive = vop_stdneed_inactive,
5080 	.vop_reclaim = sync_reclaim,
5081 	.vop_lock1 = vop_stdlock,
5082 	.vop_unlock = vop_stdunlock,
5083 	.vop_islocked = vop_stdislocked,
5084 	.vop_fplookup_vexec = VOP_EAGAIN,
5085 	.vop_fplookup_symlink = VOP_EAGAIN,
5086 };
5087 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5088 
5089 /*
5090  * Create a new filesystem syncer vnode for the specified mount point.
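 *
 * A sketch of the typical call site (hypothetical, mirroring the mount
 * path): filesystems that want periodic syncing get one of these once
 * writes become possible, e.g.
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0)
 *		vfs_allocate_syncvnode(mp);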
5091  */
5092 void
5093 vfs_allocate_syncvnode(struct mount *mp)
5094 {
5095 	struct vnode *vp;
5096 	struct bufobj *bo;
5097 	static long start, incr, next;
5098 	int error;
5099 
5100 	/* Allocate a new vnode */
5101 	error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5102 	if (error != 0)
5103 		panic("vfs_allocate_syncvnode: getnewvnode() failed");
5104 	vp->v_type = VNON;
5105 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5106 	vp->v_vflag |= VV_FORCEINSMQ;
5107 	error = insmntque1(vp, mp);
5108 	if (error != 0)
5109 		panic("vfs_allocate_syncvnode: insmntque() failed");
5110 	vp->v_vflag &= ~VV_FORCEINSMQ;
5111 	vn_set_state(vp, VSTATE_CONSTRUCTED);
5112 	VOP_UNLOCK(vp);
5113 	/*
5114 	 * Place the vnode onto the syncer worklist. We attempt to
5115 	 * scatter them about on the list so that they will go off
5116 	 * at evenly distributed times even if all the filesystems
5117 	 * are mounted at once.
5118 	 */
5119 	next += incr;
5120 	if (next == 0 || next > syncer_maxdelay) {
5121 		start /= 2;
5122 		incr /= 2;
5123 		if (start == 0) {
5124 			start = syncer_maxdelay / 2;
5125 			incr = syncer_maxdelay;
5126 		}
5127 		next = start;
5128 	}
5129 	bo = &vp->v_bufobj;
5130 	BO_LOCK(bo);
5131 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5132 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5133 	mtx_lock(&sync_mtx);
5134 	sync_vnode_count++;
5135 	if (mp->mnt_syncer == NULL) {
5136 		mp->mnt_syncer = vp;
5137 		vp = NULL;
5138 	}
5139 	mtx_unlock(&sync_mtx);
5140 	BO_UNLOCK(bo);
5141 	if (vp != NULL) {
5142 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5143 		vgone(vp);
5144 		vput(vp);
5145 	}
5146 }
5147 
5148 void
5149 vfs_deallocate_syncvnode(struct mount *mp)
5150 {
5151 	struct vnode *vp;
5152 
5153 	mtx_lock(&sync_mtx);
5154 	vp = mp->mnt_syncer;
5155 	if (vp != NULL)
5156 		mp->mnt_syncer = NULL;
5157 	mtx_unlock(&sync_mtx);
5158 	if (vp != NULL)
5159 		vrele(vp);
5160 }
5161 
5162 /*
5163  * Do a lazy sync of the filesystem.
5164  */
5165 static int
5166 sync_fsync(struct vop_fsync_args *ap)
5167 {
5168 	struct vnode *syncvp = ap->a_vp;
5169 	struct mount *mp = syncvp->v_mount;
5170 	int error, save;
5171 	struct bufobj *bo;
5172 
5173 	/*
5174 	 * We only need to do something if this is a lazy evaluation.
5175 	 */
5176 	if (ap->a_waitfor != MNT_LAZY)
5177 		return (0);
5178 
5179 	/*
5180 	 * Move ourselves to the back of the sync list.
5181 	 */
5182 	bo = &syncvp->v_bufobj;
5183 	BO_LOCK(bo);
5184 	vn_syncer_add_to_worklist(bo, syncdelay);
5185 	BO_UNLOCK(bo);
5186 
5187 	/*
5188 	 * Walk the list of vnodes pushing all that are dirty and
5189 	 * not already on the sync list.
5190 	 */
5191 	if (vfs_busy(mp, MBF_NOWAIT) != 0)
5192 		return (0);
5193 	VOP_UNLOCK(syncvp);
5194 	save = curthread_pflags_set(TDP_SYNCIO);
5195 	/*
5196 	 * The filesystem at hand may be idle with free vnodes stored in the
5197 	 * batch. Return them instead of letting them stay there indefinitely.
5198 	 */
5199 	vfs_periodic(mp, MNT_NOWAIT);
5200 	error = VFS_SYNC(mp, MNT_LAZY);
5201 	curthread_pflags_restore(save);
5202 	vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5203 	vfs_unbusy(mp);
5204 	return (error);
5205 }
5206 
5207 /*
5208  * The syncer vnode is no longer referenced.
5209  */
5210 static int
5211 sync_inactive(struct vop_inactive_args *ap)
5212 {
5213 
5214 	vgone(ap->a_vp);
5215 	return (0);
5216 }
5217 
5218 /*
5219  * The syncer vnode is no longer needed and is being decommissioned.
5220  *
5221  * Modifications to the worklist must be protected by sync_mtx.
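 *
 * The lock order used below is the bufobj lock first, then sync_mtx:
 *
 *	BO_LOCK(bo);
 *	mtx_lock(&sync_mtx);
 *	... detach bo from the worklist ...
 *	mtx_unlock(&sync_mtx);
 *	BO_UNLOCK(bo);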
5222  */
5223 static int
5224 sync_reclaim(struct vop_reclaim_args *ap)
5225 {
5226 	struct vnode *vp = ap->a_vp;
5227 	struct bufobj *bo;
5228 
5229 	bo = &vp->v_bufobj;
5230 	BO_LOCK(bo);
5231 	mtx_lock(&sync_mtx);
5232 	if (vp->v_mount->mnt_syncer == vp)
5233 		vp->v_mount->mnt_syncer = NULL;
5234 	if (bo->bo_flag & BO_ONWORKLST) {
5235 		LIST_REMOVE(bo, bo_synclist);
5236 		syncer_worklist_len--;
5237 		sync_vnode_count--;
5238 		bo->bo_flag &= ~BO_ONWORKLST;
5239 	}
5240 	mtx_unlock(&sync_mtx);
5241 	BO_UNLOCK(bo);
5242 
5243 	return (0);
5244 }
5245 
5246 int
5247 vn_need_pageq_flush(struct vnode *vp)
5248 {
5249 	struct vm_object *obj;
5250 
5251 	obj = vp->v_object;
5252 	return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5253 	    vm_object_mightbedirty(obj));
5254 }
5255 
5256 /*
5257  * Check if vnode represents a disk device
5258  */
5259 bool
5260 vn_isdisk_error(struct vnode *vp, int *errp)
5261 {
5262 	int error;
5263 
5264 	if (vp->v_type != VCHR) {
5265 		error = ENOTBLK;
5266 		goto out;
5267 	}
5268 	error = 0;
5269 	dev_lock();
5270 	if (vp->v_rdev == NULL)
5271 		error = ENXIO;
5272 	else if (vp->v_rdev->si_devsw == NULL)
5273 		error = ENXIO;
5274 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5275 		error = ENOTBLK;
5276 	dev_unlock();
5277 out:
5278 	*errp = error;
5279 	return (error == 0);
5280 }
5281 
5282 bool
5283 vn_isdisk(struct vnode *vp)
5284 {
5285 	int error;
5286 
5287 	return (vn_isdisk_error(vp, &error));
5288 }
5289 
5290 /*
5291  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5292  * the comment above cache_fplookup for details.
5293  */
5294 int
5295 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5296 {
5297 	int error;
5298 
5299 	VFS_SMR_ASSERT_ENTERED();
5300 
5301 	/* Check the owner. */
5302 	if (cred->cr_uid == file_uid) {
5303 		if (file_mode & S_IXUSR)
5304 			return (0);
5305 		goto out_error;
5306 	}
5307 
5308 	/* Otherwise, check the groups (first match) */
5309 	if (groupmember(file_gid, cred)) {
5310 		if (file_mode & S_IXGRP)
5311 			return (0);
5312 		goto out_error;
5313 	}
5314 
5315 	/* Otherwise, check everyone else. */
5316 	if (file_mode & S_IXOTH)
5317 		return (0);
5318 out_error:
5319 	/*
5320 	 * Permission check failed, but it is possible denial will get overridden
5321 	 * (e.g., when root is traversing through a 700 directory owned by someone
5322 	 * else).
5323 	 *
5324 	 * vaccess() calls priv_check_cred which in turn can descend into MAC
5325 	 * modules overriding this result. It's quite unclear what semantics
5326 	 * they are allowed to operate with, thus for safety we don't call them
5327 	 * from within the SMR section. This also means if any such modules
5328 	 * are present, we have to let the regular lookup decide.
5329 	 */
5330 	error = priv_check_cred_vfs_lookup_nomac(cred);
5331 	switch (error) {
5332 	case 0:
5333 		return (0);
5334 	case EAGAIN:
5335 		/*
5336 		 * MAC modules present.
5337 		 */
5338 		return (EAGAIN);
5339 	case EPERM:
5340 		return (EACCES);
5341 	default:
5342 		return (error);
5343 	}
5344 }
5345 
5346 /*
5347  * Common filesystem object access control check routine. Accepts a
5348  * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5349  * Returns 0 on success, or an errno on failure.
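 *
 * A worked example: with file_mode 0640, a credential whose effective
 * uid matches file_uid is granted VADMIN | VREAD | VWRITE | VAPPEND, so
 * a request for VREAD | VWRITE succeeds outright; the same request from
 * a mere group member (granted only VREAD) falls through to the
 * privilege checks below.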
5350 */ 5351 int 5352 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5353 accmode_t accmode, struct ucred *cred) 5354 { 5355 accmode_t dac_granted; 5356 accmode_t priv_granted; 5357 5358 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5359 ("invalid bit in accmode")); 5360 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5361 ("VAPPEND without VWRITE")); 5362 5363 /* 5364 * Look for a normal, non-privileged way to access the file/directory 5365 * as requested. If it exists, go with that. 5366 */ 5367 5368 dac_granted = 0; 5369 5370 /* Check the owner. */ 5371 if (cred->cr_uid == file_uid) { 5372 dac_granted |= VADMIN; 5373 if (file_mode & S_IXUSR) 5374 dac_granted |= VEXEC; 5375 if (file_mode & S_IRUSR) 5376 dac_granted |= VREAD; 5377 if (file_mode & S_IWUSR) 5378 dac_granted |= (VWRITE | VAPPEND); 5379 5380 if ((accmode & dac_granted) == accmode) 5381 return (0); 5382 5383 goto privcheck; 5384 } 5385 5386 /* Otherwise, check the groups (first match) */ 5387 if (groupmember(file_gid, cred)) { 5388 if (file_mode & S_IXGRP) 5389 dac_granted |= VEXEC; 5390 if (file_mode & S_IRGRP) 5391 dac_granted |= VREAD; 5392 if (file_mode & S_IWGRP) 5393 dac_granted |= (VWRITE | VAPPEND); 5394 5395 if ((accmode & dac_granted) == accmode) 5396 return (0); 5397 5398 goto privcheck; 5399 } 5400 5401 /* Otherwise, check everyone else. */ 5402 if (file_mode & S_IXOTH) 5403 dac_granted |= VEXEC; 5404 if (file_mode & S_IROTH) 5405 dac_granted |= VREAD; 5406 if (file_mode & S_IWOTH) 5407 dac_granted |= (VWRITE | VAPPEND); 5408 if ((accmode & dac_granted) == accmode) 5409 return (0); 5410 5411 privcheck: 5412 /* 5413 * Build a privilege mask to determine if the set of privileges 5414 * satisfies the requirements when combined with the granted mask 5415 * from above. For each privilege, if the privilege is required, 5416 * bitwise or the request type onto the priv_granted mask. 5417 */ 5418 priv_granted = 0; 5419 5420 if (type == VDIR) { 5421 /* 5422 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5423 * requests, instead of PRIV_VFS_EXEC. 5424 */ 5425 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5426 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5427 priv_granted |= VEXEC; 5428 } else { 5429 /* 5430 * Ensure that at least one execute bit is on. Otherwise, 5431 * a privileged user will always succeed, and we don't want 5432 * this to happen unless the file really is executable. 5433 */ 5434 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5435 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5436 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5437 priv_granted |= VEXEC; 5438 } 5439 5440 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5441 !priv_check_cred(cred, PRIV_VFS_READ)) 5442 priv_granted |= VREAD; 5443 5444 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5445 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5446 priv_granted |= (VWRITE | VAPPEND); 5447 5448 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5449 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5450 priv_granted |= VADMIN; 5451 5452 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5453 return (0); 5454 } 5455 5456 return ((accmode & VADMIN) ? EPERM : EACCES); 5457 } 5458 5459 /* 5460 * Credential check based on process requesting service, and per-attribute 5461 * permissions. 
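 *
 * For example, a setextattr(2) request against EXTATTR_NAMESPACE_USER
 * reduces to VOP_ACCESS(vp, VWRITE, cred, td), while the same request
 * against EXTATTR_NAMESPACE_SYSTEM requires PRIV_VFS_EXTATTR_SYSTEM,
 * and any other namespace is rejected with EPERM.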
5462 */ 5463 int 5464 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5465 struct thread *td, accmode_t accmode) 5466 { 5467 5468 /* 5469 * Kernel-invoked always succeeds. 5470 */ 5471 if (cred == NOCRED) 5472 return (0); 5473 5474 /* 5475 * Do not allow privileged processes in jail to directly manipulate 5476 * system attributes. 5477 */ 5478 switch (attrnamespace) { 5479 case EXTATTR_NAMESPACE_SYSTEM: 5480 /* Potentially should be: return (EPERM); */ 5481 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5482 case EXTATTR_NAMESPACE_USER: 5483 return (VOP_ACCESS(vp, accmode, cred, td)); 5484 default: 5485 return (EPERM); 5486 } 5487 } 5488 5489 #ifdef DEBUG_VFS_LOCKS 5490 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5491 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5492 "Drop into debugger on lock violation"); 5493 5494 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5495 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5496 0, "Check for interlock across VOPs"); 5497 5498 int vfs_badlock_print = 1; /* Print lock violations. */ 5499 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5500 0, "Print lock violations"); 5501 5502 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5503 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5504 0, "Print vnode details on lock violations"); 5505 5506 #ifdef KDB 5507 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5508 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5509 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5510 #endif 5511 5512 static void 5513 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5514 { 5515 5516 #ifdef KDB 5517 if (vfs_badlock_backtrace) 5518 kdb_backtrace(); 5519 #endif 5520 if (vfs_badlock_vnode) 5521 vn_printf(vp, "vnode "); 5522 if (vfs_badlock_print) 5523 printf("%s: %p %s\n", str, (void *)vp, msg); 5524 if (vfs_badlock_ddb) 5525 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5526 } 5527 5528 void 5529 assert_vi_locked(struct vnode *vp, const char *str) 5530 { 5531 5532 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5533 vfs_badlock("interlock is not locked but should be", str, vp); 5534 } 5535 5536 void 5537 assert_vi_unlocked(struct vnode *vp, const char *str) 5538 { 5539 5540 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5541 vfs_badlock("interlock is locked but should not be", str, vp); 5542 } 5543 5544 void 5545 assert_vop_locked(struct vnode *vp, const char *str) 5546 { 5547 if (KERNEL_PANICKED() || vp == NULL) 5548 return; 5549 5550 #ifdef WITNESS 5551 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5552 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5553 #else 5554 int locked = VOP_ISLOCKED(vp); 5555 if (locked == 0 || locked == LK_EXCLOTHER) 5556 #endif 5557 vfs_badlock("is not locked but should be", str, vp); 5558 } 5559 5560 void 5561 assert_vop_unlocked(struct vnode *vp, const char *str) 5562 { 5563 if (KERNEL_PANICKED() || vp == NULL) 5564 return; 5565 5566 #ifdef WITNESS 5567 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5568 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5569 #else 5570 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5571 #endif 5572 vfs_badlock("is locked but should not be", str, vp); 5573 } 5574 5575 void 5576 assert_vop_elocked(struct vnode *vp, const char *str) 5577 { 5578 if (KERNEL_PANICKED() || vp == 
NULL) 5579 return; 5580 5581 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5582 vfs_badlock("is not exclusive locked but should be", str, vp); 5583 } 5584 #endif /* DEBUG_VFS_LOCKS */ 5585 5586 void 5587 vop_rename_fail(struct vop_rename_args *ap) 5588 { 5589 5590 if (ap->a_tvp != NULL) 5591 vput(ap->a_tvp); 5592 if (ap->a_tdvp == ap->a_tvp) 5593 vrele(ap->a_tdvp); 5594 else 5595 vput(ap->a_tdvp); 5596 vrele(ap->a_fdvp); 5597 vrele(ap->a_fvp); 5598 } 5599 5600 void 5601 vop_rename_pre(void *ap) 5602 { 5603 struct vop_rename_args *a = ap; 5604 5605 #ifdef DEBUG_VFS_LOCKS 5606 if (a->a_tvp) 5607 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5608 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5609 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5610 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5611 5612 /* Check the source (from). */ 5613 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5614 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5615 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5616 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5617 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5618 5619 /* Check the target. */ 5620 if (a->a_tvp) 5621 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5622 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5623 #endif 5624 /* 5625 * It may be tempting to add vn_seqc_write_begin/end calls here and 5626 * in vop_rename_post but that's not going to work out since some 5627 * filesystems relookup vnodes mid-rename. This is probably a bug. 5628 * 5629 * For now filesystems are expected to do the relevant calls after they 5630 * decide what vnodes to operate on. 5631 */ 5632 if (a->a_tdvp != a->a_fdvp) 5633 vhold(a->a_fdvp); 5634 if (a->a_tvp != a->a_fvp) 5635 vhold(a->a_fvp); 5636 vhold(a->a_tdvp); 5637 if (a->a_tvp) 5638 vhold(a->a_tvp); 5639 } 5640 5641 #ifdef DEBUG_VFS_LOCKS 5642 void 5643 vop_fplookup_vexec_debugpre(void *ap __unused) 5644 { 5645 5646 VFS_SMR_ASSERT_ENTERED(); 5647 } 5648 5649 void 5650 vop_fplookup_vexec_debugpost(void *ap, int rc) 5651 { 5652 struct vop_fplookup_vexec_args *a; 5653 struct vnode *vp; 5654 5655 a = ap; 5656 vp = a->a_vp; 5657 5658 VFS_SMR_ASSERT_ENTERED(); 5659 if (rc == EOPNOTSUPP) 5660 VNPASS(VN_IS_DOOMED(vp), vp); 5661 } 5662 5663 void 5664 vop_fplookup_symlink_debugpre(void *ap __unused) 5665 { 5666 5667 VFS_SMR_ASSERT_ENTERED(); 5668 } 5669 5670 void 5671 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5672 { 5673 5674 VFS_SMR_ASSERT_ENTERED(); 5675 } 5676 5677 static void 5678 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5679 { 5680 if (vp->v_type == VCHR) 5681 ; 5682 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5683 ASSERT_VOP_LOCKED(vp, name); 5684 else 5685 ASSERT_VOP_ELOCKED(vp, name); 5686 } 5687 5688 void 5689 vop_fsync_debugpre(void *a) 5690 { 5691 struct vop_fsync_args *ap; 5692 5693 ap = a; 5694 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5695 } 5696 5697 void 5698 vop_fsync_debugpost(void *a, int rc __unused) 5699 { 5700 struct vop_fsync_args *ap; 5701 5702 ap = a; 5703 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5704 } 5705 5706 void 5707 vop_fdatasync_debugpre(void *a) 5708 { 5709 struct vop_fdatasync_args *ap; 5710 5711 ap = a; 5712 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5713 } 5714 5715 void 5716 vop_fdatasync_debugpost(void *a, int rc __unused) 5717 { 5718 struct vop_fdatasync_args *ap; 5719 5720 ap = a; 5721 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5722 } 5723 5724 void 5725 
vop_strategy_debugpre(void *ap) 5726 { 5727 struct vop_strategy_args *a; 5728 struct buf *bp; 5729 5730 a = ap; 5731 bp = a->a_bp; 5732 5733 /* 5734 * Cluster ops lock their component buffers but not the IO container. 5735 */ 5736 if ((bp->b_flags & B_CLUSTER) != 0) 5737 return; 5738 5739 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5740 if (vfs_badlock_print) 5741 printf( 5742 "VOP_STRATEGY: bp is not locked but should be\n"); 5743 if (vfs_badlock_ddb) 5744 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5745 } 5746 } 5747 5748 void 5749 vop_lock_debugpre(void *ap) 5750 { 5751 struct vop_lock1_args *a = ap; 5752 5753 if ((a->a_flags & LK_INTERLOCK) == 0) 5754 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5755 else 5756 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5757 } 5758 5759 void 5760 vop_lock_debugpost(void *ap, int rc) 5761 { 5762 struct vop_lock1_args *a = ap; 5763 5764 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5765 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5766 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5767 } 5768 5769 void 5770 vop_unlock_debugpre(void *ap) 5771 { 5772 struct vop_unlock_args *a = ap; 5773 struct vnode *vp = a->a_vp; 5774 5775 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5776 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5777 } 5778 5779 void 5780 vop_need_inactive_debugpre(void *ap) 5781 { 5782 struct vop_need_inactive_args *a = ap; 5783 5784 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5785 } 5786 5787 void 5788 vop_need_inactive_debugpost(void *ap, int rc) 5789 { 5790 struct vop_need_inactive_args *a = ap; 5791 5792 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5793 } 5794 #endif 5795 5796 void 5797 vop_create_pre(void *ap) 5798 { 5799 struct vop_create_args *a; 5800 struct vnode *dvp; 5801 5802 a = ap; 5803 dvp = a->a_dvp; 5804 vn_seqc_write_begin(dvp); 5805 } 5806 5807 void 5808 vop_create_post(void *ap, int rc) 5809 { 5810 struct vop_create_args *a; 5811 struct vnode *dvp; 5812 5813 a = ap; 5814 dvp = a->a_dvp; 5815 vn_seqc_write_end(dvp); 5816 if (!rc) 5817 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5818 } 5819 5820 void 5821 vop_whiteout_pre(void *ap) 5822 { 5823 struct vop_whiteout_args *a; 5824 struct vnode *dvp; 5825 5826 a = ap; 5827 dvp = a->a_dvp; 5828 vn_seqc_write_begin(dvp); 5829 } 5830 5831 void 5832 vop_whiteout_post(void *ap, int rc) 5833 { 5834 struct vop_whiteout_args *a; 5835 struct vnode *dvp; 5836 5837 a = ap; 5838 dvp = a->a_dvp; 5839 vn_seqc_write_end(dvp); 5840 } 5841 5842 void 5843 vop_deleteextattr_pre(void *ap) 5844 { 5845 struct vop_deleteextattr_args *a; 5846 struct vnode *vp; 5847 5848 a = ap; 5849 vp = a->a_vp; 5850 vn_seqc_write_begin(vp); 5851 } 5852 5853 void 5854 vop_deleteextattr_post(void *ap, int rc) 5855 { 5856 struct vop_deleteextattr_args *a; 5857 struct vnode *vp; 5858 5859 a = ap; 5860 vp = a->a_vp; 5861 vn_seqc_write_end(vp); 5862 if (!rc) 5863 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5864 } 5865 5866 void 5867 vop_link_pre(void *ap) 5868 { 5869 struct vop_link_args *a; 5870 struct vnode *vp, *tdvp; 5871 5872 a = ap; 5873 vp = a->a_vp; 5874 tdvp = a->a_tdvp; 5875 vn_seqc_write_begin(vp); 5876 vn_seqc_write_begin(tdvp); 5877 } 5878 5879 void 5880 vop_link_post(void *ap, int rc) 5881 { 5882 struct vop_link_args *a; 5883 struct vnode *vp, *tdvp; 5884 5885 a = ap; 5886 vp = a->a_vp; 5887 tdvp = a->a_tdvp; 5888 vn_seqc_write_end(vp); 5889 vn_seqc_write_end(tdvp); 5890 if (!rc) { 5891 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5892 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5893 } 5894 } 5895 5896 void 5897 vop_mkdir_pre(void *ap) 5898 { 5899 struct 
vop_mkdir_args *a; 5900 struct vnode *dvp; 5901 5902 a = ap; 5903 dvp = a->a_dvp; 5904 vn_seqc_write_begin(dvp); 5905 } 5906 5907 void 5908 vop_mkdir_post(void *ap, int rc) 5909 { 5910 struct vop_mkdir_args *a; 5911 struct vnode *dvp; 5912 5913 a = ap; 5914 dvp = a->a_dvp; 5915 vn_seqc_write_end(dvp); 5916 if (!rc) 5917 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5918 } 5919 5920 #ifdef DEBUG_VFS_LOCKS 5921 void 5922 vop_mkdir_debugpost(void *ap, int rc) 5923 { 5924 struct vop_mkdir_args *a; 5925 5926 a = ap; 5927 if (!rc) 5928 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5929 } 5930 #endif 5931 5932 void 5933 vop_mknod_pre(void *ap) 5934 { 5935 struct vop_mknod_args *a; 5936 struct vnode *dvp; 5937 5938 a = ap; 5939 dvp = a->a_dvp; 5940 vn_seqc_write_begin(dvp); 5941 } 5942 5943 void 5944 vop_mknod_post(void *ap, int rc) 5945 { 5946 struct vop_mknod_args *a; 5947 struct vnode *dvp; 5948 5949 a = ap; 5950 dvp = a->a_dvp; 5951 vn_seqc_write_end(dvp); 5952 if (!rc) 5953 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5954 } 5955 5956 void 5957 vop_reclaim_post(void *ap, int rc) 5958 { 5959 struct vop_reclaim_args *a; 5960 struct vnode *vp; 5961 5962 a = ap; 5963 vp = a->a_vp; 5964 ASSERT_VOP_IN_SEQC(vp); 5965 if (!rc) 5966 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5967 } 5968 5969 void 5970 vop_remove_pre(void *ap) 5971 { 5972 struct vop_remove_args *a; 5973 struct vnode *dvp, *vp; 5974 5975 a = ap; 5976 dvp = a->a_dvp; 5977 vp = a->a_vp; 5978 vn_seqc_write_begin(dvp); 5979 vn_seqc_write_begin(vp); 5980 } 5981 5982 void 5983 vop_remove_post(void *ap, int rc) 5984 { 5985 struct vop_remove_args *a; 5986 struct vnode *dvp, *vp; 5987 5988 a = ap; 5989 dvp = a->a_dvp; 5990 vp = a->a_vp; 5991 vn_seqc_write_end(dvp); 5992 vn_seqc_write_end(vp); 5993 if (!rc) { 5994 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5995 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5996 } 5997 } 5998 5999 void 6000 vop_rename_post(void *ap, int rc) 6001 { 6002 struct vop_rename_args *a = ap; 6003 long hint; 6004 6005 if (!rc) { 6006 hint = NOTE_WRITE; 6007 if (a->a_fdvp == a->a_tdvp) { 6008 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6009 hint |= NOTE_LINK; 6010 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6011 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6012 } else { 6013 hint |= NOTE_EXTEND; 6014 if (a->a_fvp->v_type == VDIR) 6015 hint |= NOTE_LINK; 6016 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6017 6018 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6019 a->a_tvp->v_type == VDIR) 6020 hint &= ~NOTE_LINK; 6021 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6022 } 6023 6024 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6025 if (a->a_tvp) 6026 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6027 } 6028 if (a->a_tdvp != a->a_fdvp) 6029 vdrop(a->a_fdvp); 6030 if (a->a_tvp != a->a_fvp) 6031 vdrop(a->a_fvp); 6032 vdrop(a->a_tdvp); 6033 if (a->a_tvp) 6034 vdrop(a->a_tvp); 6035 } 6036 6037 void 6038 vop_rmdir_pre(void *ap) 6039 { 6040 struct vop_rmdir_args *a; 6041 struct vnode *dvp, *vp; 6042 6043 a = ap; 6044 dvp = a->a_dvp; 6045 vp = a->a_vp; 6046 vn_seqc_write_begin(dvp); 6047 vn_seqc_write_begin(vp); 6048 } 6049 6050 void 6051 vop_rmdir_post(void *ap, int rc) 6052 { 6053 struct vop_rmdir_args *a; 6054 struct vnode *dvp, *vp; 6055 6056 a = ap; 6057 dvp = a->a_dvp; 6058 vp = a->a_vp; 6059 vn_seqc_write_end(dvp); 6060 vn_seqc_write_end(vp); 6061 if (!rc) { 6062 vp->v_vflag |= VV_UNLINKED; 6063 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6064 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6065 } 6066 } 6067 6068 void 6069 vop_setattr_pre(void *ap) 6070 { 6071 struct vop_setattr_args *a; 6072 
void
vop_setattr_pre(void *ap)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_setacl_pre(void *ap)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setacl_post(void *ap, int rc __unused)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
}

void
vop_setextattr_pre(void *ap)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_symlink_pre(void *ap)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_open_post(void *ap, int rc)
{
	struct vop_open_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
}

void
vop_close_post(void *ap, int rc)
{
	struct vop_close_args *a = ap;

	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
	    !VN_IS_DOOMED(a->a_vp))) {
		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
	}
}

void
vop_read_post(void *ap, int rc)
{
	struct vop_read_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

void
vop_read_pgcache_post(void *ap, int rc)
{
	struct vop_read_pgcache_args *a = ap;

	if (!rc)
		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
}

void
vop_readdir_post(void *ap, int rc)
{
	struct vop_readdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
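/*
 * Hedged illustration (a sketch, not a required pattern): subsystems
 * broadcast an EVFILT_FS event to every listener via vfs_event_signal()
 * (defined below), typically with one of the VQ_* events from sys/mount.h:
 *
 *	vfs_event_signal(&mp->mnt_stat.f_fsid, VQ_NOTRESP, 0);
 */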
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int filt_fsattach(struct knote *kn);
static void filt_fsdetach(struct knote *kn);
static int filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");
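/*
 * Hedged illustration (userland, hypothetical management tool): a caller
 * addresses one mounted filesystem through this node by filling in a
 * struct vfsidctl; only the fields checked above are shown:
 *
 *	struct vfsidctl vc = { 0 };
 *
 *	vc.vc_vers = VFS_CTL_VERS1;
 *	vc.vc_fsid = fsid;		(obtained via statfs(2))
 *	strlcpy(vc.vc_fstypename, "*", sizeof(vc.vc_fstypename));
 *	vc.vc_op = op;			(a VFS_CTL_* operation)
 *	(void)sysctlbyname("vfs.ctl", NULL, NULL, &vc, sizeof(vc));
 */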
/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int filt_vfsread(struct knote *kn, long hint);
static int filt_vfswrite(struct knote *kn, long hint);
static int filt_vfsvnode(struct knote *kn, long hint);
static void filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}
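/*
 * Hedged sketch (the filesystem name is hypothetical): most filesystems get
 * kqueue support by pointing their vop vector at vfs_kqfilter(), the way
 * the defaults in kern/vfs_default.c do:
 *
 *	struct vop_vector foo_vnodeops = {
 *		.vop_default = &default_vnodeops,
 *		.vop_kqfilter = vfs_kqfilter,
 *	};
 */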
6416 */ 6417 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6418 VI_LOCK(vp); 6419 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6420 VI_UNLOCK(vp); 6421 return (1); 6422 } 6423 6424 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6425 return (0); 6426 6427 VI_LOCK(vp); 6428 kn->kn_data = size - kn->kn_fp->f_offset; 6429 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6430 VI_UNLOCK(vp); 6431 return (res); 6432 } 6433 6434 /*ARGSUSED*/ 6435 static int 6436 filt_vfswrite(struct knote *kn, long hint) 6437 { 6438 struct vnode *vp = (struct vnode *)kn->kn_hook; 6439 6440 VI_LOCK(vp); 6441 6442 /* 6443 * filesystem is gone, so set the EOF flag and schedule 6444 * the knote for deletion. 6445 */ 6446 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6447 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6448 6449 kn->kn_data = 0; 6450 VI_UNLOCK(vp); 6451 return (1); 6452 } 6453 6454 static int 6455 filt_vfsvnode(struct knote *kn, long hint) 6456 { 6457 struct vnode *vp = (struct vnode *)kn->kn_hook; 6458 int res; 6459 6460 VI_LOCK(vp); 6461 if (kn->kn_sfflags & hint) 6462 kn->kn_fflags |= hint; 6463 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6464 kn->kn_flags |= EV_EOF; 6465 VI_UNLOCK(vp); 6466 return (1); 6467 } 6468 res = (kn->kn_fflags != 0); 6469 VI_UNLOCK(vp); 6470 return (res); 6471 } 6472 6473 int 6474 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6475 { 6476 int error; 6477 6478 if (dp->d_reclen > ap->a_uio->uio_resid) 6479 return (ENAMETOOLONG); 6480 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6481 if (error) { 6482 if (ap->a_ncookies != NULL) { 6483 if (ap->a_cookies != NULL) 6484 free(ap->a_cookies, M_TEMP); 6485 ap->a_cookies = NULL; 6486 *ap->a_ncookies = 0; 6487 } 6488 return (error); 6489 } 6490 if (ap->a_ncookies == NULL) 6491 return (0); 6492 6493 KASSERT(ap->a_cookies, 6494 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6495 6496 *ap->a_cookies = realloc(*ap->a_cookies, 6497 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6498 (*ap->a_cookies)[*ap->a_ncookies] = off; 6499 *ap->a_ncookies += 1; 6500 return (0); 6501 } 6502 6503 /* 6504 * The purpose of this routine is to remove granularity from accmode_t, 6505 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6506 * VADMIN and VAPPEND. 6507 * 6508 * If it returns 0, the caller is supposed to continue with the usual 6509 * access checks using 'accmode' as modified by this routine. If it 6510 * returns nonzero value, the caller is supposed to return that value 6511 * as errno. 6512 * 6513 * Note that after this routine runs, accmode may be zero. 6514 */ 6515 int 6516 vfs_unixify_accmode(accmode_t *accmode) 6517 { 6518 /* 6519 * There is no way to specify explicit "deny" rule using 6520 * file mode or POSIX.1e ACLs. 6521 */ 6522 if (*accmode & VEXPLICIT_DENY) { 6523 *accmode = 0; 6524 return (0); 6525 } 6526 6527 /* 6528 * None of these can be translated into usual access bits. 6529 * Also, the common case for NFSv4 ACLs is to not contain 6530 * either of these bits. Caller should check for VWRITE 6531 * on the containing directory instead. 6532 */ 6533 if (*accmode & (VDELETE_CHILD | VDELETE)) 6534 return (EPERM); 6535 6536 if (*accmode & VADMIN_PERMS) { 6537 *accmode &= ~VADMIN_PERMS; 6538 *accmode |= VADMIN; 6539 } 6540 6541 /* 6542 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6543 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
6544 */ 6545 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6546 6547 return (0); 6548 } 6549 6550 /* 6551 * Clear out a doomed vnode (if any) and replace it with a new one as long 6552 * as the fs is not being unmounted. Return the root vnode to the caller. 6553 */ 6554 static int __noinline 6555 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6556 { 6557 struct vnode *vp; 6558 int error; 6559 6560 restart: 6561 if (mp->mnt_rootvnode != NULL) { 6562 MNT_ILOCK(mp); 6563 vp = mp->mnt_rootvnode; 6564 if (vp != NULL) { 6565 if (!VN_IS_DOOMED(vp)) { 6566 vrefact(vp); 6567 MNT_IUNLOCK(mp); 6568 error = vn_lock(vp, flags); 6569 if (error == 0) { 6570 *vpp = vp; 6571 return (0); 6572 } 6573 vrele(vp); 6574 goto restart; 6575 } 6576 /* 6577 * Clear the old one. 6578 */ 6579 mp->mnt_rootvnode = NULL; 6580 } 6581 MNT_IUNLOCK(mp); 6582 if (vp != NULL) { 6583 vfs_op_barrier_wait(mp); 6584 vrele(vp); 6585 } 6586 } 6587 error = VFS_CACHEDROOT(mp, flags, vpp); 6588 if (error != 0) 6589 return (error); 6590 if (mp->mnt_vfs_ops == 0) { 6591 MNT_ILOCK(mp); 6592 if (mp->mnt_vfs_ops != 0) { 6593 MNT_IUNLOCK(mp); 6594 return (0); 6595 } 6596 if (mp->mnt_rootvnode == NULL) { 6597 vrefact(*vpp); 6598 mp->mnt_rootvnode = *vpp; 6599 } else { 6600 if (mp->mnt_rootvnode != *vpp) { 6601 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6602 panic("%s: mismatch between vnode returned " 6603 " by VFS_CACHEDROOT and the one cached " 6604 " (%p != %p)", 6605 __func__, *vpp, mp->mnt_rootvnode); 6606 } 6607 } 6608 } 6609 MNT_IUNLOCK(mp); 6610 } 6611 return (0); 6612 } 6613 6614 int 6615 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6616 { 6617 struct mount_pcpu *mpcpu; 6618 struct vnode *vp; 6619 int error; 6620 6621 if (!vfs_op_thread_enter(mp, mpcpu)) 6622 return (vfs_cache_root_fallback(mp, flags, vpp)); 6623 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6624 if (vp == NULL || VN_IS_DOOMED(vp)) { 6625 vfs_op_thread_exit(mp, mpcpu); 6626 return (vfs_cache_root_fallback(mp, flags, vpp)); 6627 } 6628 vrefact(vp); 6629 vfs_op_thread_exit(mp, mpcpu); 6630 error = vn_lock(vp, flags); 6631 if (error != 0) { 6632 vrele(vp); 6633 return (vfs_cache_root_fallback(mp, flags, vpp)); 6634 } 6635 *vpp = vp; 6636 return (0); 6637 } 6638 6639 struct vnode * 6640 vfs_cache_root_clear(struct mount *mp) 6641 { 6642 struct vnode *vp; 6643 6644 /* 6645 * ops > 0 guarantees there is nobody who can see this vnode 6646 */ 6647 MPASS(mp->mnt_vfs_ops > 0); 6648 vp = mp->mnt_rootvnode; 6649 if (vp != NULL) 6650 vn_seqc_write_begin(vp); 6651 mp->mnt_rootvnode = NULL; 6652 return (vp); 6653 } 6654 6655 void 6656 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6657 { 6658 6659 MPASS(mp->mnt_vfs_ops > 0); 6660 vrefact(vp); 6661 mp->mnt_rootvnode = vp; 6662 } 6663 6664 /* 6665 * These are helper functions for filesystems to traverse all 6666 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6667 * 6668 * This interface replaces MNT_VNODE_FOREACH. 6669 */ 6670 6671 struct vnode * 6672 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6673 { 6674 struct vnode *vp; 6675 6676 maybe_yield(); 6677 MNT_ILOCK(mp); 6678 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6679 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6680 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6681 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	maybe_yield();
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
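/*
 * Hedged sketch (the callback name is hypothetical; see
 * MNT_VNODE_FOREACH_LAZY() in sys/mount.h): the lazy walk filters vnodes
 * through a callback that runs without the interlock held, so only cheap,
 * racy checks belong there:
 *
 *	static bool
 *	foo_want_vnode(struct vnode *vp, void *arg)
 *	{
 *		return (vp->v_object != NULL);
 *	}
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, foo_want_vnode, NULL) {
 *		(vp's interlock is held here)
 *	}
 */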
6801 */ 6802 vhold(vp); 6803 mtx_unlock(&mp->mnt_listmtx); 6804 VI_LOCK(vp); 6805 if (VN_IS_DOOMED(vp)) { 6806 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6807 goto out_lost; 6808 } 6809 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6810 /* 6811 * There is nothing to do if we are the last user. 6812 */ 6813 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6814 goto out_lost; 6815 mtx_lock(&mp->mnt_listmtx); 6816 return (true); 6817 out_lost: 6818 vdropl(vp); 6819 maybe_yield(); 6820 mtx_lock(&mp->mnt_listmtx); 6821 return (false); 6822 } 6823 6824 static struct vnode * 6825 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6826 void *cbarg) 6827 { 6828 struct vnode *vp; 6829 6830 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6831 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6832 restart: 6833 vp = TAILQ_NEXT(*mvp, v_lazylist); 6834 while (vp != NULL) { 6835 if (vp->v_type == VMARKER) { 6836 vp = TAILQ_NEXT(vp, v_lazylist); 6837 continue; 6838 } 6839 /* 6840 * See if we want to process the vnode. Note we may encounter a 6841 * long string of vnodes we don't care about and hog the list 6842 * as a result. Check for it and requeue the marker. 6843 */ 6844 VNPASS(!VN_IS_DOOMED(vp), vp); 6845 if (!cb(vp, cbarg)) { 6846 if (!should_yield()) { 6847 vp = TAILQ_NEXT(vp, v_lazylist); 6848 continue; 6849 } 6850 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6851 v_lazylist); 6852 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6853 v_lazylist); 6854 mtx_unlock(&mp->mnt_listmtx); 6855 kern_yield(PRI_USER); 6856 mtx_lock(&mp->mnt_listmtx); 6857 goto restart; 6858 } 6859 /* 6860 * Try-lock because this is the wrong lock order. 6861 */ 6862 if (!VI_TRYLOCK(vp) && 6863 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6864 goto restart; 6865 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6866 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6867 ("alien vnode on the lazy list %p %p", vp, mp)); 6868 VNPASS(vp->v_mount == mp, vp); 6869 VNPASS(!VN_IS_DOOMED(vp), vp); 6870 break; 6871 } 6872 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6873 6874 /* Check if we are done */ 6875 if (vp == NULL) { 6876 mtx_unlock(&mp->mnt_listmtx); 6877 mnt_vnode_markerfree_lazy(mvp, mp); 6878 return (NULL); 6879 } 6880 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6881 mtx_unlock(&mp->mnt_listmtx); 6882 ASSERT_VI_LOCKED(vp, "lazy iter"); 6883 return (vp); 6884 } 6885 6886 struct vnode * 6887 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6888 void *cbarg) 6889 { 6890 6891 maybe_yield(); 6892 mtx_lock(&mp->mnt_listmtx); 6893 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6894 } 6895 6896 struct vnode * 6897 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6898 void *cbarg) 6899 { 6900 struct vnode *vp; 6901 6902 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6903 return (NULL); 6904 6905 *mvp = vn_alloc_marker(mp); 6906 MNT_ILOCK(mp); 6907 MNT_REF(mp); 6908 MNT_IUNLOCK(mp); 6909 6910 mtx_lock(&mp->mnt_listmtx); 6911 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6912 if (vp == NULL) { 6913 mtx_unlock(&mp->mnt_listmtx); 6914 mnt_vnode_markerfree_lazy(mvp, mp); 6915 return (NULL); 6916 } 6917 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6918 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6919 } 6920 6921 void 6922 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6923 { 6924 6925 if (*mvp == NULL) 6926 return; 6927 6928 mtx_lock(&mp->mnt_listmtx); 6929 
void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as in modify as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}
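/*
 * Hedged sketch (assumed lockless consumer; vn_seqc_read_any() and
 * vn_seqc_consistent() are the seqc wrappers from sys/vnode.h): readers
 * pair with the vn_seqc_write_begin()/vn_seqc_write_end() bracketing above
 * by snapshotting the counter, reading the fields they need, and then
 * revalidating:
 *
 *	seqc_t seqc;
 *
 *	seqc = vn_seqc_read_any(vp);
 *	if (seqc_in_modify(seqc))
 *		goto out_fallback;
 *	(read vnode fields)
 *	if (!vn_seqc_consistent(vp, seqc))
 *		goto out_fallback;
 */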
int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d", vp->v_state, state);
}
#endif
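/*
 * For reference, a summary of the transitions accepted by
 * vn_set_state_validate() above; anything else panics under INVARIANTS:
 *
 *	VSTATE_UNINITIALIZED -> VSTATE_CONSTRUCTED
 *	VSTATE_UNINITIALIZED -> VSTATE_DESTROYING
 *	VSTATE_CONSTRUCTED   -> VSTATE_DESTROYING
 *	VSTATE_DESTROYING    -> VSTATE_DEAD
 *	VSTATE_DEAD          -> VSTATE_UNINITIALIZED
 */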