1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/asan.h> 52 #include <sys/bio.h> 53 #include <sys/buf.h> 54 #include <sys/capsicum.h> 55 #include <sys/condvar.h> 56 #include <sys/conf.h> 57 #include <sys/counter.h> 58 #include <sys/dirent.h> 59 #include <sys/event.h> 60 #include <sys/eventhandler.h> 61 #include <sys/extattr.h> 62 #include <sys/file.h> 63 #include <sys/fcntl.h> 64 #include <sys/jail.h> 65 #include <sys/kdb.h> 66 #include <sys/kernel.h> 67 #include <sys/kthread.h> 68 #include <sys/ktr.h> 69 #include <sys/limits.h> 70 #include <sys/lockf.h> 71 #include <sys/malloc.h> 72 #include <sys/mount.h> 73 #include <sys/namei.h> 74 #include <sys/pctrie.h> 75 #include <sys/priv.h> 76 #include <sys/reboot.h> 77 #include <sys/refcount.h> 78 #include <sys/rwlock.h> 79 #include <sys/sched.h> 80 #include <sys/sleepqueue.h> 81 #include <sys/smr.h> 82 #include <sys/smp.h> 83 #include <sys/stat.h> 84 #include <sys/sysctl.h> 85 #include <sys/syslog.h> 86 #include <sys/vmmeter.h> 87 #include <sys/vnode.h> 88 #include <sys/watchdog.h> 89 90 #include <machine/stdarg.h> 91 92 #include <security/mac/mac_framework.h> 93 94 #include <vm/vm.h> 95 #include <vm/vm_object.h> 96 #include <vm/vm_extern.h> 97 #include <vm/pmap.h> 98 #include <vm/vm_map.h> 99 #include <vm/vm_page.h> 100 #include <vm/vm_kern.h> 101 #include <vm/uma.h> 102 103 #if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS)) 104 #error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS 105 #endif 106 107 #ifdef DDB 108 #include <ddb/ddb.h> 109 #endif 110 111 static void delmntque(struct vnode *vp); 112 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 113 int slpflag, int slptimeo); 114 static void syncer_shutdown(void *arg, int howto); 115 static int vtryrecycle(struct vnode *vp); 116 static void v_init_counters(struct vnode *); 117 static void vn_seqc_init(struct vnode *); 118 static void vn_seqc_write_end_free(struct vnode *vp); 119 static void vgonel(struct vnode *); 120 static bool vhold_recycle_free(struct vnode *); 121 static void vdropl_recycle(struct vnode *vp); 122 static void vdrop_recycle(struct vnode *vp); 123 static void vfs_knllock(void *arg); 124 static void vfs_knlunlock(void *arg); 125 static void vfs_knl_assert_lock(void *arg, int what); 126 static void destroy_vpollinfo(struct vpollinfo *vi); 127 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 128 daddr_t startlbn, daddr_t endlbn); 129 static void vnlru_recalc(void); 130 131 /* 132 * Number of vnodes in existence. Increased whenever getnewvnode() 133 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 134 */ 135 static u_long __exclusive_cache_line numvnodes; 136 137 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 138 "Number of vnodes in existence"); 139 140 static counter_u64_t vnodes_created; 141 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 142 "Number of vnodes created by getnewvnode"); 143 144 /* 145 * Conversion tables for conversion from vnode types to inode formats 146 * and back. 
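 *
 * Illustrative note: the tables below are indexed with the S_IFMT
 * file-type bits shifted down by 12 (iftovt_tab) and with the vnode
 * type value itself (vttoif_tab); the IFTOVT() and VTTOIF() macros in
 * sys/vnode.h wrap this, e.g. IFTOVT(S_IFDIR) yields VDIR and
 * VTTOIF(VREG) yields S_IFREG.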
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
230 * 231 * It is useful to delay writes of file data and filesystem metadata 232 * for tens of seconds so that quickly created and deleted files need 233 * not waste disk bandwidth being created and removed. To realize this, 234 * we append vnodes to a "workitem" queue. When running with a soft 235 * updates implementation, most pending metadata dependencies should 236 * not wait for more than a few seconds. Thus, mounted on block devices 237 * are delayed only about a half the time that file data is delayed. 238 * Similarly, directory updates are more critical, so are only delayed 239 * about a third the time that file data is delayed. Thus, there are 240 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 241 * one each second (driven off the filesystem syncer process). The 242 * syncer_delayno variable indicates the next queue that is to be processed. 243 * Items that need to be processed soon are placed in this queue: 244 * 245 * syncer_workitem_pending[syncer_delayno] 246 * 247 * A delay of fifteen seconds is done by placing the request fifteen 248 * entries later in the queue: 249 * 250 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 251 * 252 */ 253 static int syncer_delayno; 254 static long syncer_mask; 255 LIST_HEAD(synclist, bufobj); 256 static struct synclist *syncer_workitem_pending; 257 /* 258 * The sync_mtx protects: 259 * bo->bo_synclist 260 * sync_vnode_count 261 * syncer_delayno 262 * syncer_state 263 * syncer_workitem_pending 264 * syncer_worklist_len 265 * rushjob 266 */ 267 static struct mtx sync_mtx; 268 static struct cv sync_wakeup; 269 270 #define SYNCER_MAXDELAY 32 271 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 272 static int syncdelay = 30; /* max time to delay syncing data */ 273 static int filedelay = 30; /* time to delay syncing files */ 274 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 275 "Time to delay syncing files (in seconds)"); 276 static int dirdelay = 29; /* time to delay syncing directories */ 277 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 278 "Time to delay syncing directories (in seconds)"); 279 static int metadelay = 28; /* time to delay syncing metadata */ 280 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 281 "Time to delay syncing metadata (in seconds)"); 282 static int rushjob; /* number of slots to run ASAP */ 283 static int stat_rush_requests; /* number of times I/O speeded up */ 284 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 285 "Number of times I/O speeded up (rush requests)"); 286 287 #define VDBATCH_SIZE 8 288 struct vdbatch { 289 u_int index; 290 struct mtx lock; 291 struct vnode *tab[VDBATCH_SIZE]; 292 }; 293 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 294 295 static void vdbatch_dequeue(struct vnode *vp); 296 297 /* 298 * When shutting down the syncer, run it at four times normal speed. 299 */ 300 #define SYNCER_SHUTDOWN_SPEEDUP 4 301 static int sync_vnode_count; 302 static int syncer_worklist_len; 303 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 304 syncer_state; 305 306 /* Target for maximum number of vnodes. 
*/ 307 u_long desiredvnodes; 308 static u_long gapvnodes; /* gap between wanted and desired */ 309 static u_long vhiwat; /* enough extras after expansion */ 310 static u_long vlowat; /* minimal extras before expansion */ 311 static u_long vstir; /* nonzero to stir non-free vnodes */ 312 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 313 314 static u_long vnlru_read_freevnodes(void); 315 316 /* 317 * Note that no attempt is made to sanitize these parameters. 318 */ 319 static int 320 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 321 { 322 u_long val; 323 int error; 324 325 val = desiredvnodes; 326 error = sysctl_handle_long(oidp, &val, 0, req); 327 if (error != 0 || req->newptr == NULL) 328 return (error); 329 330 if (val == desiredvnodes) 331 return (0); 332 mtx_lock(&vnode_list_mtx); 333 desiredvnodes = val; 334 wantfreevnodes = desiredvnodes / 4; 335 vnlru_recalc(); 336 mtx_unlock(&vnode_list_mtx); 337 /* 338 * XXX There is no protection against multiple threads changing 339 * desiredvnodes at the same time. Locking above only helps vnlru and 340 * getnewvnode. 341 */ 342 vfs_hash_changesize(desiredvnodes); 343 cache_changesize(desiredvnodes); 344 return (0); 345 } 346 347 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 348 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 349 "LU", "Target for maximum number of vnodes"); 350 351 static int 352 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 353 { 354 u_long val; 355 int error; 356 357 val = wantfreevnodes; 358 error = sysctl_handle_long(oidp, &val, 0, req); 359 if (error != 0 || req->newptr == NULL) 360 return (error); 361 362 if (val == wantfreevnodes) 363 return (0); 364 mtx_lock(&vnode_list_mtx); 365 wantfreevnodes = val; 366 vnlru_recalc(); 367 mtx_unlock(&vnode_list_mtx); 368 return (0); 369 } 370 371 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 372 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 373 "LU", "Target for minimum number of \"free\" vnodes"); 374 375 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 376 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 377 static int vnlru_nowhere; 378 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW | CTLFLAG_STATS, 379 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 380 381 static int 382 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 383 { 384 struct vnode *vp; 385 struct nameidata nd; 386 char *buf; 387 unsigned long ndflags; 388 int error; 389 390 if (req->newptr == NULL) 391 return (EINVAL); 392 if (req->newlen >= PATH_MAX) 393 return (E2BIG); 394 395 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 396 error = SYSCTL_IN(req, buf, req->newlen); 397 if (error != 0) 398 goto out; 399 400 buf[req->newlen] = '\0'; 401 402 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1; 403 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf); 404 if ((error = namei(&nd)) != 0) 405 goto out; 406 vp = nd.ni_vp; 407 408 if (VN_IS_DOOMED(vp)) { 409 /* 410 * This vnode is being recycled. Return != 0 to let the caller 411 * know that the sysctl had no effect. 
Return EAGAIN because a 412 * subsequent call will likely succeed (since namei will create 413 * a new vnode if necessary) 414 */ 415 error = EAGAIN; 416 goto putvnode; 417 } 418 419 counter_u64_add(recycles_count, 1); 420 vgone(vp); 421 putvnode: 422 vput(vp); 423 NDFREE_PNBUF(&nd); 424 out: 425 free(buf, M_TEMP); 426 return (error); 427 } 428 429 static int 430 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 431 { 432 struct thread *td = curthread; 433 struct vnode *vp; 434 struct file *fp; 435 int error; 436 int fd; 437 438 if (req->newptr == NULL) 439 return (EBADF); 440 441 error = sysctl_handle_int(oidp, &fd, 0, req); 442 if (error != 0) 443 return (error); 444 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 445 if (error != 0) 446 return (error); 447 vp = fp->f_vnode; 448 449 error = vn_lock(vp, LK_EXCLUSIVE); 450 if (error != 0) 451 goto drop; 452 453 counter_u64_add(recycles_count, 1); 454 vgone(vp); 455 VOP_UNLOCK(vp); 456 drop: 457 fdrop(fp, td); 458 return (error); 459 } 460 461 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 462 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 463 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 464 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 465 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 466 sysctl_ftry_reclaim_vnode, "I", 467 "Try to reclaim a vnode by its file descriptor"); 468 469 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 470 #define vnsz2log 8 471 #ifndef DEBUG_LOCKS 472 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log && 473 sizeof(struct vnode) < 1UL << (vnsz2log + 1), 474 "vnsz2log needs to be updated"); 475 #endif 476 477 /* 478 * Support for the bufobj clean & dirty pctrie. 479 */ 480 static void * 481 buf_trie_alloc(struct pctrie *ptree) 482 { 483 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 484 } 485 486 static void 487 buf_trie_free(struct pctrie *ptree, void *node) 488 { 489 uma_zfree_smr(buf_trie_zone, node); 490 } 491 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 492 buf_trie_smr); 493 494 /* 495 * Initialize the vnode management data structures. 496 * 497 * Reevaluate the following cap on the number of vnodes after the physical 498 * memory size exceeds 512GB. In the limit, as the physical memory size 499 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 
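 *
 * Worked example (illustrative): 512GB of physical memory is
 * 512 * 1024 * 1024 KB; dividing by 64 gives the 8,388,608 (~8M)
 * vnode cap encoded in MAXVNODES_MAX below.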
500 */ 501 #ifndef MAXVNODES_MAX 502 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 503 #endif 504 505 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 506 507 static struct vnode * 508 vn_alloc_marker(struct mount *mp) 509 { 510 struct vnode *vp; 511 512 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 513 vp->v_type = VMARKER; 514 vp->v_mount = mp; 515 516 return (vp); 517 } 518 519 static void 520 vn_free_marker(struct vnode *vp) 521 { 522 523 MPASS(vp->v_type == VMARKER); 524 free(vp, M_VNODE_MARKER); 525 } 526 527 #ifdef KASAN 528 static int 529 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused) 530 { 531 kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0); 532 return (0); 533 } 534 535 static void 536 vnode_dtor(void *mem, int size, void *arg __unused) 537 { 538 size_t end1, end2, off1, off2; 539 540 _Static_assert(offsetof(struct vnode, v_vnodelist) < 541 offsetof(struct vnode, v_dbatchcpu), 542 "KASAN marks require updating"); 543 544 off1 = offsetof(struct vnode, v_vnodelist); 545 off2 = offsetof(struct vnode, v_dbatchcpu); 546 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); 547 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); 548 549 /* 550 * Access to the v_vnodelist and v_dbatchcpu fields are permitted even 551 * after the vnode has been freed. Try to get some KASAN coverage by 552 * marking everything except those two fields as invalid. Because 553 * KASAN's tracking is not byte-granular, any preceding fields sharing 554 * the same 8-byte aligned word must also be marked valid. 555 */ 556 557 /* Handle the area from the start until v_vnodelist... */ 558 off1 = rounddown2(off1, KASAN_SHADOW_SCALE); 559 kasan_mark(mem, off1, off1, KASAN_UMA_FREED); 560 561 /* ... then the area between v_vnodelist and v_dbatchcpu ... */ 562 off1 = roundup2(end1, KASAN_SHADOW_SCALE); 563 off2 = rounddown2(off2, KASAN_SHADOW_SCALE); 564 if (off2 > off1) 565 kasan_mark((void *)((char *)mem + off1), off2 - off1, 566 off2 - off1, KASAN_UMA_FREED); 567 568 /* ... and finally the area from v_dbatchcpu to the end. */ 569 off2 = roundup2(end2, KASAN_SHADOW_SCALE); 570 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, 571 KASAN_UMA_FREED); 572 } 573 #endif /* KASAN */ 574 575 /* 576 * Initialize a vnode as it first enters the zone. 577 */ 578 static int 579 vnode_init(void *mem, int size, int flags) 580 { 581 struct vnode *vp; 582 583 vp = mem; 584 bzero(vp, size); 585 /* 586 * Setup locks. 587 */ 588 vp->v_vnlock = &vp->v_lock; 589 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 590 /* 591 * By default, don't allow shared locks unless filesystems opt-in. 592 */ 593 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 594 LK_NOSHARE | LK_IS_VNODE); 595 /* 596 * Initialize bufobj. 597 */ 598 bufobj_init(&vp->v_bufobj, vp); 599 /* 600 * Initialize namecache. 601 */ 602 cache_vnode_init(vp); 603 /* 604 * Initialize rangelocks. 605 */ 606 rangelock_init(&vp->v_rl); 607 608 vp->v_dbatchcpu = NOCPU; 609 610 vp->v_state = VSTATE_DEAD; 611 612 /* 613 * Check vhold_recycle_free for an explanation. 614 */ 615 vp->v_holdcnt = VHOLD_NO_SMR; 616 vp->v_type = VNON; 617 mtx_lock(&vnode_list_mtx); 618 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 619 mtx_unlock(&vnode_list_mtx); 620 return (0); 621 } 622 623 /* 624 * Free a vnode when it is cleared from the zone. 
625 */ 626 static void 627 vnode_fini(void *mem, int size) 628 { 629 struct vnode *vp; 630 struct bufobj *bo; 631 632 vp = mem; 633 vdbatch_dequeue(vp); 634 mtx_lock(&vnode_list_mtx); 635 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 636 mtx_unlock(&vnode_list_mtx); 637 rangelock_destroy(&vp->v_rl); 638 lockdestroy(vp->v_vnlock); 639 mtx_destroy(&vp->v_interlock); 640 bo = &vp->v_bufobj; 641 rw_destroy(BO_LOCKPTR(bo)); 642 643 kasan_mark(mem, size, size, 0); 644 } 645 646 /* 647 * Provide the size of NFS nclnode and NFS fh for calculation of the 648 * vnode memory consumption. The size is specified directly to 649 * eliminate dependency on NFS-private header. 650 * 651 * Other filesystems may use bigger or smaller (like UFS and ZFS) 652 * private inode data, but the NFS-based estimation is ample enough. 653 * Still, we care about differences in the size between 64- and 32-bit 654 * platforms. 655 * 656 * Namecache structure size is heuristically 657 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 658 */ 659 #ifdef _LP64 660 #define NFS_NCLNODE_SZ (528 + 64) 661 #define NC_SZ 148 662 #else 663 #define NFS_NCLNODE_SZ (360 + 32) 664 #define NC_SZ 92 665 #endif 666 667 static void 668 vntblinit(void *dummy __unused) 669 { 670 struct vdbatch *vd; 671 uma_ctor ctor; 672 uma_dtor dtor; 673 int cpu, physvnodes, virtvnodes; 674 675 /* 676 * Desiredvnodes is a function of the physical memory size and the 677 * kernel's heap size. Generally speaking, it scales with the 678 * physical memory size. The ratio of desiredvnodes to the physical 679 * memory size is 1:16 until desiredvnodes exceeds 98,304. 680 * Thereafter, the 681 * marginal ratio of desiredvnodes to the physical memory size is 682 * 1:64. However, desiredvnodes is limited by the kernel's heap 683 * size. The memory required by desiredvnodes vnodes and vm objects 684 * must not exceed 1/10th of the kernel's heap size. 685 */ 686 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 687 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 688 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 689 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 690 desiredvnodes = min(physvnodes, virtvnodes); 691 if (desiredvnodes > MAXVNODES_MAX) { 692 if (bootverbose) 693 printf("Reducing kern.maxvnodes %lu -> %lu\n", 694 desiredvnodes, MAXVNODES_MAX); 695 desiredvnodes = MAXVNODES_MAX; 696 } 697 wantfreevnodes = desiredvnodes / 4; 698 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 699 TAILQ_INIT(&vnode_list); 700 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 701 /* 702 * The lock is taken to appease WITNESS. 703 */ 704 mtx_lock(&vnode_list_mtx); 705 vnlru_recalc(); 706 mtx_unlock(&vnode_list_mtx); 707 vnode_list_free_marker = vn_alloc_marker(NULL); 708 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 709 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 710 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 711 712 #ifdef KASAN 713 ctor = vnode_ctor; 714 dtor = vnode_dtor; 715 #else 716 ctor = NULL; 717 dtor = NULL; 718 #endif 719 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, 720 vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN); 721 uma_zone_set_smr(vnode_zone, vfs_smr); 722 723 /* 724 * Preallocate enough nodes to support one-per buf so that 725 * we can not fail an insert. reassignbuf() callers can not 726 * tolerate the insertion failure. 
 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock; it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, the vfs_busy-enforced lock is before the lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 *	root fs				var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 * 1. VOP_LOOKUP() obtains B while A is held
 * 2. vfs_busy() obtains a shared lock on F while A and B are held
 * 3. vput() releases lock on B
 * 4. vput() releases lock on A
 * 5. VFS_ROOT() obtains lock on D while shared lock on F is held
 * 6. vfs_unbusy() releases shared lock on F
 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *    Attempt to lock A (instead of vp_crossmp) while D is held would
 *    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point's fate is decided.
	 * If the thread doing the unmounting fails, it will clear the
	 * MNTK_UNMOUNT flag before waking us up, indicating that this mount
	 * point has survived the unmount attempt and vfs_busy should retry.
	 * Otherwise the unmounter thread will set the MNTK_REFEXPIRE flag in
	 * addition to MNTK_UNMOUNT, indicating that the mount point is about
	 * to be really destroyed.  vfs_busy needs to release its reference on
	 * the mount point in this case and return with ENOENT, telling the
	 * caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and take the slow path if so.
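 *
 * Illustrative sketch of the lookup below: the cache index is the low
 * byte of the folded fsid, i.e. roughly
 *
 *	hash = val[0] ^ val[1];
 *	index = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
 *
 * so popular fsids map to fixed slots and a mismatch simply falls back
 * to the locked mountlist walk.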
926 */ 927 struct mount * 928 vfs_busyfs(fsid_t *fsid) 929 { 930 #define FSID_CACHE_SIZE 256 931 typedef struct mount * volatile vmp_t; 932 static vmp_t cache[FSID_CACHE_SIZE]; 933 struct mount *mp; 934 int error; 935 uint32_t hash; 936 937 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 938 hash = fsid->val[0] ^ fsid->val[1]; 939 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 940 mp = cache[hash]; 941 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 942 goto slow; 943 if (vfs_busy(mp, 0) != 0) { 944 cache[hash] = NULL; 945 goto slow; 946 } 947 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 948 return (mp); 949 else 950 vfs_unbusy(mp); 951 952 slow: 953 mtx_lock(&mountlist_mtx); 954 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 955 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 956 error = vfs_busy(mp, MBF_MNTLSTLOCK); 957 if (error) { 958 cache[hash] = NULL; 959 mtx_unlock(&mountlist_mtx); 960 return (NULL); 961 } 962 cache[hash] = mp; 963 return (mp); 964 } 965 } 966 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 967 mtx_unlock(&mountlist_mtx); 968 return ((struct mount *) 0); 969 } 970 971 /* 972 * Check if a user can access privileged mount options. 973 */ 974 int 975 vfs_suser(struct mount *mp, struct thread *td) 976 { 977 int error; 978 979 if (jailed(td->td_ucred)) { 980 /* 981 * If the jail of the calling thread lacks permission for 982 * this type of file system, deny immediately. 983 */ 984 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 985 return (EPERM); 986 987 /* 988 * If the file system was mounted outside the jail of the 989 * calling thread, deny immediately. 990 */ 991 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 992 return (EPERM); 993 } 994 995 /* 996 * If file system supports delegated administration, we don't check 997 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 998 * by the file system itself. 999 * If this is not the user that did original mount, we check for 1000 * the PRIV_VFS_MOUNT_OWNER privilege. 1001 */ 1002 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 1003 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 1004 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 1005 return (error); 1006 } 1007 return (0); 1008 } 1009 1010 /* 1011 * Get a new unique fsid. Try to make its val[0] unique, since this value 1012 * will be used to create fake device numbers for stat(). Also try (but 1013 * not so hard) make its val[0] unique mod 2^16, since some emulators only 1014 * support 16-bit device numbers. We end up with unique val[0]'s for the 1015 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 1016 * 1017 * Keep in mind that several mounts may be running in parallel. Starting 1018 * the search one past where the previous search terminated is both a 1019 * micro-optimization and a defense against returning the same fsid to 1020 * different mounts. 
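 *
 * (Illustrative note: mntid_base below is a 16-bit counter, so it wraps
 * after 2^16 increments; that is why the uniqueness guarantees above are
 * bounded by the first 2^16 calls.)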
1021 */ 1022 void 1023 vfs_getnewfsid(struct mount *mp) 1024 { 1025 static uint16_t mntid_base; 1026 struct mount *nmp; 1027 fsid_t tfsid; 1028 int mtype; 1029 1030 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 1031 mtx_lock(&mntid_mtx); 1032 mtype = mp->mnt_vfc->vfc_typenum; 1033 tfsid.val[1] = mtype; 1034 mtype = (mtype & 0xFF) << 24; 1035 for (;;) { 1036 tfsid.val[0] = makedev(255, 1037 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 1038 mntid_base++; 1039 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 1040 break; 1041 vfs_rel(nmp); 1042 } 1043 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 1044 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 1045 mtx_unlock(&mntid_mtx); 1046 } 1047 1048 /* 1049 * Knob to control the precision of file timestamps: 1050 * 1051 * 0 = seconds only; nanoseconds zeroed. 1052 * 1 = seconds and nanoseconds, accurate within 1/HZ. 1053 * 2 = seconds and nanoseconds, truncated to microseconds. 1054 * >=3 = seconds and nanoseconds, maximum precision. 1055 */ 1056 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 1057 1058 static int timestamp_precision = TSP_USEC; 1059 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1060 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1061 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1062 "3+: sec + ns (max. precision))"); 1063 1064 /* 1065 * Get a current timestamp. 1066 */ 1067 void 1068 vfs_timestamp(struct timespec *tsp) 1069 { 1070 struct timeval tv; 1071 1072 switch (timestamp_precision) { 1073 case TSP_SEC: 1074 tsp->tv_sec = time_second; 1075 tsp->tv_nsec = 0; 1076 break; 1077 case TSP_HZ: 1078 getnanotime(tsp); 1079 break; 1080 case TSP_USEC: 1081 microtime(&tv); 1082 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1083 break; 1084 case TSP_NSEC: 1085 default: 1086 nanotime(tsp); 1087 break; 1088 } 1089 } 1090 1091 /* 1092 * Set vnode attributes to VNOVAL 1093 */ 1094 void 1095 vattr_null(struct vattr *vap) 1096 { 1097 1098 vap->va_type = VNON; 1099 vap->va_size = VNOVAL; 1100 vap->va_bytes = VNOVAL; 1101 vap->va_mode = VNOVAL; 1102 vap->va_nlink = VNOVAL; 1103 vap->va_uid = VNOVAL; 1104 vap->va_gid = VNOVAL; 1105 vap->va_fsid = VNOVAL; 1106 vap->va_fileid = VNOVAL; 1107 vap->va_blocksize = VNOVAL; 1108 vap->va_rdev = VNOVAL; 1109 vap->va_atime.tv_sec = VNOVAL; 1110 vap->va_atime.tv_nsec = VNOVAL; 1111 vap->va_mtime.tv_sec = VNOVAL; 1112 vap->va_mtime.tv_nsec = VNOVAL; 1113 vap->va_ctime.tv_sec = VNOVAL; 1114 vap->va_ctime.tv_nsec = VNOVAL; 1115 vap->va_birthtime.tv_sec = VNOVAL; 1116 vap->va_birthtime.tv_nsec = VNOVAL; 1117 vap->va_flags = VNOVAL; 1118 vap->va_gen = VNOVAL; 1119 vap->va_vaflags = 0; 1120 } 1121 1122 /* 1123 * Try to reduce the total number of vnodes. 1124 * 1125 * This routine (and its user) are buggy in at least the following ways: 1126 * - all parameters were picked years ago when RAM sizes were significantly 1127 * smaller 1128 * - it can pick vnodes based on pages used by the vm object, but filesystems 1129 * like ZFS don't use it making the pick broken 1130 * - since ZFS has its own aging policy it gets partially combated by this one 1131 * - a dedicated method should be provided for filesystems to let them decide 1132 * whether the vnode should be recycled 1133 * 1134 * This routine is called when we have too many vnodes. It attempts 1135 * to free <count> vnodes and will potentially free vnodes that still 1136 * have VM backing store (VM backing store is typically the cause 1137 * of a vnode blowout so we want to do this). Therefore, this operation 1138 * is not considered cheap. 
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
1204 */ 1205 if (!VI_TRYLOCK(vp)) 1206 goto next_iter; 1207 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1208 VI_UNLOCK(vp); 1209 goto next_iter; 1210 } 1211 if (vp->v_mount == NULL) { 1212 VI_UNLOCK(vp); 1213 goto next_iter; 1214 } 1215 vholdl(vp); 1216 VI_UNLOCK(vp); 1217 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1218 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1219 mtx_unlock(&vnode_list_mtx); 1220 1221 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1222 vdrop_recycle(vp); 1223 goto next_iter_unlocked; 1224 } 1225 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1226 vdrop_recycle(vp); 1227 vn_finished_write(mp); 1228 goto next_iter_unlocked; 1229 } 1230 1231 VI_LOCK(vp); 1232 if (vp->v_usecount > 0 || 1233 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1234 (vp->v_object != NULL && vp->v_object->handle == vp && 1235 vp->v_object->resident_page_count > trigger)) { 1236 VOP_UNLOCK(vp); 1237 vdropl_recycle(vp); 1238 vn_finished_write(mp); 1239 goto next_iter_unlocked; 1240 } 1241 counter_u64_add(recycles_count, 1); 1242 vgonel(vp); 1243 VOP_UNLOCK(vp); 1244 vdropl_recycle(vp); 1245 vn_finished_write(mp); 1246 done++; 1247 next_iter_unlocked: 1248 maybe_yield(); 1249 mtx_lock(&vnode_list_mtx); 1250 goto restart; 1251 next_iter: 1252 MPASS(vp->v_type != VMARKER); 1253 if (!should_yield()) 1254 continue; 1255 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1256 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1257 mtx_unlock(&vnode_list_mtx); 1258 kern_yield(PRI_USER); 1259 mtx_lock(&vnode_list_mtx); 1260 goto restart; 1261 } 1262 if (done == 0 && !retried) { 1263 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1264 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1265 retried = true; 1266 goto restart; 1267 } 1268 return (done); 1269 } 1270 1271 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1272 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1273 0, 1274 "limit on vnode free requests per call to the vnlru_free routine"); 1275 1276 /* 1277 * Attempt to reduce the free list by the requested amount. 1278 */ 1279 static int 1280 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp) 1281 { 1282 struct vnode *vp; 1283 struct mount *mp; 1284 int ocount; 1285 1286 mtx_assert(&vnode_list_mtx, MA_OWNED); 1287 if (count > max_vnlru_free) 1288 count = max_vnlru_free; 1289 ocount = count; 1290 vp = mvp; 1291 for (;;) { 1292 if (count == 0) { 1293 break; 1294 } 1295 vp = TAILQ_NEXT(vp, v_vnodelist); 1296 if (__predict_false(vp == NULL)) { 1297 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1298 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1299 break; 1300 } 1301 if (__predict_false(vp->v_type == VMARKER)) 1302 continue; 1303 if (vp->v_holdcnt > 0) 1304 continue; 1305 /* 1306 * Don't recycle if our vnode is from different type 1307 * of mount point. Note that mp is type-safe, the 1308 * check does not reach unmapped address even if 1309 * vnode is reclaimed. 1310 */ 1311 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1312 mp->mnt_op != mnt_op) { 1313 continue; 1314 } 1315 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1316 continue; 1317 } 1318 if (!vhold_recycle_free(vp)) 1319 continue; 1320 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1321 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1322 mtx_unlock(&vnode_list_mtx); 1323 /* 1324 * FIXME: ignores the return value, meaning it may be nothing 1325 * got recycled but it claims otherwise to the caller. 
1326 * 1327 * Originally the value started being ignored in 2005 with 1328 * 114a1006a8204aa156e1f9ad6476cdff89cada7f . 1329 * 1330 * Respecting the value can run into significant stalls if most 1331 * vnodes belong to one file system and it has writes 1332 * suspended. In presence of many threads and millions of 1333 * vnodes they keep contending on the vnode_list_mtx lock only 1334 * to find vnodes they can't recycle. 1335 * 1336 * The solution would be to pre-check if the vnode is likely to 1337 * be recycle-able, but it needs to happen with the 1338 * vnode_list_mtx lock held. This runs into a problem where 1339 * VOP_GETWRITEMOUNT (currently needed to find out about if 1340 * writes are frozen) can take locks which LOR against it. 1341 * 1342 * Check nullfs for one example (null_getwritemount). 1343 */ 1344 vtryrecycle(vp); 1345 count--; 1346 mtx_lock(&vnode_list_mtx); 1347 vp = mvp; 1348 } 1349 return (ocount - count); 1350 } 1351 1352 static int 1353 vnlru_free_locked(int count) 1354 { 1355 1356 mtx_assert(&vnode_list_mtx, MA_OWNED); 1357 return (vnlru_free_impl(count, NULL, vnode_list_free_marker)); 1358 } 1359 1360 void 1361 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) 1362 { 1363 1364 MPASS(mnt_op != NULL); 1365 MPASS(mvp != NULL); 1366 VNPASS(mvp->v_type == VMARKER, mvp); 1367 mtx_lock(&vnode_list_mtx); 1368 vnlru_free_impl(count, mnt_op, mvp); 1369 mtx_unlock(&vnode_list_mtx); 1370 } 1371 1372 struct vnode * 1373 vnlru_alloc_marker(void) 1374 { 1375 struct vnode *mvp; 1376 1377 mvp = vn_alloc_marker(NULL); 1378 mtx_lock(&vnode_list_mtx); 1379 TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist); 1380 mtx_unlock(&vnode_list_mtx); 1381 return (mvp); 1382 } 1383 1384 void 1385 vnlru_free_marker(struct vnode *mvp) 1386 { 1387 mtx_lock(&vnode_list_mtx); 1388 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1389 mtx_unlock(&vnode_list_mtx); 1390 vn_free_marker(mvp); 1391 } 1392 1393 static void 1394 vnlru_recalc(void) 1395 { 1396 1397 mtx_assert(&vnode_list_mtx, MA_OWNED); 1398 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1399 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1400 vlowat = vhiwat / 2; 1401 } 1402 1403 /* 1404 * Attempt to recycle vnodes in a context that is always safe to block. 1405 * Calling vlrurecycle() from the bowels of filesystem code has some 1406 * interesting deadlock problems. 1407 */ 1408 static struct proc *vnlruproc; 1409 static int vnlruproc_sig; 1410 1411 /* 1412 * The main freevnodes counter is only updated when threads requeue their vnode 1413 * batches. CPUs are conditionally walked to compute a more accurate total. 1414 * 1415 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1416 * at any given moment can still exceed slop, but it should not be by significant 1417 * margin in practice. 
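 *
 * Rough illustration (assuming each per-CPU counter is folded into the
 * global one by vfs_freevnodes_rollup() once it reaches the slop): with
 * N CPUs the global counter may lag the true value by up to about
 * N * VNLRU_FREEVNODES_SLOP vnodes, and vnlru_read_freevnodes() only
 * walks the CPUs when the global counter has drifted by at least the
 * slop since the last walk.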
1418 */ 1419 #define VNLRU_FREEVNODES_SLOP 126 1420 1421 static void __noinline 1422 vfs_freevnodes_rollup(int8_t *lfreevnodes) 1423 { 1424 1425 atomic_add_long(&freevnodes, *lfreevnodes); 1426 *lfreevnodes = 0; 1427 critical_exit(); 1428 } 1429 1430 static __inline void 1431 vfs_freevnodes_inc(void) 1432 { 1433 int8_t *lfreevnodes; 1434 1435 critical_enter(); 1436 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1437 (*lfreevnodes)++; 1438 if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP)) 1439 vfs_freevnodes_rollup(lfreevnodes); 1440 else 1441 critical_exit(); 1442 } 1443 1444 static __inline void 1445 vfs_freevnodes_dec(void) 1446 { 1447 int8_t *lfreevnodes; 1448 1449 critical_enter(); 1450 lfreevnodes = PCPU_PTR(vfs_freevnodes); 1451 (*lfreevnodes)--; 1452 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) 1453 vfs_freevnodes_rollup(lfreevnodes); 1454 else 1455 critical_exit(); 1456 } 1457 1458 static u_long 1459 vnlru_read_freevnodes(void) 1460 { 1461 long slop, rfreevnodes; 1462 int cpu; 1463 1464 rfreevnodes = atomic_load_long(&freevnodes); 1465 1466 if (rfreevnodes > freevnodes_old) 1467 slop = rfreevnodes - freevnodes_old; 1468 else 1469 slop = freevnodes_old - rfreevnodes; 1470 if (slop < VNLRU_FREEVNODES_SLOP) 1471 return (rfreevnodes >= 0 ? rfreevnodes : 0); 1472 freevnodes_old = rfreevnodes; 1473 CPU_FOREACH(cpu) { 1474 freevnodes_old += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; 1475 } 1476 return (freevnodes_old >= 0 ? freevnodes_old : 0); 1477 } 1478 1479 static bool 1480 vnlru_under(u_long rnumvnodes, u_long limit) 1481 { 1482 u_long rfreevnodes, space; 1483 1484 if (__predict_false(rnumvnodes > desiredvnodes)) 1485 return (true); 1486 1487 space = desiredvnodes - rnumvnodes; 1488 if (space < limit) { 1489 rfreevnodes = vnlru_read_freevnodes(); 1490 if (rfreevnodes > wantfreevnodes) 1491 space += rfreevnodes - wantfreevnodes; 1492 } 1493 return (space < limit); 1494 } 1495 1496 static bool 1497 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1498 { 1499 long rfreevnodes, space; 1500 1501 if (__predict_false(rnumvnodes > desiredvnodes)) 1502 return (true); 1503 1504 space = desiredvnodes - rnumvnodes; 1505 if (space < limit) { 1506 rfreevnodes = atomic_load_long(&freevnodes); 1507 if (rfreevnodes > wantfreevnodes) 1508 space += rfreevnodes - wantfreevnodes; 1509 } 1510 return (space < limit); 1511 } 1512 1513 static void 1514 vnlru_kick(void) 1515 { 1516 1517 mtx_assert(&vnode_list_mtx, MA_OWNED); 1518 if (vnlruproc_sig == 0) { 1519 vnlruproc_sig = 1; 1520 wakeup(vnlruproc); 1521 } 1522 } 1523 1524 static void 1525 vnlru_proc(void) 1526 { 1527 u_long rnumvnodes, rfreevnodes, target; 1528 unsigned long onumvnodes; 1529 int done, force, trigger, usevnodes; 1530 bool reclaim_nc_src, want_reread; 1531 1532 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1533 SHUTDOWN_PRI_FIRST); 1534 1535 force = 0; 1536 want_reread = false; 1537 for (;;) { 1538 kproc_suspend_check(vnlruproc); 1539 mtx_lock(&vnode_list_mtx); 1540 rnumvnodes = atomic_load_long(&numvnodes); 1541 1542 if (want_reread) { 1543 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1544 want_reread = false; 1545 } 1546 1547 /* 1548 * If numvnodes is too large (due to desiredvnodes being 1549 * adjusted using its sysctl, or emergency growth), first 1550 * try to reduce it by discarding from the free list. 
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or not excessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNPASS(vp->v_holdcnt > 0, vp);
	/*
	 * This vnode may be found and locked via some other list; if so we
	 * can't recycle it yet.
1656 */ 1657 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1658 CTR2(KTR_VFS, 1659 "%s: impossible to recycle, vp %p lock is already held", 1660 __func__, vp); 1661 vdrop_recycle(vp); 1662 return (EWOULDBLOCK); 1663 } 1664 /* 1665 * Don't recycle if its filesystem is being suspended. 1666 */ 1667 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1668 VOP_UNLOCK(vp); 1669 CTR2(KTR_VFS, 1670 "%s: impossible to recycle, cannot start the write for %p", 1671 __func__, vp); 1672 vdrop_recycle(vp); 1673 return (EBUSY); 1674 } 1675 /* 1676 * If we got this far, we need to acquire the interlock and see if 1677 * anyone picked up this vnode from another list. If not, we will 1678 * mark it with DOOMED via vgonel() so that anyone who does find it 1679 * will skip over it. 1680 */ 1681 VI_LOCK(vp); 1682 if (vp->v_usecount) { 1683 VOP_UNLOCK(vp); 1684 vdropl_recycle(vp); 1685 vn_finished_write(vnmp); 1686 CTR2(KTR_VFS, 1687 "%s: impossible to recycle, %p is already referenced", 1688 __func__, vp); 1689 return (EBUSY); 1690 } 1691 if (!VN_IS_DOOMED(vp)) { 1692 counter_u64_add(recycles_free_count, 1); 1693 vgonel(vp); 1694 } 1695 VOP_UNLOCK(vp); 1696 vdropl_recycle(vp); 1697 vn_finished_write(vnmp); 1698 return (0); 1699 } 1700 1701 /* 1702 * Allocate a new vnode. 1703 * 1704 * The operation never returns an error. Returning an error was disabled 1705 * in r145385 (dated 2005) with the following comment: 1706 * 1707 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1708 * 1709 * Given the age of this commit (almost 15 years at the time of writing this 1710 * comment) restoring the ability to fail requires a significant audit of 1711 * all codepaths. 1712 * 1713 * The routine can try to free a vnode or stall for up to 1 second waiting for 1714 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1715 */ 1716 static u_long vn_alloc_cyclecount; 1717 1718 static struct vnode * __noinline 1719 vn_alloc_hard(struct mount *mp) 1720 { 1721 u_long rnumvnodes, rfreevnodes; 1722 1723 mtx_lock(&vnode_list_mtx); 1724 rnumvnodes = atomic_load_long(&numvnodes); 1725 if (rnumvnodes + 1 < desiredvnodes) { 1726 vn_alloc_cyclecount = 0; 1727 goto alloc; 1728 } 1729 rfreevnodes = vnlru_read_freevnodes(); 1730 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1731 vn_alloc_cyclecount = 0; 1732 vstir = 1; 1733 } 1734 /* 1735 * Grow the vnode cache if it will not be above its target max 1736 * after growing. Otherwise, if the free list is nonempty, try 1737 * to reclaim 1 item from it before growing the cache (possibly 1738 * above its target max if the reclamation failed or is delayed). 1739 * Otherwise, wait for some space. In all cases, schedule 1740 * vnlru_proc() if we are getting short of space. The watermarks 1741 * should be chosen so that we never wait or even reclaim from 1742 * the free list to below its target minimum. 1743 */ 1744 if (vnlru_free_locked(1) > 0) 1745 goto alloc; 1746 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1747 /* 1748 * Wait for space for a new vnode. 
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree_smr(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);

	vn_set_state(vp, VSTATE_UNINITIALIZED);

	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
#ifdef WITNESS
	if (lo->lo_name != tag) {
#endif
		lo->lo_name = tag;
#ifdef WITNESS
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
#endif
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
1845 */ 1846 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1847 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1848 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1849 vp->v_type = VNON; 1850 vp->v_op = vops; 1851 vp->v_irflag = 0; 1852 v_init_counters(vp); 1853 vn_seqc_init(vp); 1854 vp->v_bufobj.bo_ops = &buf_ops_bio; 1855 #ifdef DIAGNOSTIC 1856 if (mp == NULL && vops != &dead_vnodeops) 1857 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1858 #endif 1859 #ifdef MAC 1860 mac_vnode_init(vp); 1861 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1862 mac_vnode_associate_singlelabel(mp, vp); 1863 #endif 1864 if (mp != NULL) { 1865 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1866 } 1867 1868 /* 1869 * For the filesystems which do not use vfs_hash_insert(), 1870 * still initialize v_hash to have vfs_hash_index() useful. 1871 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1872 * its own hashing. 1873 */ 1874 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1875 1876 *vpp = vp; 1877 return (0); 1878 } 1879 1880 void 1881 getnewvnode_reserve(void) 1882 { 1883 struct thread *td; 1884 1885 td = curthread; 1886 MPASS(td->td_vp_reserved == NULL); 1887 td->td_vp_reserved = vn_alloc(NULL); 1888 } 1889 1890 void 1891 getnewvnode_drop_reserve(void) 1892 { 1893 struct thread *td; 1894 1895 td = curthread; 1896 if (td->td_vp_reserved != NULL) { 1897 vn_free(td->td_vp_reserved); 1898 td->td_vp_reserved = NULL; 1899 } 1900 } 1901 1902 static void __noinline 1903 freevnode(struct vnode *vp) 1904 { 1905 struct bufobj *bo; 1906 1907 /* 1908 * The vnode has been marked for destruction, so free it. 1909 * 1910 * The vnode will be returned to the zone where it will 1911 * normally remain until it is needed for another vnode. We 1912 * need to cleanup (or verify that the cleanup has already 1913 * been done) any residual data left from its current use 1914 * so as not to contaminate the freshly allocated vnode. 1915 */ 1916 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1917 /* 1918 * Paired with vgone. 1919 */ 1920 vn_seqc_write_end_free(vp); 1921 1922 bo = &vp->v_bufobj; 1923 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1924 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1925 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1926 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1927 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1928 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1929 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1930 ("clean blk trie not empty")); 1931 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1932 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1933 ("dirty blk trie not empty")); 1934 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1935 ("Dangling rangelock waiters")); 1936 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 1937 ("Leaked inactivation")); 1938 VI_UNLOCK(vp); 1939 cache_assert_no_entries(vp); 1940 1941 #ifdef MAC 1942 mac_vnode_destroy(vp); 1943 #endif 1944 if (vp->v_pollinfo != NULL) { 1945 /* 1946 * Use LK_NOWAIT to shut up witness about the lock. We may get 1947 * here while having another vnode locked when trying to 1948 * satisfy a lookup and needing to recycle. 
1949 */ 1950 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 1951 destroy_vpollinfo(vp->v_pollinfo); 1952 VOP_UNLOCK(vp); 1953 vp->v_pollinfo = NULL; 1954 } 1955 vp->v_mountedhere = NULL; 1956 vp->v_unpcb = NULL; 1957 vp->v_rdev = NULL; 1958 vp->v_fifoinfo = NULL; 1959 vp->v_iflag = 0; 1960 vp->v_vflag = 0; 1961 bo->bo_flag = 0; 1962 vn_free(vp); 1963 } 1964 1965 /* 1966 * Delete from old mount point vnode list, if on one. 1967 */ 1968 static void 1969 delmntque(struct vnode *vp) 1970 { 1971 struct mount *mp; 1972 1973 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1974 1975 mp = vp->v_mount; 1976 MNT_ILOCK(mp); 1977 VI_LOCK(vp); 1978 vp->v_mount = NULL; 1979 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1980 ("bad mount point vnode list size")); 1981 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1982 mp->mnt_nvnodelistsize--; 1983 MNT_REL(mp); 1984 MNT_IUNLOCK(mp); 1985 /* 1986 * The caller expects the interlock to be still held. 1987 */ 1988 ASSERT_VI_LOCKED(vp, __func__); 1989 } 1990 1991 static int 1992 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 1993 { 1994 1995 KASSERT(vp->v_mount == NULL, 1996 ("insmntque: vnode already on per mount vnode list")); 1997 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1998 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 1999 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2000 } else { 2001 KASSERT(!dtr, 2002 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2003 __func__)); 2004 } 2005 2006 /* 2007 * We acquire the vnode interlock early to ensure that the 2008 * vnode cannot be recycled by another process releasing a 2009 * holdcnt on it before we get it on both the vnode list 2010 * and the active vnode list. The mount mutex protects only 2011 * manipulation of the vnode list and the vnode freelist 2012 * mutex protects only manipulation of the active vnode list. 2013 * Hence the need to hold the vnode interlock throughout. 2014 */ 2015 MNT_ILOCK(mp); 2016 VI_LOCK(vp); 2017 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2018 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2019 mp->mnt_nvnodelistsize == 0)) && 2020 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2021 VI_UNLOCK(vp); 2022 MNT_IUNLOCK(mp); 2023 if (dtr) { 2024 vp->v_data = NULL; 2025 vp->v_op = &dead_vnodeops; 2026 vgone(vp); 2027 vput(vp); 2028 } 2029 return (EBUSY); 2030 } 2031 vp->v_mount = mp; 2032 MNT_REF(mp); 2033 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2034 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2035 ("neg mount point vnode list size")); 2036 mp->mnt_nvnodelistsize++; 2037 VI_UNLOCK(vp); 2038 MNT_IUNLOCK(mp); 2039 return (0); 2040 } 2041 2042 /* 2043 * Insert into list of vnodes for the new mount point, if available. 2044 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2045 * leaves handling of the vnode to the caller. 2046 */ 2047 int 2048 insmntque(struct vnode *vp, struct mount *mp) 2049 { 2050 return (insmntque1_int(vp, mp, true)); 2051 } 2052 2053 int 2054 insmntque1(struct vnode *vp, struct mount *mp) 2055 { 2056 return (insmntque1_int(vp, mp, false)); 2057 } 2058 2059 /* 2060 * Flush out and invalidate all buffers associated with a bufobj 2061 * Called with the underlying object locked. 
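 *
 * A rough map of the flags handled below: V_SAVE writes dirty buffers
 * out (via BO_SYNC) before anything is thrown away and fails with
 * EBUSY if new dirty data or output shows up meanwhile, V_CLEANONLY
 * leaves the dirty list alone, and V_VMIO leaves the backing VM
 * object untouched.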
2062 */ 2063 int 2064 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2065 { 2066 int error; 2067 2068 BO_LOCK(bo); 2069 if (flags & V_SAVE) { 2070 error = bufobj_wwait(bo, slpflag, slptimeo); 2071 if (error) { 2072 BO_UNLOCK(bo); 2073 return (error); 2074 } 2075 if (bo->bo_dirty.bv_cnt > 0) { 2076 BO_UNLOCK(bo); 2077 do { 2078 error = BO_SYNC(bo, MNT_WAIT); 2079 } while (error == ERELOOKUP); 2080 if (error != 0) 2081 return (error); 2082 BO_LOCK(bo); 2083 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2084 BO_UNLOCK(bo); 2085 return (EBUSY); 2086 } 2087 } 2088 } 2089 /* 2090 * If you alter this loop please notice that interlock is dropped and 2091 * reacquired in flushbuflist. Special care is needed to ensure that 2092 * no race conditions occur from this. 2093 */ 2094 do { 2095 error = flushbuflist(&bo->bo_clean, 2096 flags, bo, slpflag, slptimeo); 2097 if (error == 0 && !(flags & V_CLEANONLY)) 2098 error = flushbuflist(&bo->bo_dirty, 2099 flags, bo, slpflag, slptimeo); 2100 if (error != 0 && error != EAGAIN) { 2101 BO_UNLOCK(bo); 2102 return (error); 2103 } 2104 } while (error != 0); 2105 2106 /* 2107 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2108 * have write I/O in-progress but if there is a VM object then the 2109 * VM object can also have read-I/O in-progress. 2110 */ 2111 do { 2112 bufobj_wwait(bo, 0, 0); 2113 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2114 BO_UNLOCK(bo); 2115 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2116 BO_LOCK(bo); 2117 } 2118 } while (bo->bo_numoutput > 0); 2119 BO_UNLOCK(bo); 2120 2121 /* 2122 * Destroy the copy in the VM cache, too. 2123 */ 2124 if (bo->bo_object != NULL && 2125 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2126 VM_OBJECT_WLOCK(bo->bo_object); 2127 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2128 OBJPR_CLEANONLY : 0); 2129 VM_OBJECT_WUNLOCK(bo->bo_object); 2130 } 2131 2132 #ifdef INVARIANTS 2133 BO_LOCK(bo); 2134 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2135 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2136 bo->bo_clean.bv_cnt > 0)) 2137 panic("vinvalbuf: flush failed"); 2138 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2139 bo->bo_dirty.bv_cnt > 0) 2140 panic("vinvalbuf: flush dirty failed"); 2141 BO_UNLOCK(bo); 2142 #endif 2143 return (0); 2144 } 2145 2146 /* 2147 * Flush out and invalidate all buffers associated with a vnode. 2148 * Called with the underlying object locked. 2149 */ 2150 int 2151 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2152 { 2153 2154 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2155 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2156 if (vp->v_object != NULL && vp->v_object->handle != vp) 2157 return (0); 2158 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2159 } 2160 2161 /* 2162 * Flush out buffers on the specified list. 2163 * 2164 */ 2165 static int 2166 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2167 int slptimeo) 2168 { 2169 struct buf *bp, *nbp; 2170 int retval, error; 2171 daddr_t lblkno; 2172 b_xflags_t xflags; 2173 2174 ASSERT_BO_WLOCKED(bo); 2175 2176 retval = 0; 2177 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2178 /* 2179 * If we are flushing both V_NORMAL and V_ALT buffers then 2180 * do not skip any buffers. If we are flushing only V_NORMAL 2181 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2182 * flushing only V_ALT buffers then skip buffers not marked 2183 * as BX_ALTDATA. 2184 */ 2185 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2186 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2187 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2188 continue; 2189 } 2190 if (nbp != NULL) { 2191 lblkno = nbp->b_lblkno; 2192 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2193 } 2194 retval = EAGAIN; 2195 error = BUF_TIMELOCK(bp, 2196 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2197 "flushbuf", slpflag, slptimeo); 2198 if (error) { 2199 BO_LOCK(bo); 2200 return (error != ENOLCK ? error : EAGAIN); 2201 } 2202 KASSERT(bp->b_bufobj == bo, 2203 ("bp %p wrong b_bufobj %p should be %p", 2204 bp, bp->b_bufobj, bo)); 2205 /* 2206 * XXX Since there are no node locks for NFS, I 2207 * believe there is a slight chance that a delayed 2208 * write will occur while sleeping just above, so 2209 * check for it. 2210 */ 2211 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2212 (flags & V_SAVE)) { 2213 bremfree(bp); 2214 bp->b_flags |= B_ASYNC; 2215 bwrite(bp); 2216 BO_LOCK(bo); 2217 return (EAGAIN); /* XXX: why not loop ? */ 2218 } 2219 bremfree(bp); 2220 bp->b_flags |= (B_INVAL | B_RELBUF); 2221 bp->b_flags &= ~B_ASYNC; 2222 brelse(bp); 2223 BO_LOCK(bo); 2224 if (nbp == NULL) 2225 break; 2226 nbp = gbincore(bo, lblkno); 2227 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2228 != xflags) 2229 break; /* nbp invalid */ 2230 } 2231 return (retval); 2232 } 2233 2234 int 2235 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2236 { 2237 struct buf *bp; 2238 int error; 2239 daddr_t lblkno; 2240 2241 ASSERT_BO_LOCKED(bo); 2242 2243 for (lblkno = startn;;) { 2244 again: 2245 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2246 if (bp == NULL || bp->b_lblkno >= endn || 2247 bp->b_lblkno < startn) 2248 break; 2249 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2250 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2251 if (error != 0) { 2252 BO_RLOCK(bo); 2253 if (error == ENOLCK) 2254 goto again; 2255 return (error); 2256 } 2257 KASSERT(bp->b_bufobj == bo, 2258 ("bp %p wrong b_bufobj %p should be %p", 2259 bp, bp->b_bufobj, bo)); 2260 lblkno = bp->b_lblkno + 1; 2261 if ((bp->b_flags & B_MANAGED) == 0) 2262 bremfree(bp); 2263 bp->b_flags |= B_RELBUF; 2264 /* 2265 * In the VMIO case, use the B_NOREUSE flag to hint that the 2266 * pages backing each buffer in the range are unlikely to be 2267 * reused. Dirty buffers will have the hint applied once 2268 * they've been written. 2269 */ 2270 if ((bp->b_flags & B_VMIO) != 0) 2271 bp->b_flags |= B_NOREUSE; 2272 brelse(bp); 2273 BO_RLOCK(bo); 2274 } 2275 return (0); 2276 } 2277 2278 /* 2279 * Truncate a file's buffer and pages to a specified length. This 2280 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2281 * sync activity. 2282 */ 2283 int 2284 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2285 { 2286 struct buf *bp, *nbp; 2287 struct bufobj *bo; 2288 daddr_t startlbn; 2289 2290 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2291 vp, blksize, (uintmax_t)length); 2292 2293 /* 2294 * Round up to the *next* lbn. 
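	 *
	 * howmany() is a ceiling division: for example, with a 16384 byte
	 * block size a length of 1 yields startlbn 1, so block 0 (which
	 * still contains valid data) survives and everything from block 1
	 * onward is invalidated.  A length of 0 yields startlbn 0 and
	 * drops every buffer.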
2295 */ 2296 startlbn = howmany(length, blksize); 2297 2298 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2299 2300 bo = &vp->v_bufobj; 2301 restart_unlocked: 2302 BO_LOCK(bo); 2303 2304 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2305 ; 2306 2307 if (length > 0) { 2308 restartsync: 2309 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2310 if (bp->b_lblkno > 0) 2311 continue; 2312 /* 2313 * Since we hold the vnode lock this should only 2314 * fail if we're racing with the buf daemon. 2315 */ 2316 if (BUF_LOCK(bp, 2317 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2318 BO_LOCKPTR(bo)) == ENOLCK) 2319 goto restart_unlocked; 2320 2321 VNASSERT((bp->b_flags & B_DELWRI), vp, 2322 ("buf(%p) on dirty queue without DELWRI", bp)); 2323 2324 bremfree(bp); 2325 bawrite(bp); 2326 BO_LOCK(bo); 2327 goto restartsync; 2328 } 2329 } 2330 2331 bufobj_wwait(bo, 0, 0); 2332 BO_UNLOCK(bo); 2333 vnode_pager_setsize(vp, length); 2334 2335 return (0); 2336 } 2337 2338 /* 2339 * Invalidate the cached pages of a file's buffer within the range of block 2340 * numbers [startlbn, endlbn). 2341 */ 2342 void 2343 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2344 int blksize) 2345 { 2346 struct bufobj *bo; 2347 off_t start, end; 2348 2349 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2350 2351 start = blksize * startlbn; 2352 end = blksize * endlbn; 2353 2354 bo = &vp->v_bufobj; 2355 BO_LOCK(bo); 2356 MPASS(blksize == bo->bo_bsize); 2357 2358 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2359 ; 2360 2361 BO_UNLOCK(bo); 2362 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2363 } 2364 2365 static int 2366 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2367 daddr_t startlbn, daddr_t endlbn) 2368 { 2369 struct buf *bp, *nbp; 2370 bool anyfreed; 2371 2372 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2373 ASSERT_BO_LOCKED(bo); 2374 2375 do { 2376 anyfreed = false; 2377 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2378 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2379 continue; 2380 if (BUF_LOCK(bp, 2381 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2382 BO_LOCKPTR(bo)) == ENOLCK) { 2383 BO_LOCK(bo); 2384 return (EAGAIN); 2385 } 2386 2387 bremfree(bp); 2388 bp->b_flags |= B_INVAL | B_RELBUF; 2389 bp->b_flags &= ~B_ASYNC; 2390 brelse(bp); 2391 anyfreed = true; 2392 2393 BO_LOCK(bo); 2394 if (nbp != NULL && 2395 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2396 nbp->b_vp != vp || 2397 (nbp->b_flags & B_DELWRI) != 0)) 2398 return (EAGAIN); 2399 } 2400 2401 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2402 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2403 continue; 2404 if (BUF_LOCK(bp, 2405 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2406 BO_LOCKPTR(bo)) == ENOLCK) { 2407 BO_LOCK(bo); 2408 return (EAGAIN); 2409 } 2410 bremfree(bp); 2411 bp->b_flags |= B_INVAL | B_RELBUF; 2412 bp->b_flags &= ~B_ASYNC; 2413 brelse(bp); 2414 anyfreed = true; 2415 2416 BO_LOCK(bo); 2417 if (nbp != NULL && 2418 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2419 (nbp->b_vp != vp) || 2420 (nbp->b_flags & B_DELWRI) == 0)) 2421 return (EAGAIN); 2422 } 2423 } while (anyfreed); 2424 return (0); 2425 } 2426 2427 static void 2428 buf_vlist_remove(struct buf *bp) 2429 { 2430 struct bufv *bv; 2431 b_xflags_t flags; 2432 2433 flags = bp->b_xflags; 2434 2435 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2436 ASSERT_BO_WLOCKED(bp->b_bufobj); 2437 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2438 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2439 ("%s: buffer %p has invalid queue state", __func__, bp)); 2440 2441 if ((flags & BX_VNDIRTY) != 0) 2442 bv = &bp->b_bufobj->bo_dirty; 2443 else 2444 bv = &bp->b_bufobj->bo_clean; 2445 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2446 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2447 bv->bv_cnt--; 2448 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2449 } 2450 2451 /* 2452 * Add the buffer to the sorted clean or dirty block list. 2453 * 2454 * NOTE: xflags is passed as a constant, optimizing this inline function! 2455 */ 2456 static void 2457 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2458 { 2459 struct bufv *bv; 2460 struct buf *n; 2461 int error; 2462 2463 ASSERT_BO_WLOCKED(bo); 2464 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2465 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2466 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2467 ("dead bo %p", bo)); 2468 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2469 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2470 bp->b_xflags |= xflags; 2471 if (xflags & BX_VNDIRTY) 2472 bv = &bo->bo_dirty; 2473 else 2474 bv = &bo->bo_clean; 2475 2476 /* 2477 * Keep the list ordered. Optimize empty list insertion. Assume 2478 * we tend to grow at the tail so lookup_le should usually be cheaper 2479 * than _ge. 2480 */ 2481 if (bv->bv_cnt == 0 || 2482 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2483 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2484 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2485 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2486 else 2487 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2488 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2489 if (error) 2490 panic("buf_vlist_add: Preallocated nodes insufficient."); 2491 bv->bv_cnt++; 2492 } 2493 2494 /* 2495 * Look up a buffer using the buffer tries. 2496 */ 2497 struct buf * 2498 gbincore(struct bufobj *bo, daddr_t lblkno) 2499 { 2500 struct buf *bp; 2501 2502 ASSERT_BO_LOCKED(bo); 2503 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2504 if (bp != NULL) 2505 return (bp); 2506 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2507 } 2508 2509 /* 2510 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2511 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2512 * stability of the result. Like other lockless lookups, the found buf may 2513 * already be invalid by the time this function returns. 2514 */ 2515 struct buf * 2516 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2517 { 2518 struct buf *bp; 2519 2520 ASSERT_BO_UNLOCKED(bo); 2521 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2522 if (bp != NULL) 2523 return (bp); 2524 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2525 } 2526 2527 /* 2528 * Associate a buffer with a vnode. 2529 */ 2530 void 2531 bgetvp(struct vnode *vp, struct buf *bp) 2532 { 2533 struct bufobj *bo; 2534 2535 bo = &vp->v_bufobj; 2536 ASSERT_BO_WLOCKED(bo); 2537 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2538 2539 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2540 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2541 ("bgetvp: bp already attached! %p", bp)); 2542 2543 vhold(vp); 2544 bp->b_vp = vp; 2545 bp->b_bufobj = bo; 2546 /* 2547 * Insert onto list for new vnode. 
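	 *
	 * A newly associated buffer always starts out on the clean list;
	 * reassignbuf() moves it to the dirty list later if it picks up
	 * B_DELWRI.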
2548 */ 2549 buf_vlist_add(bp, bo, BX_VNCLEAN); 2550 } 2551 2552 /* 2553 * Disassociate a buffer from a vnode. 2554 */ 2555 void 2556 brelvp(struct buf *bp) 2557 { 2558 struct bufobj *bo; 2559 struct vnode *vp; 2560 2561 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2562 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2563 2564 /* 2565 * Delete from old vnode list, if on one. 2566 */ 2567 vp = bp->b_vp; /* XXX */ 2568 bo = bp->b_bufobj; 2569 BO_LOCK(bo); 2570 buf_vlist_remove(bp); 2571 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2572 bo->bo_flag &= ~BO_ONWORKLST; 2573 mtx_lock(&sync_mtx); 2574 LIST_REMOVE(bo, bo_synclist); 2575 syncer_worklist_len--; 2576 mtx_unlock(&sync_mtx); 2577 } 2578 bp->b_vp = NULL; 2579 bp->b_bufobj = NULL; 2580 BO_UNLOCK(bo); 2581 vdrop(vp); 2582 } 2583 2584 /* 2585 * Add an item to the syncer work queue. 2586 */ 2587 static void 2588 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2589 { 2590 int slot; 2591 2592 ASSERT_BO_WLOCKED(bo); 2593 2594 mtx_lock(&sync_mtx); 2595 if (bo->bo_flag & BO_ONWORKLST) 2596 LIST_REMOVE(bo, bo_synclist); 2597 else { 2598 bo->bo_flag |= BO_ONWORKLST; 2599 syncer_worklist_len++; 2600 } 2601 2602 if (delay > syncer_maxdelay - 2) 2603 delay = syncer_maxdelay - 2; 2604 slot = (syncer_delayno + delay) & syncer_mask; 2605 2606 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2607 mtx_unlock(&sync_mtx); 2608 } 2609 2610 static int 2611 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2612 { 2613 int error, len; 2614 2615 mtx_lock(&sync_mtx); 2616 len = syncer_worklist_len - sync_vnode_count; 2617 mtx_unlock(&sync_mtx); 2618 error = SYSCTL_OUT(req, &len, sizeof(len)); 2619 return (error); 2620 } 2621 2622 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2623 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2624 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2625 2626 static struct proc *updateproc; 2627 static void sched_sync(void); 2628 static struct kproc_desc up_kp = { 2629 "syncer", 2630 sched_sync, 2631 &updateproc 2632 }; 2633 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2634 2635 static int 2636 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2637 { 2638 struct vnode *vp; 2639 struct mount *mp; 2640 2641 *bo = LIST_FIRST(slp); 2642 if (*bo == NULL) 2643 return (0); 2644 vp = bo2vnode(*bo); 2645 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2646 return (1); 2647 /* 2648 * We use vhold in case the vnode does not 2649 * successfully sync. vhold prevents the vnode from 2650 * going away when we unlock the sync_mtx so that 2651 * we can acquire the vnode interlock. 2652 */ 2653 vholdl(vp); 2654 mtx_unlock(&sync_mtx); 2655 VI_UNLOCK(vp); 2656 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2657 vdrop(vp); 2658 mtx_lock(&sync_mtx); 2659 return (*bo == LIST_FIRST(slp)); 2660 } 2661 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2662 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2663 VOP_UNLOCK(vp); 2664 vn_finished_write(mp); 2665 BO_LOCK(*bo); 2666 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2667 /* 2668 * Put us back on the worklist. The worklist 2669 * routine will remove us from our current 2670 * position and then add us back in at a later 2671 * position. 2672 */ 2673 vn_syncer_add_to_worklist(*bo, syncdelay); 2674 } 2675 BO_UNLOCK(*bo); 2676 vdrop(vp); 2677 mtx_lock(&sync_mtx); 2678 return (0); 2679 } 2680 2681 static int first_printf = 1; 2682 2683 /* 2684 * System filesystem synchronizer daemon. 
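 *
 * The work queue is a wheel of syncer_maxdelay buckets
 * (syncer_workitem_pending[]).  vn_syncer_add_to_worklist() places a
 * bufobj in the bucket "delay" seconds ahead of the current position,
 * and the loop below advances syncer_delayno roughly once per second,
 * pushing out whatever has accumulated in that bucket.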
2685 */ 2686 static void 2687 sched_sync(void) 2688 { 2689 struct synclist *next, *slp; 2690 struct bufobj *bo; 2691 long starttime; 2692 struct thread *td = curthread; 2693 int last_work_seen; 2694 int net_worklist_len; 2695 int syncer_final_iter; 2696 int error; 2697 2698 last_work_seen = 0; 2699 syncer_final_iter = 0; 2700 syncer_state = SYNCER_RUNNING; 2701 starttime = time_uptime; 2702 td->td_pflags |= TDP_NORUNNINGBUF; 2703 2704 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2705 SHUTDOWN_PRI_LAST); 2706 2707 mtx_lock(&sync_mtx); 2708 for (;;) { 2709 if (syncer_state == SYNCER_FINAL_DELAY && 2710 syncer_final_iter == 0) { 2711 mtx_unlock(&sync_mtx); 2712 kproc_suspend_check(td->td_proc); 2713 mtx_lock(&sync_mtx); 2714 } 2715 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2716 if (syncer_state != SYNCER_RUNNING && 2717 starttime != time_uptime) { 2718 if (first_printf) { 2719 printf("\nSyncing disks, vnodes remaining... "); 2720 first_printf = 0; 2721 } 2722 printf("%d ", net_worklist_len); 2723 } 2724 starttime = time_uptime; 2725 2726 /* 2727 * Push files whose dirty time has expired. Be careful 2728 * of interrupt race on slp queue. 2729 * 2730 * Skip over empty worklist slots when shutting down. 2731 */ 2732 do { 2733 slp = &syncer_workitem_pending[syncer_delayno]; 2734 syncer_delayno += 1; 2735 if (syncer_delayno == syncer_maxdelay) 2736 syncer_delayno = 0; 2737 next = &syncer_workitem_pending[syncer_delayno]; 2738 /* 2739 * If the worklist has wrapped since the 2740 * it was emptied of all but syncer vnodes, 2741 * switch to the FINAL_DELAY state and run 2742 * for one more second. 2743 */ 2744 if (syncer_state == SYNCER_SHUTTING_DOWN && 2745 net_worklist_len == 0 && 2746 last_work_seen == syncer_delayno) { 2747 syncer_state = SYNCER_FINAL_DELAY; 2748 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2749 } 2750 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2751 syncer_worklist_len > 0); 2752 2753 /* 2754 * Keep track of the last time there was anything 2755 * on the worklist other than syncer vnodes. 2756 * Return to the SHUTTING_DOWN state if any 2757 * new work appears. 2758 */ 2759 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2760 last_work_seen = syncer_delayno; 2761 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2762 syncer_state = SYNCER_SHUTTING_DOWN; 2763 while (!LIST_EMPTY(slp)) { 2764 error = sync_vnode(slp, &bo, td); 2765 if (error == 1) { 2766 LIST_REMOVE(bo, bo_synclist); 2767 LIST_INSERT_HEAD(next, bo, bo_synclist); 2768 continue; 2769 } 2770 2771 if (first_printf == 0) { 2772 /* 2773 * Drop the sync mutex, because some watchdog 2774 * drivers need to sleep while patting 2775 */ 2776 mtx_unlock(&sync_mtx); 2777 wdog_kern_pat(WD_LASTVAL); 2778 mtx_lock(&sync_mtx); 2779 } 2780 } 2781 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2782 syncer_final_iter--; 2783 /* 2784 * The variable rushjob allows the kernel to speed up the 2785 * processing of the filesystem syncer process. A rushjob 2786 * value of N tells the filesystem syncer to process the next 2787 * N seconds worth of work on its queue ASAP. Currently rushjob 2788 * is used by the soft update code to speed up the filesystem 2789 * syncer process when the incore state is getting so far 2790 * ahead of the disk that the kernel memory pool is being 2791 * threatened with exhaustion. 
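		 *
		 * speedup_syncer() below is how rushjob gets raised: one
		 * increment per request, capped at syncdelay / 2 so that
		 * the syncer cannot be made to monopolize the CPU.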
2792 */ 2793 if (rushjob > 0) { 2794 rushjob -= 1; 2795 continue; 2796 } 2797 /* 2798 * Just sleep for a short period of time between 2799 * iterations when shutting down to allow some I/O 2800 * to happen. 2801 * 2802 * If it has taken us less than a second to process the 2803 * current work, then wait. Otherwise start right over 2804 * again. We can still lose time if any single round 2805 * takes more than two seconds, but it does not really 2806 * matter as we are just trying to generally pace the 2807 * filesystem activity. 2808 */ 2809 if (syncer_state != SYNCER_RUNNING || 2810 time_uptime == starttime) { 2811 thread_lock(td); 2812 sched_prio(td, PPAUSE); 2813 thread_unlock(td); 2814 } 2815 if (syncer_state != SYNCER_RUNNING) 2816 cv_timedwait(&sync_wakeup, &sync_mtx, 2817 hz / SYNCER_SHUTDOWN_SPEEDUP); 2818 else if (time_uptime == starttime) 2819 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2820 } 2821 } 2822 2823 /* 2824 * Request the syncer daemon to speed up its work. 2825 * We never push it to speed up more than half of its 2826 * normal turn time, otherwise it could take over the cpu. 2827 */ 2828 int 2829 speedup_syncer(void) 2830 { 2831 int ret = 0; 2832 2833 mtx_lock(&sync_mtx); 2834 if (rushjob < syncdelay / 2) { 2835 rushjob += 1; 2836 stat_rush_requests += 1; 2837 ret = 1; 2838 } 2839 mtx_unlock(&sync_mtx); 2840 cv_broadcast(&sync_wakeup); 2841 return (ret); 2842 } 2843 2844 /* 2845 * Tell the syncer to speed up its work and run though its work 2846 * list several times, then tell it to shut down. 2847 */ 2848 static void 2849 syncer_shutdown(void *arg, int howto) 2850 { 2851 2852 if (howto & RB_NOSYNC) 2853 return; 2854 mtx_lock(&sync_mtx); 2855 syncer_state = SYNCER_SHUTTING_DOWN; 2856 rushjob = 0; 2857 mtx_unlock(&sync_mtx); 2858 cv_broadcast(&sync_wakeup); 2859 kproc_shutdown(arg, howto); 2860 } 2861 2862 void 2863 syncer_suspend(void) 2864 { 2865 2866 syncer_shutdown(updateproc, 0); 2867 } 2868 2869 void 2870 syncer_resume(void) 2871 { 2872 2873 mtx_lock(&sync_mtx); 2874 first_printf = 1; 2875 syncer_state = SYNCER_RUNNING; 2876 mtx_unlock(&sync_mtx); 2877 cv_broadcast(&sync_wakeup); 2878 kproc_resume(updateproc); 2879 } 2880 2881 /* 2882 * Move the buffer between the clean and dirty lists of its vnode. 2883 */ 2884 void 2885 reassignbuf(struct buf *bp) 2886 { 2887 struct vnode *vp; 2888 struct bufobj *bo; 2889 int delay; 2890 #ifdef INVARIANTS 2891 struct bufv *bv; 2892 #endif 2893 2894 vp = bp->b_vp; 2895 bo = bp->b_bufobj; 2896 2897 KASSERT((bp->b_flags & B_PAGING) == 0, 2898 ("%s: cannot reassign paging buffer %p", __func__, bp)); 2899 2900 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2901 bp, bp->b_vp, bp->b_flags); 2902 2903 BO_LOCK(bo); 2904 buf_vlist_remove(bp); 2905 2906 /* 2907 * If dirty, put on list of dirty buffers; otherwise insert onto list 2908 * of clean buffers. 
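	 *
	 * Dirty buffers also put the bufobj on the syncer worklist; the
	 * delay is chosen by vnode type below (dirdelay for VDIR,
	 * metadelay for VCHR, filedelay for everything else).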
2909 */ 2910 if (bp->b_flags & B_DELWRI) { 2911 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2912 switch (vp->v_type) { 2913 case VDIR: 2914 delay = dirdelay; 2915 break; 2916 case VCHR: 2917 delay = metadelay; 2918 break; 2919 default: 2920 delay = filedelay; 2921 } 2922 vn_syncer_add_to_worklist(bo, delay); 2923 } 2924 buf_vlist_add(bp, bo, BX_VNDIRTY); 2925 } else { 2926 buf_vlist_add(bp, bo, BX_VNCLEAN); 2927 2928 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2929 mtx_lock(&sync_mtx); 2930 LIST_REMOVE(bo, bo_synclist); 2931 syncer_worklist_len--; 2932 mtx_unlock(&sync_mtx); 2933 bo->bo_flag &= ~BO_ONWORKLST; 2934 } 2935 } 2936 #ifdef INVARIANTS 2937 bv = &bo->bo_clean; 2938 bp = TAILQ_FIRST(&bv->bv_hd); 2939 KASSERT(bp == NULL || bp->b_bufobj == bo, 2940 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2941 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2942 KASSERT(bp == NULL || bp->b_bufobj == bo, 2943 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2944 bv = &bo->bo_dirty; 2945 bp = TAILQ_FIRST(&bv->bv_hd); 2946 KASSERT(bp == NULL || bp->b_bufobj == bo, 2947 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2948 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2949 KASSERT(bp == NULL || bp->b_bufobj == bo, 2950 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2951 #endif 2952 BO_UNLOCK(bo); 2953 } 2954 2955 static void 2956 v_init_counters(struct vnode *vp) 2957 { 2958 2959 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2960 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2961 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2962 2963 refcount_init(&vp->v_holdcnt, 1); 2964 refcount_init(&vp->v_usecount, 1); 2965 } 2966 2967 /* 2968 * Grab a particular vnode from the free list, increment its 2969 * reference count and lock it. VIRF_DOOMED is set if the vnode 2970 * is being destroyed. Only callers who specify LK_RETRY will 2971 * see doomed vnodes. If inactive processing was delayed in 2972 * vput try to do it here. 2973 * 2974 * usecount is manipulated using atomics without holding any locks. 2975 * 2976 * holdcnt can be manipulated using atomics without holding any locks, 2977 * except when transitioning 1<->0, in which case the interlock is held. 2978 * 2979 * Consumers which don't guarantee liveness of the vnode can use SMR to 2980 * try to get a reference. Note this operation can fail since the vnode 2981 * may be awaiting getting freed by the time they get to it. 
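 *
 * A lockless consumer ends up with roughly the following shape (an
 * illustrative sketch only; vfs_smr_enter()/vfs_smr_exit() are the
 * usual way to enter the vfs SMR section, and the name cache has the
 * real-world version of this pattern):
 *
 *	vfs_smr_enter();
 *	vp = <find the candidate vnode without any locks>;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<retry the lookup>;
 *	error = vget_finish(vp, LK_SHARED, vs);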
2982 */ 2983 enum vgetstate 2984 vget_prep_smr(struct vnode *vp) 2985 { 2986 enum vgetstate vs; 2987 2988 VFS_SMR_ASSERT_ENTERED(); 2989 2990 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2991 vs = VGET_USECOUNT; 2992 } else { 2993 if (vhold_smr(vp)) 2994 vs = VGET_HOLDCNT; 2995 else 2996 vs = VGET_NONE; 2997 } 2998 return (vs); 2999 } 3000 3001 enum vgetstate 3002 vget_prep(struct vnode *vp) 3003 { 3004 enum vgetstate vs; 3005 3006 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3007 vs = VGET_USECOUNT; 3008 } else { 3009 vhold(vp); 3010 vs = VGET_HOLDCNT; 3011 } 3012 return (vs); 3013 } 3014 3015 void 3016 vget_abort(struct vnode *vp, enum vgetstate vs) 3017 { 3018 3019 switch (vs) { 3020 case VGET_USECOUNT: 3021 vrele(vp); 3022 break; 3023 case VGET_HOLDCNT: 3024 vdrop(vp); 3025 break; 3026 default: 3027 __assert_unreachable(); 3028 } 3029 } 3030 3031 int 3032 vget(struct vnode *vp, int flags) 3033 { 3034 enum vgetstate vs; 3035 3036 vs = vget_prep(vp); 3037 return (vget_finish(vp, flags, vs)); 3038 } 3039 3040 int 3041 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3042 { 3043 int error; 3044 3045 if ((flags & LK_INTERLOCK) != 0) 3046 ASSERT_VI_LOCKED(vp, __func__); 3047 else 3048 ASSERT_VI_UNLOCKED(vp, __func__); 3049 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3050 VNPASS(vp->v_holdcnt > 0, vp); 3051 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3052 3053 error = vn_lock(vp, flags); 3054 if (__predict_false(error != 0)) { 3055 vget_abort(vp, vs); 3056 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3057 vp); 3058 return (error); 3059 } 3060 3061 vget_finish_ref(vp, vs); 3062 return (0); 3063 } 3064 3065 void 3066 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3067 { 3068 int old; 3069 3070 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3071 VNPASS(vp->v_holdcnt > 0, vp); 3072 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3073 3074 if (vs == VGET_USECOUNT) 3075 return; 3076 3077 /* 3078 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3079 * the vnode around. Otherwise someone else lended their hold count and 3080 * we have to drop ours. 3081 */ 3082 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3083 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3084 if (old != 0) { 3085 #ifdef INVARIANTS 3086 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3087 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3088 #else 3089 refcount_release(&vp->v_holdcnt); 3090 #endif 3091 } 3092 } 3093 3094 void 3095 vref(struct vnode *vp) 3096 { 3097 enum vgetstate vs; 3098 3099 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3100 vs = vget_prep(vp); 3101 vget_finish_ref(vp, vs); 3102 } 3103 3104 void 3105 vrefact(struct vnode *vp) 3106 { 3107 3108 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3109 #ifdef INVARIANTS 3110 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3111 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3112 #else 3113 refcount_acquire(&vp->v_usecount); 3114 #endif 3115 } 3116 3117 void 3118 vlazy(struct vnode *vp) 3119 { 3120 struct mount *mp; 3121 3122 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3123 3124 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3125 return; 3126 /* 3127 * We may get here for inactive routines after the vnode got doomed. 
3128 */ 3129 if (VN_IS_DOOMED(vp)) 3130 return; 3131 mp = vp->v_mount; 3132 mtx_lock(&mp->mnt_listmtx); 3133 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3134 vp->v_mflag |= VMP_LAZYLIST; 3135 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3136 mp->mnt_lazyvnodelistsize++; 3137 } 3138 mtx_unlock(&mp->mnt_listmtx); 3139 } 3140 3141 static void 3142 vunlazy(struct vnode *vp) 3143 { 3144 struct mount *mp; 3145 3146 ASSERT_VI_LOCKED(vp, __func__); 3147 VNPASS(!VN_IS_DOOMED(vp), vp); 3148 3149 mp = vp->v_mount; 3150 mtx_lock(&mp->mnt_listmtx); 3151 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3152 /* 3153 * Don't remove the vnode from the lazy list if another thread 3154 * has increased the hold count. It may have re-enqueued the 3155 * vnode to the lazy list and is now responsible for its 3156 * removal. 3157 */ 3158 if (vp->v_holdcnt == 0) { 3159 vp->v_mflag &= ~VMP_LAZYLIST; 3160 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3161 mp->mnt_lazyvnodelistsize--; 3162 } 3163 mtx_unlock(&mp->mnt_listmtx); 3164 } 3165 3166 /* 3167 * This routine is only meant to be called from vgonel prior to dooming 3168 * the vnode. 3169 */ 3170 static void 3171 vunlazy_gone(struct vnode *vp) 3172 { 3173 struct mount *mp; 3174 3175 ASSERT_VOP_ELOCKED(vp, __func__); 3176 ASSERT_VI_LOCKED(vp, __func__); 3177 VNPASS(!VN_IS_DOOMED(vp), vp); 3178 3179 if (vp->v_mflag & VMP_LAZYLIST) { 3180 mp = vp->v_mount; 3181 mtx_lock(&mp->mnt_listmtx); 3182 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3183 vp->v_mflag &= ~VMP_LAZYLIST; 3184 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3185 mp->mnt_lazyvnodelistsize--; 3186 mtx_unlock(&mp->mnt_listmtx); 3187 } 3188 } 3189 3190 static void 3191 vdefer_inactive(struct vnode *vp) 3192 { 3193 3194 ASSERT_VI_LOCKED(vp, __func__); 3195 VNPASS(vp->v_holdcnt > 0, vp); 3196 if (VN_IS_DOOMED(vp)) { 3197 vdropl(vp); 3198 return; 3199 } 3200 if (vp->v_iflag & VI_DEFINACT) { 3201 VNPASS(vp->v_holdcnt > 1, vp); 3202 vdropl(vp); 3203 return; 3204 } 3205 if (vp->v_usecount > 0) { 3206 vp->v_iflag &= ~VI_OWEINACT; 3207 vdropl(vp); 3208 return; 3209 } 3210 vlazy(vp); 3211 vp->v_iflag |= VI_DEFINACT; 3212 VI_UNLOCK(vp); 3213 atomic_add_long(&deferred_inact, 1); 3214 } 3215 3216 static void 3217 vdefer_inactive_unlocked(struct vnode *vp) 3218 { 3219 3220 VI_LOCK(vp); 3221 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3222 vdropl(vp); 3223 return; 3224 } 3225 vdefer_inactive(vp); 3226 } 3227 3228 enum vput_op { VRELE, VPUT, VUNREF }; 3229 3230 /* 3231 * Handle ->v_usecount transitioning to 0. 3232 * 3233 * By releasing the last usecount we take ownership of the hold count which 3234 * provides liveness of the vnode, meaning we have to vdrop. 3235 * 3236 * For all vnodes we may need to perform inactive processing. It requires an 3237 * exclusive lock on the vnode, while it is legal to call here with only a 3238 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3239 * inactive processing gets deferred to the syncer. 3240 * 3241 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3242 * on the lock being held all the way until VOP_INACTIVE. This in particular 3243 * happens with UFS which adds half-constructed vnodes to the hash, where they 3244 * can be found by other code. 
3245 */ 3246 static void 3247 vput_final(struct vnode *vp, enum vput_op func) 3248 { 3249 int error; 3250 bool want_unlock; 3251 3252 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3253 VNPASS(vp->v_holdcnt > 0, vp); 3254 3255 VI_LOCK(vp); 3256 3257 /* 3258 * By the time we got here someone else might have transitioned 3259 * the count back to > 0. 3260 */ 3261 if (vp->v_usecount > 0) 3262 goto out; 3263 3264 /* 3265 * If the vnode is doomed vgone already performed inactive processing 3266 * (if needed). 3267 */ 3268 if (VN_IS_DOOMED(vp)) 3269 goto out; 3270 3271 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3272 goto out; 3273 3274 if (vp->v_iflag & VI_DOINGINACT) 3275 goto out; 3276 3277 /* 3278 * Locking operations here will drop the interlock and possibly the 3279 * vnode lock, opening a window where the vnode can get doomed all the 3280 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3281 * perform inactive. 3282 */ 3283 vp->v_iflag |= VI_OWEINACT; 3284 want_unlock = false; 3285 error = 0; 3286 switch (func) { 3287 case VRELE: 3288 switch (VOP_ISLOCKED(vp)) { 3289 case LK_EXCLUSIVE: 3290 break; 3291 case LK_EXCLOTHER: 3292 case 0: 3293 want_unlock = true; 3294 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3295 VI_LOCK(vp); 3296 break; 3297 default: 3298 /* 3299 * The lock has at least one sharer, but we have no way 3300 * to conclude whether this is us. Play it safe and 3301 * defer processing. 3302 */ 3303 error = EAGAIN; 3304 break; 3305 } 3306 break; 3307 case VPUT: 3308 want_unlock = true; 3309 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3310 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3311 LK_NOWAIT); 3312 VI_LOCK(vp); 3313 } 3314 break; 3315 case VUNREF: 3316 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3317 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3318 VI_LOCK(vp); 3319 } 3320 break; 3321 } 3322 if (error == 0) { 3323 if (func == VUNREF) { 3324 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3325 ("recursive vunref")); 3326 vp->v_vflag |= VV_UNREF; 3327 } 3328 for (;;) { 3329 error = vinactive(vp); 3330 if (want_unlock) 3331 VOP_UNLOCK(vp); 3332 if (error != ERELOOKUP || !want_unlock) 3333 break; 3334 VOP_LOCK(vp, LK_EXCLUSIVE); 3335 } 3336 if (func == VUNREF) 3337 vp->v_vflag &= ~VV_UNREF; 3338 vdropl(vp); 3339 } else { 3340 vdefer_inactive(vp); 3341 } 3342 return; 3343 out: 3344 if (func == VPUT) 3345 VOP_UNLOCK(vp); 3346 vdropl(vp); 3347 } 3348 3349 /* 3350 * Decrement ->v_usecount for a vnode. 3351 * 3352 * Releasing the last use count requires additional processing, see vput_final 3353 * above for details. 3354 * 3355 * Comment above each variant denotes lock state on entry and exit. 
3356 */ 3357 3358 /* 3359 * in: any 3360 * out: same as passed in 3361 */ 3362 void 3363 vrele(struct vnode *vp) 3364 { 3365 3366 ASSERT_VI_UNLOCKED(vp, __func__); 3367 if (!refcount_release(&vp->v_usecount)) 3368 return; 3369 vput_final(vp, VRELE); 3370 } 3371 3372 /* 3373 * in: locked 3374 * out: unlocked 3375 */ 3376 void 3377 vput(struct vnode *vp) 3378 { 3379 3380 ASSERT_VOP_LOCKED(vp, __func__); 3381 ASSERT_VI_UNLOCKED(vp, __func__); 3382 if (!refcount_release(&vp->v_usecount)) { 3383 VOP_UNLOCK(vp); 3384 return; 3385 } 3386 vput_final(vp, VPUT); 3387 } 3388 3389 /* 3390 * in: locked 3391 * out: locked 3392 */ 3393 void 3394 vunref(struct vnode *vp) 3395 { 3396 3397 ASSERT_VOP_LOCKED(vp, __func__); 3398 ASSERT_VI_UNLOCKED(vp, __func__); 3399 if (!refcount_release(&vp->v_usecount)) 3400 return; 3401 vput_final(vp, VUNREF); 3402 } 3403 3404 void 3405 vhold(struct vnode *vp) 3406 { 3407 int old; 3408 3409 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3410 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3411 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3412 ("%s: wrong hold count %d", __func__, old)); 3413 if (old == 0) 3414 vfs_freevnodes_dec(); 3415 } 3416 3417 void 3418 vholdnz(struct vnode *vp) 3419 { 3420 3421 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3422 #ifdef INVARIANTS 3423 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3424 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3425 ("%s: wrong hold count %d", __func__, old)); 3426 #else 3427 atomic_add_int(&vp->v_holdcnt, 1); 3428 #endif 3429 } 3430 3431 /* 3432 * Grab a hold count unless the vnode is freed. 3433 * 3434 * Only use this routine if vfs smr is the only protection you have against 3435 * freeing the vnode. 3436 * 3437 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3438 * is not set. After the flag is set the vnode becomes immutable to anyone but 3439 * the thread which managed to set the flag. 3440 * 3441 * It may be tempting to replace the loop with: 3442 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3443 * if (count & VHOLD_NO_SMR) { 3444 * backpedal and error out; 3445 * } 3446 * 3447 * However, while this is more performant, it hinders debugging by eliminating 3448 * the previously mentioned invariant. 3449 */ 3450 bool 3451 vhold_smr(struct vnode *vp) 3452 { 3453 int count; 3454 3455 VFS_SMR_ASSERT_ENTERED(); 3456 3457 count = atomic_load_int(&vp->v_holdcnt); 3458 for (;;) { 3459 if (count & VHOLD_NO_SMR) { 3460 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3461 ("non-zero hold count with flags %d\n", count)); 3462 return (false); 3463 } 3464 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3465 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3466 if (count == 0) 3467 vfs_freevnodes_dec(); 3468 return (true); 3469 } 3470 } 3471 } 3472 3473 /* 3474 * Hold a free vnode for recycling. 3475 * 3476 * Note: vnode_init references this comment. 3477 * 3478 * Attempts to recycle only need the global vnode list lock and have no use for 3479 * SMR. 3480 * 3481 * However, vnodes get inserted into the global list before they get fully 3482 * initialized and stay there until UMA decides to free the memory. This in 3483 * particular means the target can be found before it becomes usable and after 3484 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3485 * VHOLD_NO_SMR. 3486 * 3487 * Note: the vnode may gain more references after we transition the count 0->1. 
3488 */ 3489 static bool 3490 vhold_recycle_free(struct vnode *vp) 3491 { 3492 int count; 3493 3494 mtx_assert(&vnode_list_mtx, MA_OWNED); 3495 3496 count = atomic_load_int(&vp->v_holdcnt); 3497 for (;;) { 3498 if (count & VHOLD_NO_SMR) { 3499 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3500 ("non-zero hold count with flags %d\n", count)); 3501 return (false); 3502 } 3503 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3504 if (count > 0) { 3505 return (false); 3506 } 3507 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3508 vfs_freevnodes_dec(); 3509 return (true); 3510 } 3511 } 3512 } 3513 3514 static void __noinline 3515 vdbatch_process(struct vdbatch *vd) 3516 { 3517 struct vnode *vp; 3518 int i; 3519 3520 mtx_assert(&vd->lock, MA_OWNED); 3521 MPASS(curthread->td_pinned > 0); 3522 MPASS(vd->index == VDBATCH_SIZE); 3523 3524 critical_enter(); 3525 if (mtx_trylock(&vnode_list_mtx)) { 3526 for (i = 0; i < VDBATCH_SIZE; i++) { 3527 vp = vd->tab[i]; 3528 vd->tab[i] = NULL; 3529 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3530 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3531 MPASS(vp->v_dbatchcpu != NOCPU); 3532 vp->v_dbatchcpu = NOCPU; 3533 } 3534 mtx_unlock(&vnode_list_mtx); 3535 } else { 3536 for (i = 0; i < VDBATCH_SIZE; i++) { 3537 vp = vd->tab[i]; 3538 vd->tab[i] = NULL; 3539 MPASS(vp->v_dbatchcpu != NOCPU); 3540 vp->v_dbatchcpu = NOCPU; 3541 } 3542 } 3543 vd->index = 0; 3544 critical_exit(); 3545 } 3546 3547 static void 3548 vdbatch_enqueue(struct vnode *vp) 3549 { 3550 struct vdbatch *vd; 3551 3552 ASSERT_VI_LOCKED(vp, __func__); 3553 VNPASS(!VN_IS_DOOMED(vp), vp); 3554 3555 if (vp->v_dbatchcpu != NOCPU) { 3556 VI_UNLOCK(vp); 3557 return; 3558 } 3559 3560 sched_pin(); 3561 vd = DPCPU_PTR(vd); 3562 mtx_lock(&vd->lock); 3563 MPASS(vd->index < VDBATCH_SIZE); 3564 MPASS(vd->tab[vd->index] == NULL); 3565 /* 3566 * A hack: we depend on being pinned so that we know what to put in 3567 * ->v_dbatchcpu. 3568 */ 3569 vp->v_dbatchcpu = curcpu; 3570 vd->tab[vd->index] = vp; 3571 vd->index++; 3572 VI_UNLOCK(vp); 3573 if (vd->index == VDBATCH_SIZE) 3574 vdbatch_process(vd); 3575 mtx_unlock(&vd->lock); 3576 sched_unpin(); 3577 } 3578 3579 /* 3580 * This routine must only be called for vnodes which are about to be 3581 * deallocated. Supporting dequeue for arbitrary vndoes would require 3582 * validating that the locked batch matches. 3583 */ 3584 static void 3585 vdbatch_dequeue(struct vnode *vp) 3586 { 3587 struct vdbatch *vd; 3588 int i; 3589 short cpu; 3590 3591 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3592 3593 cpu = vp->v_dbatchcpu; 3594 if (cpu == NOCPU) 3595 return; 3596 3597 vd = DPCPU_ID_PTR(cpu, vd); 3598 mtx_lock(&vd->lock); 3599 for (i = 0; i < vd->index; i++) { 3600 if (vd->tab[i] != vp) 3601 continue; 3602 vp->v_dbatchcpu = NOCPU; 3603 vd->index--; 3604 vd->tab[i] = vd->tab[vd->index]; 3605 vd->tab[vd->index] = NULL; 3606 break; 3607 } 3608 mtx_unlock(&vd->lock); 3609 /* 3610 * Either we dequeued the vnode above or the target CPU beat us to it. 3611 */ 3612 MPASS(vp->v_dbatchcpu == NOCPU); 3613 } 3614 3615 /* 3616 * Drop the hold count of the vnode. If this is the last reference to 3617 * the vnode we place it on the free list unless it has been vgone'd 3618 * (marked VIRF_DOOMED) in which case we will free it. 3619 * 3620 * Because the vnode vm object keeps a hold reference on the vnode if 3621 * there is at least one resident non-cached page, the vnode cannot 3622 * leave the active list without the page cleanup done. 
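 *
 * The doomed case is handled by vdropl_final() below, which hands the
 * vnode to freevnode().  A vnode that is merely going idle instead has
 * its position on the global vnode list refreshed through the per-CPU
 * vdbatch machinery, so recycling sees it in rough LRU order.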
3623 */ 3624 static void __noinline 3625 vdropl_final(struct vnode *vp) 3626 { 3627 3628 ASSERT_VI_LOCKED(vp, __func__); 3629 VNPASS(VN_IS_DOOMED(vp), vp); 3630 /* 3631 * Set the VHOLD_NO_SMR flag. 3632 * 3633 * We may be racing against vhold_smr. If they win we can just pretend 3634 * we never got this far, they will vdrop later. 3635 */ 3636 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3637 vfs_freevnodes_inc(); 3638 VI_UNLOCK(vp); 3639 /* 3640 * We lost the aforementioned race. Any subsequent access is 3641 * invalid as they might have managed to vdropl on their own. 3642 */ 3643 return; 3644 } 3645 /* 3646 * Don't bump freevnodes as this one is going away. 3647 */ 3648 freevnode(vp); 3649 } 3650 3651 void 3652 vdrop(struct vnode *vp) 3653 { 3654 3655 ASSERT_VI_UNLOCKED(vp, __func__); 3656 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3657 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3658 return; 3659 VI_LOCK(vp); 3660 vdropl(vp); 3661 } 3662 3663 static void __always_inline 3664 vdropl_impl(struct vnode *vp, bool enqueue) 3665 { 3666 3667 ASSERT_VI_LOCKED(vp, __func__); 3668 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3669 if (!refcount_release(&vp->v_holdcnt)) { 3670 VI_UNLOCK(vp); 3671 return; 3672 } 3673 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3674 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3675 if (VN_IS_DOOMED(vp)) { 3676 vdropl_final(vp); 3677 return; 3678 } 3679 3680 vfs_freevnodes_inc(); 3681 if (vp->v_mflag & VMP_LAZYLIST) { 3682 vunlazy(vp); 3683 } 3684 3685 if (!enqueue) { 3686 VI_UNLOCK(vp); 3687 return; 3688 } 3689 3690 /* 3691 * Also unlocks the interlock. We can't assert on it as we 3692 * released our hold and by now the vnode might have been 3693 * freed. 3694 */ 3695 vdbatch_enqueue(vp); 3696 } 3697 3698 void 3699 vdropl(struct vnode *vp) 3700 { 3701 3702 vdropl_impl(vp, true); 3703 } 3704 3705 /* 3706 * vdrop a vnode when recycling 3707 * 3708 * This is a special case routine only to be used when recycling, differs from 3709 * regular vdrop by not requeieing the vnode on LRU. 3710 * 3711 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3712 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3713 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3714 * loop which can last for as long as writes are frozen. 3715 */ 3716 static void 3717 vdropl_recycle(struct vnode *vp) 3718 { 3719 3720 vdropl_impl(vp, false); 3721 } 3722 3723 static void 3724 vdrop_recycle(struct vnode *vp) 3725 { 3726 3727 VI_LOCK(vp); 3728 vdropl_recycle(vp); 3729 } 3730 3731 /* 3732 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3733 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3734 */ 3735 static int 3736 vinactivef(struct vnode *vp) 3737 { 3738 struct vm_object *obj; 3739 int error; 3740 3741 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3742 ASSERT_VI_LOCKED(vp, "vinactive"); 3743 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3744 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3745 vp->v_iflag |= VI_DOINGINACT; 3746 vp->v_iflag &= ~VI_OWEINACT; 3747 VI_UNLOCK(vp); 3748 /* 3749 * Before moving off the active list, we must be sure that any 3750 * modified pages are converted into the vnode's dirty 3751 * buffers, since these will no longer be checked once the 3752 * vnode is on the inactive list. 3753 * 3754 * The write-out of the dirty pages is asynchronous. 
At the 3755 * point that VOP_INACTIVE() is called, there could still be 3756 * pending I/O and dirty pages in the object. 3757 */ 3758 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3759 vm_object_mightbedirty(obj)) { 3760 VM_OBJECT_WLOCK(obj); 3761 vm_object_page_clean(obj, 0, 0, 0); 3762 VM_OBJECT_WUNLOCK(obj); 3763 } 3764 error = VOP_INACTIVE(vp); 3765 VI_LOCK(vp); 3766 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 3767 vp->v_iflag &= ~VI_DOINGINACT; 3768 return (error); 3769 } 3770 3771 int 3772 vinactive(struct vnode *vp) 3773 { 3774 3775 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3776 ASSERT_VI_LOCKED(vp, "vinactive"); 3777 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3778 3779 if ((vp->v_iflag & VI_OWEINACT) == 0) 3780 return (0); 3781 if (vp->v_iflag & VI_DOINGINACT) 3782 return (0); 3783 if (vp->v_usecount > 0) { 3784 vp->v_iflag &= ~VI_OWEINACT; 3785 return (0); 3786 } 3787 return (vinactivef(vp)); 3788 } 3789 3790 /* 3791 * Remove any vnodes in the vnode table belonging to mount point mp. 3792 * 3793 * If FORCECLOSE is not specified, there should not be any active ones, 3794 * return error if any are found (nb: this is a user error, not a 3795 * system error). If FORCECLOSE is specified, detach any active vnodes 3796 * that are found. 3797 * 3798 * If WRITECLOSE is set, only flush out regular file vnodes open for 3799 * writing. 3800 * 3801 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3802 * 3803 * `rootrefs' specifies the base reference count for the root vnode 3804 * of this filesystem. The root vnode is considered busy if its 3805 * v_usecount exceeds this value. On a successful return, vflush(, td) 3806 * will call vrele() on the root vnode exactly rootrefs times. 3807 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3808 * be zero. 3809 */ 3810 #ifdef DIAGNOSTIC 3811 static int busyprt = 0; /* print out busy vnodes */ 3812 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3813 #endif 3814 3815 int 3816 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3817 { 3818 struct vnode *vp, *mvp, *rootvp = NULL; 3819 struct vattr vattr; 3820 int busy = 0, error; 3821 3822 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3823 rootrefs, flags); 3824 if (rootrefs > 0) { 3825 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3826 ("vflush: bad args")); 3827 /* 3828 * Get the filesystem root vnode. We can vput() it 3829 * immediately, since with rootrefs > 0, it won't go away. 3830 */ 3831 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3832 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3833 __func__, error); 3834 return (error); 3835 } 3836 vput(rootvp); 3837 } 3838 loop: 3839 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3840 vholdl(vp); 3841 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3842 if (error) { 3843 vdrop(vp); 3844 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3845 goto loop; 3846 } 3847 /* 3848 * Skip over a vnodes marked VV_SYSTEM. 3849 */ 3850 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3851 VOP_UNLOCK(vp); 3852 vdrop(vp); 3853 continue; 3854 } 3855 /* 3856 * If WRITECLOSE is set, flush out unlinked but still open 3857 * files (even if open only for reading) and regular file 3858 * vnodes open for writing. 
3859 */ 3860 if (flags & WRITECLOSE) { 3861 if (vp->v_object != NULL) { 3862 VM_OBJECT_WLOCK(vp->v_object); 3863 vm_object_page_clean(vp->v_object, 0, 0, 0); 3864 VM_OBJECT_WUNLOCK(vp->v_object); 3865 } 3866 do { 3867 error = VOP_FSYNC(vp, MNT_WAIT, td); 3868 } while (error == ERELOOKUP); 3869 if (error != 0) { 3870 VOP_UNLOCK(vp); 3871 vdrop(vp); 3872 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3873 return (error); 3874 } 3875 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3876 VI_LOCK(vp); 3877 3878 if ((vp->v_type == VNON || 3879 (error == 0 && vattr.va_nlink > 0)) && 3880 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3881 VOP_UNLOCK(vp); 3882 vdropl(vp); 3883 continue; 3884 } 3885 } else 3886 VI_LOCK(vp); 3887 /* 3888 * With v_usecount == 0, all we need to do is clear out the 3889 * vnode data structures and we are done. 3890 * 3891 * If FORCECLOSE is set, forcibly close the vnode. 3892 */ 3893 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3894 vgonel(vp); 3895 } else { 3896 busy++; 3897 #ifdef DIAGNOSTIC 3898 if (busyprt) 3899 vn_printf(vp, "vflush: busy vnode "); 3900 #endif 3901 } 3902 VOP_UNLOCK(vp); 3903 vdropl(vp); 3904 } 3905 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3906 /* 3907 * If just the root vnode is busy, and if its refcount 3908 * is equal to `rootrefs', then go ahead and kill it. 3909 */ 3910 VI_LOCK(rootvp); 3911 KASSERT(busy > 0, ("vflush: not busy")); 3912 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3913 ("vflush: usecount %d < rootrefs %d", 3914 rootvp->v_usecount, rootrefs)); 3915 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3916 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3917 vgone(rootvp); 3918 VOP_UNLOCK(rootvp); 3919 busy = 0; 3920 } else 3921 VI_UNLOCK(rootvp); 3922 } 3923 if (busy) { 3924 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3925 busy); 3926 return (EBUSY); 3927 } 3928 for (; rootrefs > 0; rootrefs--) 3929 vrele(rootvp); 3930 return (0); 3931 } 3932 3933 /* 3934 * Recycle an unused vnode to the front of the free list. 3935 */ 3936 int 3937 vrecycle(struct vnode *vp) 3938 { 3939 int recycled; 3940 3941 VI_LOCK(vp); 3942 recycled = vrecyclel(vp); 3943 VI_UNLOCK(vp); 3944 return (recycled); 3945 } 3946 3947 /* 3948 * vrecycle, with the vp interlock held. 3949 */ 3950 int 3951 vrecyclel(struct vnode *vp) 3952 { 3953 int recycled; 3954 3955 ASSERT_VOP_ELOCKED(vp, __func__); 3956 ASSERT_VI_LOCKED(vp, __func__); 3957 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3958 recycled = 0; 3959 if (vp->v_usecount == 0) { 3960 recycled = 1; 3961 vgonel(vp); 3962 } 3963 return (recycled); 3964 } 3965 3966 /* 3967 * Eliminate all activity associated with a vnode 3968 * in preparation for reuse. 3969 */ 3970 void 3971 vgone(struct vnode *vp) 3972 { 3973 VI_LOCK(vp); 3974 vgonel(vp); 3975 VI_UNLOCK(vp); 3976 } 3977 3978 /* 3979 * Notify upper mounts about reclaimed or unlinked vnode. 
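 *
 * Stacked filesystems such as nullfs register on mnt_notify so that,
 * for example, reclamation of a lower vnode can doom the corresponding
 * upper vnode.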
3980 */ 3981 void 3982 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 3983 { 3984 struct mount *mp; 3985 struct mount_upper_node *ump; 3986 3987 mp = atomic_load_ptr(&vp->v_mount); 3988 if (mp == NULL) 3989 return; 3990 if (TAILQ_EMPTY(&mp->mnt_notify)) 3991 return; 3992 3993 MNT_ILOCK(mp); 3994 mp->mnt_upper_pending++; 3995 KASSERT(mp->mnt_upper_pending > 0, 3996 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 3997 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 3998 MNT_IUNLOCK(mp); 3999 switch (event) { 4000 case VFS_NOTIFY_UPPER_RECLAIM: 4001 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4002 break; 4003 case VFS_NOTIFY_UPPER_UNLINK: 4004 VFS_UNLINK_LOWERVP(ump->mp, vp); 4005 break; 4006 } 4007 MNT_ILOCK(mp); 4008 } 4009 mp->mnt_upper_pending--; 4010 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4011 mp->mnt_upper_pending == 0) { 4012 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4013 wakeup(&mp->mnt_uppers); 4014 } 4015 MNT_IUNLOCK(mp); 4016 } 4017 4018 /* 4019 * vgone, with the vp interlock held. 4020 */ 4021 static void 4022 vgonel(struct vnode *vp) 4023 { 4024 struct thread *td; 4025 struct mount *mp; 4026 vm_object_t object; 4027 bool active, doinginact, oweinact; 4028 4029 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4030 ASSERT_VI_LOCKED(vp, "vgonel"); 4031 VNASSERT(vp->v_holdcnt, vp, 4032 ("vgonel: vp %p has no reference.", vp)); 4033 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4034 td = curthread; 4035 4036 /* 4037 * Don't vgonel if we're already doomed. 4038 */ 4039 if (VN_IS_DOOMED(vp)) { 4040 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4041 vn_get_state(vp) == VSTATE_DEAD, vp); 4042 return; 4043 } 4044 /* 4045 * Paired with freevnode. 4046 */ 4047 vn_seqc_write_begin_locked(vp); 4048 vunlazy_gone(vp); 4049 vn_irflag_set_locked(vp, VIRF_DOOMED); 4050 vn_set_state(vp, VSTATE_DESTROYING); 4051 4052 /* 4053 * Check to see if the vnode is in use. If so, we have to 4054 * call VOP_CLOSE() and VOP_INACTIVE(). 4055 * 4056 * It could be that VOP_INACTIVE() requested reclamation, in 4057 * which case we should avoid recursion, so check 4058 * VI_DOINGINACT. This is not precise but good enough. 4059 */ 4060 active = vp->v_usecount > 0; 4061 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4062 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4063 4064 /* 4065 * If we need to do inactive VI_OWEINACT will be set. 4066 */ 4067 if (vp->v_iflag & VI_DEFINACT) { 4068 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4069 vp->v_iflag &= ~VI_DEFINACT; 4070 vdropl(vp); 4071 } else { 4072 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4073 VI_UNLOCK(vp); 4074 } 4075 cache_purge_vgone(vp); 4076 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4077 4078 /* 4079 * If purging an active vnode, it must be closed and 4080 * deactivated before being reclaimed. 4081 */ 4082 if (active) 4083 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4084 if (!doinginact) { 4085 do { 4086 if (oweinact || active) { 4087 VI_LOCK(vp); 4088 vinactivef(vp); 4089 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4090 VI_UNLOCK(vp); 4091 } 4092 } while (oweinact); 4093 } 4094 if (vp->v_type == VSOCK) 4095 vfs_unp_reclaim(vp); 4096 4097 /* 4098 * Clean out any buffers associated with the vnode. 4099 * If the flush fails, just toss the buffers. 
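 *
 * vinvalbuf(V_SAVE) below tries to write the dirty buffers out first;
 * if that fails (for instance because the underlying device has gone
 * away), retry with no flags and simply discard the buffers until
 * both the clean and dirty queues are empty.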
4100 */ 4101 mp = NULL; 4102 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4103 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4104 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4105 while (vinvalbuf(vp, 0, 0, 0) != 0) 4106 ; 4107 } 4108 4109 BO_LOCK(&vp->v_bufobj); 4110 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4111 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4112 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4113 vp->v_bufobj.bo_clean.bv_cnt == 0, 4114 ("vp %p bufobj not invalidated", vp)); 4115 4116 /* 4117 * For VMIO bufobj, BO_DEAD is set later, or in 4118 * vm_object_terminate() after the object's page queue is 4119 * flushed. 4120 */ 4121 object = vp->v_bufobj.bo_object; 4122 if (object == NULL) 4123 vp->v_bufobj.bo_flag |= BO_DEAD; 4124 BO_UNLOCK(&vp->v_bufobj); 4125 4126 /* 4127 * Handle the VM part. Tmpfs handles v_object on its own (the 4128 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4129 * should not touch the object borrowed from the lower vnode 4130 * (the handle check). 4131 */ 4132 if (object != NULL && object->type == OBJT_VNODE && 4133 object->handle == vp) 4134 vnode_destroy_vobject(vp); 4135 4136 /* 4137 * Reclaim the vnode. 4138 */ 4139 if (VOP_RECLAIM(vp)) 4140 panic("vgone: cannot reclaim"); 4141 if (mp != NULL) 4142 vn_finished_secondary_write(mp); 4143 VNASSERT(vp->v_object == NULL, vp, 4144 ("vop_reclaim left v_object vp=%p", vp)); 4145 /* 4146 * Clear the advisory locks and wake up waiting threads. 4147 */ 4148 if (vp->v_lockf != NULL) { 4149 (void)VOP_ADVLOCKPURGE(vp); 4150 vp->v_lockf = NULL; 4151 } 4152 /* 4153 * Delete from old mount point vnode list. 4154 */ 4155 if (vp->v_mount == NULL) { 4156 VI_LOCK(vp); 4157 } else { 4158 delmntque(vp); 4159 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4160 } 4161 /* 4162 * Done with purge, reset to the standard lock and invalidate 4163 * the vnode. 4164 */ 4165 vp->v_vnlock = &vp->v_lock; 4166 vp->v_op = &dead_vnodeops; 4167 vp->v_type = VBAD; 4168 vn_set_state(vp, VSTATE_DEAD); 4169 } 4170 4171 /* 4172 * Print out a description of a vnode. 4173 */ 4174 static const char *const vtypename[] = { 4175 [VNON] = "VNON", 4176 [VREG] = "VREG", 4177 [VDIR] = "VDIR", 4178 [VBLK] = "VBLK", 4179 [VCHR] = "VCHR", 4180 [VLNK] = "VLNK", 4181 [VSOCK] = "VSOCK", 4182 [VFIFO] = "VFIFO", 4183 [VBAD] = "VBAD", 4184 [VMARKER] = "VMARKER", 4185 }; 4186 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4187 "vnode type name not added to vtypename"); 4188 4189 static const char *const vstatename[] = { 4190 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4191 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4192 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4193 [VSTATE_DEAD] = "VSTATE_DEAD", 4194 }; 4195 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4196 "vnode state name not added to vstatename"); 4197 4198 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4199 "new hold count flag not added to vn_printf"); 4200 4201 void 4202 vn_printf(struct vnode *vp, const char *fmt, ...) 
4203 { 4204 va_list ap; 4205 char buf[256], buf2[16]; 4206 u_long flags; 4207 u_int holdcnt; 4208 short irflag; 4209 4210 va_start(ap, fmt); 4211 vprintf(fmt, ap); 4212 va_end(ap); 4213 printf("%p: ", (void *)vp); 4214 printf("type %s state %s\n", vtypename[vp->v_type], vstatename[vp->v_state]); 4215 holdcnt = atomic_load_int(&vp->v_holdcnt); 4216 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4217 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4218 vp->v_seqc_users); 4219 switch (vp->v_type) { 4220 case VDIR: 4221 printf(" mountedhere %p\n", vp->v_mountedhere); 4222 break; 4223 case VCHR: 4224 printf(" rdev %p\n", vp->v_rdev); 4225 break; 4226 case VSOCK: 4227 printf(" socket %p\n", vp->v_unpcb); 4228 break; 4229 case VFIFO: 4230 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4231 break; 4232 default: 4233 printf("\n"); 4234 break; 4235 } 4236 buf[0] = '\0'; 4237 buf[1] = '\0'; 4238 if (holdcnt & VHOLD_NO_SMR) 4239 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4240 printf(" hold count flags (%s)\n", buf + 1); 4241 4242 buf[0] = '\0'; 4243 buf[1] = '\0'; 4244 irflag = vn_irflag_read(vp); 4245 if (irflag & VIRF_DOOMED) 4246 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4247 if (irflag & VIRF_PGREAD) 4248 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4249 if (irflag & VIRF_MOUNTPOINT) 4250 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4251 if (irflag & VIRF_TEXT_REF) 4252 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4253 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4254 if (flags != 0) { 4255 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4256 strlcat(buf, buf2, sizeof(buf)); 4257 } 4258 if (vp->v_vflag & VV_ROOT) 4259 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4260 if (vp->v_vflag & VV_ISTTY) 4261 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4262 if (vp->v_vflag & VV_NOSYNC) 4263 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4264 if (vp->v_vflag & VV_ETERNALDEV) 4265 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4266 if (vp->v_vflag & VV_CACHEDLABEL) 4267 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4268 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4269 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4270 if (vp->v_vflag & VV_COPYONWRITE) 4271 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4272 if (vp->v_vflag & VV_SYSTEM) 4273 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4274 if (vp->v_vflag & VV_PROCDEP) 4275 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4276 if (vp->v_vflag & VV_DELETED) 4277 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4278 if (vp->v_vflag & VV_MD) 4279 strlcat(buf, "|VV_MD", sizeof(buf)); 4280 if (vp->v_vflag & VV_FORCEINSMQ) 4281 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4282 if (vp->v_vflag & VV_READLINK) 4283 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4284 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4285 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4286 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4287 if (flags != 0) { 4288 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4289 strlcat(buf, buf2, sizeof(buf)); 4290 } 4291 if (vp->v_iflag & VI_MOUNT) 4292 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4293 if (vp->v_iflag & VI_DOINGINACT) 4294 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4295 if (vp->v_iflag & VI_OWEINACT) 4296 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4297 if (vp->v_iflag & VI_DEFINACT) 4298 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4299 if (vp->v_iflag & VI_FOPENING) 4300 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4301 flags = vp->v_iflag & ~(VI_MOUNT | 
VI_DOINGINACT | 4302 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4303 if (flags != 0) { 4304 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4305 strlcat(buf, buf2, sizeof(buf)); 4306 } 4307 if (vp->v_mflag & VMP_LAZYLIST) 4308 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4309 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4310 if (flags != 0) { 4311 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4312 strlcat(buf, buf2, sizeof(buf)); 4313 } 4314 printf(" flags (%s)", buf + 1); 4315 if (mtx_owned(VI_MTX(vp))) 4316 printf(" VI_LOCKed"); 4317 printf("\n"); 4318 if (vp->v_object != NULL) 4319 printf(" v_object %p ref %d pages %d " 4320 "cleanbuf %d dirtybuf %d\n", 4321 vp->v_object, vp->v_object->ref_count, 4322 vp->v_object->resident_page_count, 4323 vp->v_bufobj.bo_clean.bv_cnt, 4324 vp->v_bufobj.bo_dirty.bv_cnt); 4325 printf(" "); 4326 lockmgr_printinfo(vp->v_vnlock); 4327 if (vp->v_data != NULL) 4328 VOP_PRINT(vp); 4329 } 4330 4331 #ifdef DDB 4332 /* 4333 * List all of the locked vnodes in the system. 4334 * Called when debugging the kernel. 4335 */ 4336 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4337 { 4338 struct mount *mp; 4339 struct vnode *vp; 4340 4341 /* 4342 * Note: because this is DDB, we can't obey the locking semantics 4343 * for these structures, which means we could catch an inconsistent 4344 * state and dereference a nasty pointer. Not much to be done 4345 * about that. 4346 */ 4347 db_printf("Locked vnodes\n"); 4348 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4349 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4350 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4351 vn_printf(vp, "vnode "); 4352 } 4353 } 4354 } 4355 4356 /* 4357 * Show details about the given vnode. 4358 */ 4359 DB_SHOW_COMMAND(vnode, db_show_vnode) 4360 { 4361 struct vnode *vp; 4362 4363 if (!have_addr) 4364 return; 4365 vp = (struct vnode *)addr; 4366 vn_printf(vp, "vnode "); 4367 } 4368 4369 /* 4370 * Show details about the given mount point. 4371 */ 4372 DB_SHOW_COMMAND(mount, db_show_mount) 4373 { 4374 struct mount *mp; 4375 struct vfsopt *opt; 4376 struct statfs *sp; 4377 struct vnode *vp; 4378 char buf[512]; 4379 uint64_t mflags; 4380 u_int flags; 4381 4382 if (!have_addr) { 4383 /* No address given, print short info about all mount points. 
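 *
 * From the ddb prompt this looks roughly like (addresses and mount
 * points are illustrative only):
 *
 *	db> show mount
 *	0xfffff80005b4c000 /dev/ada0p2 on / (ufs)
 *	db> show mount 0xfffff80005b4c000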
*/ 4384 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4385 db_printf("%p %s on %s (%s)\n", mp, 4386 mp->mnt_stat.f_mntfromname, 4387 mp->mnt_stat.f_mntonname, 4388 mp->mnt_stat.f_fstypename); 4389 if (db_pager_quit) 4390 break; 4391 } 4392 db_printf("\nMore info: show mount <addr>\n"); 4393 return; 4394 } 4395 4396 mp = (struct mount *)addr; 4397 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4398 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4399 4400 buf[0] = '\0'; 4401 mflags = mp->mnt_flag; 4402 #define MNT_FLAG(flag) do { \ 4403 if (mflags & (flag)) { \ 4404 if (buf[0] != '\0') \ 4405 strlcat(buf, ", ", sizeof(buf)); \ 4406 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4407 mflags &= ~(flag); \ 4408 } \ 4409 } while (0) 4410 MNT_FLAG(MNT_RDONLY); 4411 MNT_FLAG(MNT_SYNCHRONOUS); 4412 MNT_FLAG(MNT_NOEXEC); 4413 MNT_FLAG(MNT_NOSUID); 4414 MNT_FLAG(MNT_NFS4ACLS); 4415 MNT_FLAG(MNT_UNION); 4416 MNT_FLAG(MNT_ASYNC); 4417 MNT_FLAG(MNT_SUIDDIR); 4418 MNT_FLAG(MNT_SOFTDEP); 4419 MNT_FLAG(MNT_NOSYMFOLLOW); 4420 MNT_FLAG(MNT_GJOURNAL); 4421 MNT_FLAG(MNT_MULTILABEL); 4422 MNT_FLAG(MNT_ACLS); 4423 MNT_FLAG(MNT_NOATIME); 4424 MNT_FLAG(MNT_NOCLUSTERR); 4425 MNT_FLAG(MNT_NOCLUSTERW); 4426 MNT_FLAG(MNT_SUJ); 4427 MNT_FLAG(MNT_EXRDONLY); 4428 MNT_FLAG(MNT_EXPORTED); 4429 MNT_FLAG(MNT_DEFEXPORTED); 4430 MNT_FLAG(MNT_EXPORTANON); 4431 MNT_FLAG(MNT_EXKERB); 4432 MNT_FLAG(MNT_EXPUBLIC); 4433 MNT_FLAG(MNT_LOCAL); 4434 MNT_FLAG(MNT_QUOTA); 4435 MNT_FLAG(MNT_ROOTFS); 4436 MNT_FLAG(MNT_USER); 4437 MNT_FLAG(MNT_IGNORE); 4438 MNT_FLAG(MNT_UPDATE); 4439 MNT_FLAG(MNT_DELEXPORT); 4440 MNT_FLAG(MNT_RELOAD); 4441 MNT_FLAG(MNT_FORCE); 4442 MNT_FLAG(MNT_SNAPSHOT); 4443 MNT_FLAG(MNT_BYFSID); 4444 #undef MNT_FLAG 4445 if (mflags != 0) { 4446 if (buf[0] != '\0') 4447 strlcat(buf, ", ", sizeof(buf)); 4448 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4449 "0x%016jx", mflags); 4450 } 4451 db_printf(" mnt_flag = %s\n", buf); 4452 4453 buf[0] = '\0'; 4454 flags = mp->mnt_kern_flag; 4455 #define MNT_KERN_FLAG(flag) do { \ 4456 if (flags & (flag)) { \ 4457 if (buf[0] != '\0') \ 4458 strlcat(buf, ", ", sizeof(buf)); \ 4459 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4460 flags &= ~(flag); \ 4461 } \ 4462 } while (0) 4463 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4464 MNT_KERN_FLAG(MNTK_ASYNC); 4465 MNT_KERN_FLAG(MNTK_SOFTDEP); 4466 MNT_KERN_FLAG(MNTK_NOMSYNC); 4467 MNT_KERN_FLAG(MNTK_DRAINING); 4468 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4469 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4470 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4471 MNT_KERN_FLAG(MNTK_NO_IOPF); 4472 MNT_KERN_FLAG(MNTK_RECURSE); 4473 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4474 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4475 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4476 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4477 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4478 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4479 MNT_KERN_FLAG(MNTK_NOASYNC); 4480 MNT_KERN_FLAG(MNTK_UNMOUNT); 4481 MNT_KERN_FLAG(MNTK_MWAIT); 4482 MNT_KERN_FLAG(MNTK_SUSPEND); 4483 MNT_KERN_FLAG(MNTK_SUSPEND2); 4484 MNT_KERN_FLAG(MNTK_SUSPENDED); 4485 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4486 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4487 #undef MNT_KERN_FLAG 4488 if (flags != 0) { 4489 if (buf[0] != '\0') 4490 strlcat(buf, ", ", sizeof(buf)); 4491 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4492 "0x%08x", flags); 4493 } 4494 db_printf(" mnt_kern_flag = %s\n", buf); 4495 4496 db_printf(" mnt_opt = "); 4497 opt = TAILQ_FIRST(mp->mnt_opt); 4498 if (opt != NULL) { 4499 db_printf("%s", opt->name); 4500 opt = TAILQ_NEXT(opt, link); 4501 while (opt != 
NULL) { 4502 db_printf(", %s", opt->name); 4503 opt = TAILQ_NEXT(opt, link); 4504 } 4505 } 4506 db_printf("\n"); 4507 4508 sp = &mp->mnt_stat; 4509 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4510 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4511 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4512 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4513 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4514 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4515 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4516 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4517 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4518 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4519 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4520 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4521 4522 db_printf(" mnt_cred = { uid=%u ruid=%u", 4523 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4524 if (jailed(mp->mnt_cred)) 4525 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4526 db_printf(" }\n"); 4527 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4528 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4529 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4530 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4531 db_printf(" mnt_lazyvnodelistsize = %d\n", 4532 mp->mnt_lazyvnodelistsize); 4533 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4534 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4535 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4536 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4537 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4538 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4539 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4540 db_printf(" mnt_secondary_accwrites = %d\n", 4541 mp->mnt_secondary_accwrites); 4542 db_printf(" mnt_gjprovider = %s\n", 4543 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4544 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4545 4546 db_printf("\n\nList of active vnodes\n"); 4547 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4548 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4549 vn_printf(vp, "vnode "); 4550 if (db_pager_quit) 4551 break; 4552 } 4553 } 4554 db_printf("\n\nList of inactive vnodes\n"); 4555 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4556 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4557 vn_printf(vp, "vnode "); 4558 if (db_pager_quit) 4559 break; 4560 } 4561 } 4562 } 4563 #endif /* DDB */ 4564 4565 /* 4566 * Fill in a struct xvfsconf based on a struct vfsconf. 4567 */ 4568 static int 4569 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4570 { 4571 struct xvfsconf xvfsp; 4572 4573 bzero(&xvfsp, sizeof(xvfsp)); 4574 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4575 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4576 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4577 xvfsp.vfc_flags = vfsp->vfc_flags; 4578 /* 4579 * These are unused in userland, we keep them 4580 * to not break binary compatibility. 
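 *
 * Userland (e.g. getvfsbyname(3)) reads these records back through
 * the vfs.conflist sysctl defined below. A rough sketch of such a
 * consumer, illustrative only and with error handling omitted:
 *
 *	size_t len;
 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
 *	struct xvfsconf *xvf = malloc(len);
 *	sysctlbyname("vfs.conflist", xvf, &len, NULL, 0);
 *	for (size_t i = 0; i < len / sizeof(*xvf); i++)
 *		printf("%s type %d refs %d\n", xvf[i].vfc_name,
 *		    xvf[i].vfc_typenum, xvf[i].vfc_refcount);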
4581 */ 4582 xvfsp.vfc_vfsops = NULL; 4583 xvfsp.vfc_next = NULL; 4584 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4585 } 4586 4587 #ifdef COMPAT_FREEBSD32 4588 struct xvfsconf32 { 4589 uint32_t vfc_vfsops; 4590 char vfc_name[MFSNAMELEN]; 4591 int32_t vfc_typenum; 4592 int32_t vfc_refcount; 4593 int32_t vfc_flags; 4594 uint32_t vfc_next; 4595 }; 4596 4597 static int 4598 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4599 { 4600 struct xvfsconf32 xvfsp; 4601 4602 bzero(&xvfsp, sizeof(xvfsp)); 4603 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4604 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4605 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4606 xvfsp.vfc_flags = vfsp->vfc_flags; 4607 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4608 } 4609 #endif 4610 4611 /* 4612 * Top level filesystem related information gathering. 4613 */ 4614 static int 4615 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4616 { 4617 struct vfsconf *vfsp; 4618 int error; 4619 4620 error = 0; 4621 vfsconf_slock(); 4622 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4623 #ifdef COMPAT_FREEBSD32 4624 if (req->flags & SCTL_MASK32) 4625 error = vfsconf2x32(req, vfsp); 4626 else 4627 #endif 4628 error = vfsconf2x(req, vfsp); 4629 if (error) 4630 break; 4631 } 4632 vfsconf_sunlock(); 4633 return (error); 4634 } 4635 4636 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4637 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4638 "S,xvfsconf", "List of all configured filesystems"); 4639 4640 #ifndef BURN_BRIDGES 4641 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4642 4643 static int 4644 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4645 { 4646 int *name = (int *)arg1 - 1; /* XXX */ 4647 u_int namelen = arg2 + 1; /* XXX */ 4648 struct vfsconf *vfsp; 4649 4650 log(LOG_WARNING, "userland calling deprecated sysctl, " 4651 "please rebuild world\n"); 4652 4653 #if 1 || defined(COMPAT_PRELITE2) 4654 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
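 *
 * A single name component leaves no room for the VFS_GENERIC
 * sub-operations handled below (VFS_MAXTYPENUM and VFS_CONF expect
 * two and three components respectively), so treat the request as
 * the old-style VFS_VFSCONF query and answer it with struct ovfsconf
 * records.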
*/ 4655 if (namelen == 1) 4656 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4657 #endif 4658 4659 switch (name[1]) { 4660 case VFS_MAXTYPENUM: 4661 if (namelen != 2) 4662 return (ENOTDIR); 4663 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4664 case VFS_CONF: 4665 if (namelen != 3) 4666 return (ENOTDIR); /* overloaded */ 4667 vfsconf_slock(); 4668 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4669 if (vfsp->vfc_typenum == name[2]) 4670 break; 4671 } 4672 vfsconf_sunlock(); 4673 if (vfsp == NULL) 4674 return (EOPNOTSUPP); 4675 #ifdef COMPAT_FREEBSD32 4676 if (req->flags & SCTL_MASK32) 4677 return (vfsconf2x32(req, vfsp)); 4678 else 4679 #endif 4680 return (vfsconf2x(req, vfsp)); 4681 } 4682 return (EOPNOTSUPP); 4683 } 4684 4685 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4686 CTLFLAG_MPSAFE, vfs_sysctl, 4687 "Generic filesystem"); 4688 4689 #if 1 || defined(COMPAT_PRELITE2) 4690 4691 static int 4692 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4693 { 4694 int error; 4695 struct vfsconf *vfsp; 4696 struct ovfsconf ovfs; 4697 4698 vfsconf_slock(); 4699 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4700 bzero(&ovfs, sizeof(ovfs)); 4701 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4702 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4703 ovfs.vfc_index = vfsp->vfc_typenum; 4704 ovfs.vfc_refcount = vfsp->vfc_refcount; 4705 ovfs.vfc_flags = vfsp->vfc_flags; 4706 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4707 if (error != 0) { 4708 vfsconf_sunlock(); 4709 return (error); 4710 } 4711 } 4712 vfsconf_sunlock(); 4713 return (0); 4714 } 4715 4716 #endif /* 1 || COMPAT_PRELITE2 */ 4717 #endif /* !BURN_BRIDGES */ 4718 4719 static void 4720 unmount_or_warn(struct mount *mp) 4721 { 4722 int error; 4723 4724 error = dounmount(mp, MNT_FORCE, curthread); 4725 if (error != 0) { 4726 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4727 if (error == EBUSY) 4728 printf("BUSY)\n"); 4729 else 4730 printf("%d)\n", error); 4731 } 4732 } 4733 4734 /* 4735 * Unmount all filesystems. The list is traversed in reverse order 4736 * of mounting to avoid dependencies. 4737 */ 4738 void 4739 vfs_unmountall(void) 4740 { 4741 struct mount *mp, *tmp; 4742 4743 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4744 4745 /* 4746 * Since this only runs when rebooting, it is not interlocked. 4747 */ 4748 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4749 vfs_ref(mp); 4750 4751 /* 4752 * Forcibly unmounting "/dev" before "/" would prevent clean 4753 * unmount of the latter. 
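 *
 * Skip it in this pass and unmount it explicitly after the loop, once
 * everything else (including the root filesystem) has been taken care
 * of.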
4754 */ 4755 if (mp == rootdevmp) 4756 continue; 4757 4758 unmount_or_warn(mp); 4759 } 4760 4761 if (rootdevmp != NULL) 4762 unmount_or_warn(rootdevmp); 4763 } 4764 4765 static void 4766 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4767 { 4768 4769 ASSERT_VI_LOCKED(vp, __func__); 4770 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4771 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4772 vdropl(vp); 4773 return; 4774 } 4775 if (vn_lock(vp, lkflags) == 0) { 4776 VI_LOCK(vp); 4777 vinactive(vp); 4778 VOP_UNLOCK(vp); 4779 vdropl(vp); 4780 return; 4781 } 4782 vdefer_inactive_unlocked(vp); 4783 } 4784 4785 static int 4786 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4787 { 4788 4789 return (vp->v_iflag & VI_DEFINACT); 4790 } 4791 4792 static void __noinline 4793 vfs_periodic_inactive(struct mount *mp, int flags) 4794 { 4795 struct vnode *vp, *mvp; 4796 int lkflags; 4797 4798 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4799 if (flags != MNT_WAIT) 4800 lkflags |= LK_NOWAIT; 4801 4802 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4803 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4804 VI_UNLOCK(vp); 4805 continue; 4806 } 4807 vp->v_iflag &= ~VI_DEFINACT; 4808 vfs_deferred_inactive(vp, lkflags); 4809 } 4810 } 4811 4812 static inline bool 4813 vfs_want_msync(struct vnode *vp) 4814 { 4815 struct vm_object *obj; 4816 4817 /* 4818 * This test may be performed without any locks held. 4819 * We rely on vm_object's type stability. 4820 */ 4821 if (vp->v_vflag & VV_NOSYNC) 4822 return (false); 4823 obj = vp->v_object; 4824 return (obj != NULL && vm_object_mightbedirty(obj)); 4825 } 4826 4827 static int 4828 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4829 { 4830 4831 if (vp->v_vflag & VV_NOSYNC) 4832 return (false); 4833 if (vp->v_iflag & VI_DEFINACT) 4834 return (true); 4835 return (vfs_want_msync(vp)); 4836 } 4837 4838 static void __noinline 4839 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4840 { 4841 struct vnode *vp, *mvp; 4842 struct vm_object *obj; 4843 int lkflags, objflags; 4844 bool seen_defer; 4845 4846 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4847 if (flags != MNT_WAIT) { 4848 lkflags |= LK_NOWAIT; 4849 objflags = OBJPC_NOSYNC; 4850 } else { 4851 objflags = OBJPC_SYNC; 4852 } 4853 4854 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4855 seen_defer = false; 4856 if (vp->v_iflag & VI_DEFINACT) { 4857 vp->v_iflag &= ~VI_DEFINACT; 4858 seen_defer = true; 4859 } 4860 if (!vfs_want_msync(vp)) { 4861 if (seen_defer) 4862 vfs_deferred_inactive(vp, lkflags); 4863 else 4864 VI_UNLOCK(vp); 4865 continue; 4866 } 4867 if (vget(vp, lkflags) == 0) { 4868 obj = vp->v_object; 4869 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4870 VM_OBJECT_WLOCK(obj); 4871 vm_object_page_clean(obj, 0, 0, objflags); 4872 VM_OBJECT_WUNLOCK(obj); 4873 } 4874 vput(vp); 4875 if (seen_defer) 4876 vdrop(vp); 4877 } else { 4878 if (seen_defer) 4879 vdefer_inactive_unlocked(vp); 4880 } 4881 } 4882 } 4883 4884 void 4885 vfs_periodic(struct mount *mp, int flags) 4886 { 4887 4888 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4889 4890 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4891 vfs_periodic_inactive(mp, flags); 4892 else 4893 vfs_periodic_msync_inactive(mp, flags); 4894 } 4895 4896 static void 4897 destroy_vpollinfo_free(struct vpollinfo *vi) 4898 { 4899 4900 knlist_destroy(&vi->vpi_selinfo.si_note); 4901 mtx_destroy(&vi->vpi_lock); 4902 free(vi, M_VNODEPOLL); 4903 } 4904 4905 static void 4906 destroy_vpollinfo(struct vpollinfo 
*vi)
4907 {
4908
4909 knlist_clear(&vi->vpi_selinfo.si_note, 1);
4910 seldrain(&vi->vpi_selinfo);
4911 destroy_vpollinfo_free(vi);
4912 }
4913
4914 /*
4915 * Initialize per-vnode helper structure to hold poll-related state.
4916 */
4917 void
4918 v_addpollinfo(struct vnode *vp)
4919 {
4920 struct vpollinfo *vi;
4921
4922 if (vp->v_pollinfo != NULL)
4923 return;
4924 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
4925 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4926 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4927 vfs_knlunlock, vfs_knl_assert_lock);
4928 VI_LOCK(vp);
4929 if (vp->v_pollinfo != NULL) {
4930 VI_UNLOCK(vp);
4931 destroy_vpollinfo_free(vi);
4932 return;
4933 }
4934 vp->v_pollinfo = vi;
4935 VI_UNLOCK(vp);
4936 }
4937
4938 /*
4939 * Record a process's interest in events which might happen to
4940 * a vnode. Because poll uses the historic select-style interface
4941 * internally, this routine serves as both the ``check for any
4942 * pending events'' and the ``record my interest in future events''
4943 * functions. (These are done together, while the lock is held,
4944 * to avoid race conditions.)
4945 */
4946 int
4947 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4948 {
4949
4950 v_addpollinfo(vp);
4951 mtx_lock(&vp->v_pollinfo->vpi_lock);
4952 if (vp->v_pollinfo->vpi_revents & events) {
4953 /*
4954 * This leaves events we are not interested
4955 * in available for the other process which
4956 * presumably had requested them
4957 * (otherwise they would never have been
4958 * recorded).
4959 */
4960 events &= vp->v_pollinfo->vpi_revents;
4961 vp->v_pollinfo->vpi_revents &= ~events;
4962
4963 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4964 return (events);
4965 }
4966 vp->v_pollinfo->vpi_events |= events;
4967 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4968 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4969 return (0);
4970 }
4971
4972 /*
4973 * Routine to create and manage a filesystem syncer vnode.
4974 */
4975 #define sync_close ((int (*)(struct vop_close_args *))nullop)
4976 static int sync_fsync(struct vop_fsync_args *);
4977 static int sync_inactive(struct vop_inactive_args *);
4978 static int sync_reclaim(struct vop_reclaim_args *);
4979
4980 static struct vop_vector sync_vnodeops = {
4981 .vop_bypass = VOP_EOPNOTSUPP,
4982 .vop_close = sync_close, /* close */
4983 .vop_fsync = sync_fsync, /* fsync */
4984 .vop_inactive = sync_inactive, /* inactive */
4985 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */
4986 .vop_reclaim = sync_reclaim, /* reclaim */
4987 .vop_lock1 = vop_stdlock, /* lock */
4988 .vop_unlock = vop_stdunlock, /* unlock */
4989 .vop_islocked = vop_stdislocked, /* islocked */
4990 };
4991 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
4992
4993 /*
4994 * Create a new filesystem syncer vnode for the specified mount point.
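 *
 * This is normally done from the mount path for filesystems that are
 * not mounted read-only, so that the syncer periodically pushes out
 * the new mount's dirty data (see sync_fsync() below).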
4995 */
4996 void
4997 vfs_allocate_syncvnode(struct mount *mp)
4998 {
4999 struct vnode *vp;
5000 struct bufobj *bo;
5001 static long start, incr, next;
5002 int error;
5003
5004 /* Allocate a new vnode */
5005 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5006 if (error != 0)
5007 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5008 vp->v_type = VNON;
5009 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5010 vp->v_vflag |= VV_FORCEINSMQ;
5011 error = insmntque1(vp, mp);
5012 if (error != 0)
5013 panic("vfs_allocate_syncvnode: insmntque() failed");
5014 vp->v_vflag &= ~VV_FORCEINSMQ;
5015 vn_set_state(vp, VSTATE_CONSTRUCTED);
5016 VOP_UNLOCK(vp);
5017 /*
5018 * Place the vnode onto the syncer worklist. We attempt to
5019 * scatter them about on the list so that they will go off
5020 * at evenly distributed times even if all the filesystems
5021 * are mounted at once.
5022 */
5023 next += incr;
5024 if (next == 0 || next > syncer_maxdelay) {
5025 start /= 2;
5026 incr /= 2;
5027 if (start == 0) {
5028 start = syncer_maxdelay / 2;
5029 incr = syncer_maxdelay;
5030 }
5031 next = start;
5032 }
5033 bo = &vp->v_bufobj;
5034 BO_LOCK(bo);
5035 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5036 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5037 mtx_lock(&sync_mtx);
5038 sync_vnode_count++;
5039 if (mp->mnt_syncer == NULL) {
5040 mp->mnt_syncer = vp;
5041 vp = NULL;
5042 }
5043 mtx_unlock(&sync_mtx);
5044 BO_UNLOCK(bo);
5045 if (vp != NULL) {
5046 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5047 vgone(vp);
5048 vput(vp);
5049 }
5050 }
5051
5052 void
5053 vfs_deallocate_syncvnode(struct mount *mp)
5054 {
5055 struct vnode *vp;
5056
5057 mtx_lock(&sync_mtx);
5058 vp = mp->mnt_syncer;
5059 if (vp != NULL)
5060 mp->mnt_syncer = NULL;
5061 mtx_unlock(&sync_mtx);
5062 if (vp != NULL)
5063 vrele(vp);
5064 }
5065
5066 /*
5067 * Do a lazy sync of the filesystem.
5068 */
5069 static int
5070 sync_fsync(struct vop_fsync_args *ap)
5071 {
5072 struct vnode *syncvp = ap->a_vp;
5073 struct mount *mp = syncvp->v_mount;
5074 int error, save;
5075 struct bufobj *bo;
5076
5077 /*
5078 * We only need to do something if this is a lazy evaluation.
5079 */
5080 if (ap->a_waitfor != MNT_LAZY)
5081 return (0);
5082
5083 /*
5084 * Move ourselves to the back of the sync list.
5085 */
5086 bo = &syncvp->v_bufobj;
5087 BO_LOCK(bo);
5088 vn_syncer_add_to_worklist(bo, syncdelay);
5089 BO_UNLOCK(bo);
5090
5091 /*
5092 * Walk the list of vnodes pushing all that are dirty and
5093 * not already on the sync list.
5094 */
5095 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5096 return (0);
5097 VOP_UNLOCK(syncvp);
5098 save = curthread_pflags_set(TDP_SYNCIO);
5099 /*
5100 * The filesystem at hand may be idle with free vnodes stored in the
5101 * batch. Return them instead of letting them stay there indefinitely.
5102 */
5103 vfs_periodic(mp, MNT_NOWAIT);
5104 error = VFS_SYNC(mp, MNT_LAZY);
5105 curthread_pflags_restore(save);
5106 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5107 vfs_unbusy(mp);
5108 return (error);
5109 }
5110
5111 /*
5112 * The syncer vnode is no longer referenced.
5113 */
5114 static int
5115 sync_inactive(struct vop_inactive_args *ap)
5116 {
5117
5118 vgone(ap->a_vp);
5119 return (0);
5120 }
5121
5122 /*
5123 * The syncer vnode is no longer needed and is being decommissioned.
5124 *
5125 * Modifications to the worklist must be protected by sync_mtx.
5126 */
5127 static int
5128 sync_reclaim(struct vop_reclaim_args *ap)
5129 {
5130 struct vnode *vp = ap->a_vp;
5131 struct bufobj *bo;
5132
5133 bo = &vp->v_bufobj;
5134 BO_LOCK(bo);
5135 mtx_lock(&sync_mtx);
5136 if (vp->v_mount->mnt_syncer == vp)
5137 vp->v_mount->mnt_syncer = NULL;
5138 if (bo->bo_flag & BO_ONWORKLST) {
5139 LIST_REMOVE(bo, bo_synclist);
5140 syncer_worklist_len--;
5141 sync_vnode_count--;
5142 bo->bo_flag &= ~BO_ONWORKLST;
5143 }
5144 mtx_unlock(&sync_mtx);
5145 BO_UNLOCK(bo);
5146
5147 return (0);
5148 }
5149
5150 int
5151 vn_need_pageq_flush(struct vnode *vp)
5152 {
5153 struct vm_object *obj;
5154
5155 obj = vp->v_object;
5156 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5157 vm_object_mightbedirty(obj));
5158 }
5159
5160 /*
5161 * Check if the vnode represents a disk device.
5162 */
5163 bool
5164 vn_isdisk_error(struct vnode *vp, int *errp)
5165 {
5166 int error;
5167
5168 if (vp->v_type != VCHR) {
5169 error = ENOTBLK;
5170 goto out;
5171 }
5172 error = 0;
5173 dev_lock();
5174 if (vp->v_rdev == NULL)
5175 error = ENXIO;
5176 else if (vp->v_rdev->si_devsw == NULL)
5177 error = ENXIO;
5178 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5179 error = ENOTBLK;
5180 dev_unlock();
5181 out:
5182 *errp = error;
5183 return (error == 0);
5184 }
5185
5186 bool
5187 vn_isdisk(struct vnode *vp)
5188 {
5189 int error;
5190
5191 return (vn_isdisk_error(vp, &error));
5192 }
5193
5194 /*
5195 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5196 * the comment above cache_fplookup for details.
5197 */
5198 int
5199 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5200 {
5201 int error;
5202
5203 VFS_SMR_ASSERT_ENTERED();
5204
5205 /* Check the owner. */
5206 if (cred->cr_uid == file_uid) {
5207 if (file_mode & S_IXUSR)
5208 return (0);
5209 goto out_error;
5210 }
5211
5212 /* Otherwise, check the groups (first match) */
5213 if (groupmember(file_gid, cred)) {
5214 if (file_mode & S_IXGRP)
5215 return (0);
5216 goto out_error;
5217 }
5218
5219 /* Otherwise, check everyone else. */
5220 if (file_mode & S_IXOTH)
5221 return (0);
5222 out_error:
5223 /*
5224 * Permission check failed, but it is possible the denial will get
5225 * overridden (e.g., when root is traversing through a 700 directory
5226 * owned by someone else).
5227 *
5228 * vaccess() calls priv_check_cred which in turn can descend into MAC
5229 * modules overriding this result. It is quite unclear what semantics
5230 * they are allowed to have, thus for safety we don't call them
5231 * from within the SMR section. This also means if any such modules
5232 * are present, we have to let the regular lookup decide.
5233 */
5234 error = priv_check_cred_vfs_lookup_nomac(cred);
5235 switch (error) {
5236 case 0:
5237 return (0);
5238 case EAGAIN:
5239 /*
5240 * MAC modules present.
5241 */
5242 return (EAGAIN);
5243 case EPERM:
5244 return (EACCES);
5245 default:
5246 return (error);
5247 }
5248 }
5249
5250 /*
5251 * Common filesystem object access control check routine. Accepts a
5252 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5253 * Returns 0 on success, or an errno on failure.
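 *
 * As a concrete example, for a mode 0640 regular file: the owner
 * requesting VREAD|VWRITE collects both bits (plus VADMIN and VAPPEND)
 * from the S_IRUSR/S_IWUSR tests and the call returns 0, while a group
 * member requesting VWRITE only collects VREAD into dac_granted, falls
 * through to the privilege checks and, without PRIV_VFS_WRITE, fails
 * with EACCES.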
5254 */ 5255 int 5256 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5257 accmode_t accmode, struct ucred *cred) 5258 { 5259 accmode_t dac_granted; 5260 accmode_t priv_granted; 5261 5262 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5263 ("invalid bit in accmode")); 5264 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5265 ("VAPPEND without VWRITE")); 5266 5267 /* 5268 * Look for a normal, non-privileged way to access the file/directory 5269 * as requested. If it exists, go with that. 5270 */ 5271 5272 dac_granted = 0; 5273 5274 /* Check the owner. */ 5275 if (cred->cr_uid == file_uid) { 5276 dac_granted |= VADMIN; 5277 if (file_mode & S_IXUSR) 5278 dac_granted |= VEXEC; 5279 if (file_mode & S_IRUSR) 5280 dac_granted |= VREAD; 5281 if (file_mode & S_IWUSR) 5282 dac_granted |= (VWRITE | VAPPEND); 5283 5284 if ((accmode & dac_granted) == accmode) 5285 return (0); 5286 5287 goto privcheck; 5288 } 5289 5290 /* Otherwise, check the groups (first match) */ 5291 if (groupmember(file_gid, cred)) { 5292 if (file_mode & S_IXGRP) 5293 dac_granted |= VEXEC; 5294 if (file_mode & S_IRGRP) 5295 dac_granted |= VREAD; 5296 if (file_mode & S_IWGRP) 5297 dac_granted |= (VWRITE | VAPPEND); 5298 5299 if ((accmode & dac_granted) == accmode) 5300 return (0); 5301 5302 goto privcheck; 5303 } 5304 5305 /* Otherwise, check everyone else. */ 5306 if (file_mode & S_IXOTH) 5307 dac_granted |= VEXEC; 5308 if (file_mode & S_IROTH) 5309 dac_granted |= VREAD; 5310 if (file_mode & S_IWOTH) 5311 dac_granted |= (VWRITE | VAPPEND); 5312 if ((accmode & dac_granted) == accmode) 5313 return (0); 5314 5315 privcheck: 5316 /* 5317 * Build a privilege mask to determine if the set of privileges 5318 * satisfies the requirements when combined with the granted mask 5319 * from above. For each privilege, if the privilege is required, 5320 * bitwise or the request type onto the priv_granted mask. 5321 */ 5322 priv_granted = 0; 5323 5324 if (type == VDIR) { 5325 /* 5326 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5327 * requests, instead of PRIV_VFS_EXEC. 5328 */ 5329 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5330 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5331 priv_granted |= VEXEC; 5332 } else { 5333 /* 5334 * Ensure that at least one execute bit is on. Otherwise, 5335 * a privileged user will always succeed, and we don't want 5336 * this to happen unless the file really is executable. 5337 */ 5338 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5339 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5340 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5341 priv_granted |= VEXEC; 5342 } 5343 5344 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5345 !priv_check_cred(cred, PRIV_VFS_READ)) 5346 priv_granted |= VREAD; 5347 5348 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5349 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5350 priv_granted |= (VWRITE | VAPPEND); 5351 5352 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5353 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5354 priv_granted |= VADMIN; 5355 5356 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5357 return (0); 5358 } 5359 5360 return ((accmode & VADMIN) ? EPERM : EACCES); 5361 } 5362 5363 /* 5364 * Credential check based on process requesting service, and per-attribute 5365 * permissions. 
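 *
 * System namespace attributes require PRIV_VFS_EXTATTR_SYSTEM (which,
 * per the comment below, is not granted inside a jail), user namespace
 * attributes fall back to a normal VOP_ACCESS() check with the
 * caller's requested access mode, and any other namespace is rejected
 * with EPERM.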
5366 */ 5367 int 5368 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5369 struct thread *td, accmode_t accmode) 5370 { 5371 5372 /* 5373 * Kernel-invoked always succeeds. 5374 */ 5375 if (cred == NOCRED) 5376 return (0); 5377 5378 /* 5379 * Do not allow privileged processes in jail to directly manipulate 5380 * system attributes. 5381 */ 5382 switch (attrnamespace) { 5383 case EXTATTR_NAMESPACE_SYSTEM: 5384 /* Potentially should be: return (EPERM); */ 5385 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5386 case EXTATTR_NAMESPACE_USER: 5387 return (VOP_ACCESS(vp, accmode, cred, td)); 5388 default: 5389 return (EPERM); 5390 } 5391 } 5392 5393 #ifdef DEBUG_VFS_LOCKS 5394 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5395 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5396 "Drop into debugger on lock violation"); 5397 5398 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5399 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5400 0, "Check for interlock across VOPs"); 5401 5402 int vfs_badlock_print = 1; /* Print lock violations. */ 5403 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5404 0, "Print lock violations"); 5405 5406 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5407 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5408 0, "Print vnode details on lock violations"); 5409 5410 #ifdef KDB 5411 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5412 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5413 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5414 #endif 5415 5416 static void 5417 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5418 { 5419 5420 #ifdef KDB 5421 if (vfs_badlock_backtrace) 5422 kdb_backtrace(); 5423 #endif 5424 if (vfs_badlock_vnode) 5425 vn_printf(vp, "vnode "); 5426 if (vfs_badlock_print) 5427 printf("%s: %p %s\n", str, (void *)vp, msg); 5428 if (vfs_badlock_ddb) 5429 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5430 } 5431 5432 void 5433 assert_vi_locked(struct vnode *vp, const char *str) 5434 { 5435 5436 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5437 vfs_badlock("interlock is not locked but should be", str, vp); 5438 } 5439 5440 void 5441 assert_vi_unlocked(struct vnode *vp, const char *str) 5442 { 5443 5444 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5445 vfs_badlock("interlock is locked but should not be", str, vp); 5446 } 5447 5448 void 5449 assert_vop_locked(struct vnode *vp, const char *str) 5450 { 5451 int locked; 5452 5453 if (KERNEL_PANICKED() || vp == NULL) 5454 return; 5455 5456 locked = VOP_ISLOCKED(vp); 5457 if (locked == 0 || locked == LK_EXCLOTHER) 5458 vfs_badlock("is not locked but should be", str, vp); 5459 } 5460 5461 void 5462 assert_vop_unlocked(struct vnode *vp, const char *str) 5463 { 5464 if (KERNEL_PANICKED() || vp == NULL) 5465 return; 5466 5467 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5468 vfs_badlock("is locked but should not be", str, vp); 5469 } 5470 5471 void 5472 assert_vop_elocked(struct vnode *vp, const char *str) 5473 { 5474 if (KERNEL_PANICKED() || vp == NULL) 5475 return; 5476 5477 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5478 vfs_badlock("is not exclusive locked but should be", str, vp); 5479 } 5480 #endif /* DEBUG_VFS_LOCKS */ 5481 5482 void 5483 vop_rename_fail(struct vop_rename_args *ap) 5484 { 5485 5486 if (ap->a_tvp != 
NULL) 5487 vput(ap->a_tvp); 5488 if (ap->a_tdvp == ap->a_tvp) 5489 vrele(ap->a_tdvp); 5490 else 5491 vput(ap->a_tdvp); 5492 vrele(ap->a_fdvp); 5493 vrele(ap->a_fvp); 5494 } 5495 5496 void 5497 vop_rename_pre(void *ap) 5498 { 5499 struct vop_rename_args *a = ap; 5500 5501 #ifdef DEBUG_VFS_LOCKS 5502 if (a->a_tvp) 5503 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5504 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5505 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5506 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5507 5508 /* Check the source (from). */ 5509 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5510 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5511 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5512 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5513 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5514 5515 /* Check the target. */ 5516 if (a->a_tvp) 5517 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5518 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5519 #endif 5520 /* 5521 * It may be tempting to add vn_seqc_write_begin/end calls here and 5522 * in vop_rename_post but that's not going to work out since some 5523 * filesystems relookup vnodes mid-rename. This is probably a bug. 5524 * 5525 * For now filesystems are expected to do the relevant calls after they 5526 * decide what vnodes to operate on. 5527 */ 5528 if (a->a_tdvp != a->a_fdvp) 5529 vhold(a->a_fdvp); 5530 if (a->a_tvp != a->a_fvp) 5531 vhold(a->a_fvp); 5532 vhold(a->a_tdvp); 5533 if (a->a_tvp) 5534 vhold(a->a_tvp); 5535 } 5536 5537 #ifdef DEBUG_VFS_LOCKS 5538 void 5539 vop_fplookup_vexec_debugpre(void *ap __unused) 5540 { 5541 5542 VFS_SMR_ASSERT_ENTERED(); 5543 } 5544 5545 void 5546 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5547 { 5548 5549 VFS_SMR_ASSERT_ENTERED(); 5550 } 5551 5552 void 5553 vop_fplookup_symlink_debugpre(void *ap __unused) 5554 { 5555 5556 VFS_SMR_ASSERT_ENTERED(); 5557 } 5558 5559 void 5560 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5561 { 5562 5563 VFS_SMR_ASSERT_ENTERED(); 5564 } 5565 5566 static void 5567 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5568 { 5569 if (vp->v_type == VCHR) 5570 ; 5571 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5572 ASSERT_VOP_LOCKED(vp, name); 5573 else 5574 ASSERT_VOP_ELOCKED(vp, name); 5575 } 5576 5577 void 5578 vop_fsync_debugpre(void *a) 5579 { 5580 struct vop_fsync_args *ap; 5581 5582 ap = a; 5583 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5584 } 5585 5586 void 5587 vop_fsync_debugpost(void *a, int rc __unused) 5588 { 5589 struct vop_fsync_args *ap; 5590 5591 ap = a; 5592 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5593 } 5594 5595 void 5596 vop_fdatasync_debugpre(void *a) 5597 { 5598 struct vop_fdatasync_args *ap; 5599 5600 ap = a; 5601 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5602 } 5603 5604 void 5605 vop_fdatasync_debugpost(void *a, int rc __unused) 5606 { 5607 struct vop_fdatasync_args *ap; 5608 5609 ap = a; 5610 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5611 } 5612 5613 void 5614 vop_strategy_debugpre(void *ap) 5615 { 5616 struct vop_strategy_args *a; 5617 struct buf *bp; 5618 5619 a = ap; 5620 bp = a->a_bp; 5621 5622 /* 5623 * Cluster ops lock their component buffers but not the IO container. 
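 *
 * Such cluster buffers are therefore exempt from the check below; any
 * other buffer handed to VOP_STRATEGY() is expected to be locked by
 * the caller.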
5624 */ 5625 if ((bp->b_flags & B_CLUSTER) != 0) 5626 return; 5627 5628 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5629 if (vfs_badlock_print) 5630 printf( 5631 "VOP_STRATEGY: bp is not locked but should be\n"); 5632 if (vfs_badlock_ddb) 5633 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5634 } 5635 } 5636 5637 void 5638 vop_lock_debugpre(void *ap) 5639 { 5640 struct vop_lock1_args *a = ap; 5641 5642 if ((a->a_flags & LK_INTERLOCK) == 0) 5643 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5644 else 5645 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5646 } 5647 5648 void 5649 vop_lock_debugpost(void *ap, int rc) 5650 { 5651 struct vop_lock1_args *a = ap; 5652 5653 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5654 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5655 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5656 } 5657 5658 void 5659 vop_unlock_debugpre(void *ap) 5660 { 5661 struct vop_unlock_args *a = ap; 5662 struct vnode *vp = a->a_vp; 5663 5664 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5665 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5666 } 5667 5668 void 5669 vop_need_inactive_debugpre(void *ap) 5670 { 5671 struct vop_need_inactive_args *a = ap; 5672 5673 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5674 } 5675 5676 void 5677 vop_need_inactive_debugpost(void *ap, int rc) 5678 { 5679 struct vop_need_inactive_args *a = ap; 5680 5681 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5682 } 5683 #endif 5684 5685 void 5686 vop_create_pre(void *ap) 5687 { 5688 struct vop_create_args *a; 5689 struct vnode *dvp; 5690 5691 a = ap; 5692 dvp = a->a_dvp; 5693 vn_seqc_write_begin(dvp); 5694 } 5695 5696 void 5697 vop_create_post(void *ap, int rc) 5698 { 5699 struct vop_create_args *a; 5700 struct vnode *dvp; 5701 5702 a = ap; 5703 dvp = a->a_dvp; 5704 vn_seqc_write_end(dvp); 5705 if (!rc) 5706 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5707 } 5708 5709 void 5710 vop_whiteout_pre(void *ap) 5711 { 5712 struct vop_whiteout_args *a; 5713 struct vnode *dvp; 5714 5715 a = ap; 5716 dvp = a->a_dvp; 5717 vn_seqc_write_begin(dvp); 5718 } 5719 5720 void 5721 vop_whiteout_post(void *ap, int rc) 5722 { 5723 struct vop_whiteout_args *a; 5724 struct vnode *dvp; 5725 5726 a = ap; 5727 dvp = a->a_dvp; 5728 vn_seqc_write_end(dvp); 5729 } 5730 5731 void 5732 vop_deleteextattr_pre(void *ap) 5733 { 5734 struct vop_deleteextattr_args *a; 5735 struct vnode *vp; 5736 5737 a = ap; 5738 vp = a->a_vp; 5739 vn_seqc_write_begin(vp); 5740 } 5741 5742 void 5743 vop_deleteextattr_post(void *ap, int rc) 5744 { 5745 struct vop_deleteextattr_args *a; 5746 struct vnode *vp; 5747 5748 a = ap; 5749 vp = a->a_vp; 5750 vn_seqc_write_end(vp); 5751 if (!rc) 5752 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5753 } 5754 5755 void 5756 vop_link_pre(void *ap) 5757 { 5758 struct vop_link_args *a; 5759 struct vnode *vp, *tdvp; 5760 5761 a = ap; 5762 vp = a->a_vp; 5763 tdvp = a->a_tdvp; 5764 vn_seqc_write_begin(vp); 5765 vn_seqc_write_begin(tdvp); 5766 } 5767 5768 void 5769 vop_link_post(void *ap, int rc) 5770 { 5771 struct vop_link_args *a; 5772 struct vnode *vp, *tdvp; 5773 5774 a = ap; 5775 vp = a->a_vp; 5776 tdvp = a->a_tdvp; 5777 vn_seqc_write_end(vp); 5778 vn_seqc_write_end(tdvp); 5779 if (!rc) { 5780 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5781 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5782 } 5783 } 5784 5785 void 5786 vop_mkdir_pre(void *ap) 5787 { 5788 struct vop_mkdir_args *a; 5789 struct vnode *dvp; 5790 5791 a = ap; 5792 dvp = a->a_dvp; 5793 vn_seqc_write_begin(dvp); 5794 } 5795 5796 void 5797 vop_mkdir_post(void *ap, int rc) 5798 { 5799 struct vop_mkdir_args *a; 5800 
struct vnode *dvp; 5801 5802 a = ap; 5803 dvp = a->a_dvp; 5804 vn_seqc_write_end(dvp); 5805 if (!rc) 5806 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5807 } 5808 5809 #ifdef DEBUG_VFS_LOCKS 5810 void 5811 vop_mkdir_debugpost(void *ap, int rc) 5812 { 5813 struct vop_mkdir_args *a; 5814 5815 a = ap; 5816 if (!rc) 5817 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 5818 } 5819 #endif 5820 5821 void 5822 vop_mknod_pre(void *ap) 5823 { 5824 struct vop_mknod_args *a; 5825 struct vnode *dvp; 5826 5827 a = ap; 5828 dvp = a->a_dvp; 5829 vn_seqc_write_begin(dvp); 5830 } 5831 5832 void 5833 vop_mknod_post(void *ap, int rc) 5834 { 5835 struct vop_mknod_args *a; 5836 struct vnode *dvp; 5837 5838 a = ap; 5839 dvp = a->a_dvp; 5840 vn_seqc_write_end(dvp); 5841 if (!rc) 5842 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5843 } 5844 5845 void 5846 vop_reclaim_post(void *ap, int rc) 5847 { 5848 struct vop_reclaim_args *a; 5849 struct vnode *vp; 5850 5851 a = ap; 5852 vp = a->a_vp; 5853 ASSERT_VOP_IN_SEQC(vp); 5854 if (!rc) 5855 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5856 } 5857 5858 void 5859 vop_remove_pre(void *ap) 5860 { 5861 struct vop_remove_args *a; 5862 struct vnode *dvp, *vp; 5863 5864 a = ap; 5865 dvp = a->a_dvp; 5866 vp = a->a_vp; 5867 vn_seqc_write_begin(dvp); 5868 vn_seqc_write_begin(vp); 5869 } 5870 5871 void 5872 vop_remove_post(void *ap, int rc) 5873 { 5874 struct vop_remove_args *a; 5875 struct vnode *dvp, *vp; 5876 5877 a = ap; 5878 dvp = a->a_dvp; 5879 vp = a->a_vp; 5880 vn_seqc_write_end(dvp); 5881 vn_seqc_write_end(vp); 5882 if (!rc) { 5883 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5884 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5885 } 5886 } 5887 5888 void 5889 vop_rename_post(void *ap, int rc) 5890 { 5891 struct vop_rename_args *a = ap; 5892 long hint; 5893 5894 if (!rc) { 5895 hint = NOTE_WRITE; 5896 if (a->a_fdvp == a->a_tdvp) { 5897 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5898 hint |= NOTE_LINK; 5899 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5900 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5901 } else { 5902 hint |= NOTE_EXTEND; 5903 if (a->a_fvp->v_type == VDIR) 5904 hint |= NOTE_LINK; 5905 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5906 5907 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5908 a->a_tvp->v_type == VDIR) 5909 hint &= ~NOTE_LINK; 5910 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5911 } 5912 5913 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5914 if (a->a_tvp) 5915 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5916 } 5917 if (a->a_tdvp != a->a_fdvp) 5918 vdrop(a->a_fdvp); 5919 if (a->a_tvp != a->a_fvp) 5920 vdrop(a->a_fvp); 5921 vdrop(a->a_tdvp); 5922 if (a->a_tvp) 5923 vdrop(a->a_tvp); 5924 } 5925 5926 void 5927 vop_rmdir_pre(void *ap) 5928 { 5929 struct vop_rmdir_args *a; 5930 struct vnode *dvp, *vp; 5931 5932 a = ap; 5933 dvp = a->a_dvp; 5934 vp = a->a_vp; 5935 vn_seqc_write_begin(dvp); 5936 vn_seqc_write_begin(vp); 5937 } 5938 5939 void 5940 vop_rmdir_post(void *ap, int rc) 5941 { 5942 struct vop_rmdir_args *a; 5943 struct vnode *dvp, *vp; 5944 5945 a = ap; 5946 dvp = a->a_dvp; 5947 vp = a->a_vp; 5948 vn_seqc_write_end(dvp); 5949 vn_seqc_write_end(vp); 5950 if (!rc) { 5951 vp->v_vflag |= VV_UNLINKED; 5952 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5953 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5954 } 5955 } 5956 5957 void 5958 vop_setattr_pre(void *ap) 5959 { 5960 struct vop_setattr_args *a; 5961 struct vnode *vp; 5962 5963 a = ap; 5964 vp = a->a_vp; 5965 vn_seqc_write_begin(vp); 5966 } 5967 5968 void 5969 vop_setattr_post(void *ap, int rc) 5970 { 5971 struct vop_setattr_args *a; 5972 struct vnode *vp; 5973 5974 
a = ap; 5975 vp = a->a_vp; 5976 vn_seqc_write_end(vp); 5977 if (!rc) 5978 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5979 } 5980 5981 void 5982 vop_setacl_pre(void *ap) 5983 { 5984 struct vop_setacl_args *a; 5985 struct vnode *vp; 5986 5987 a = ap; 5988 vp = a->a_vp; 5989 vn_seqc_write_begin(vp); 5990 } 5991 5992 void 5993 vop_setacl_post(void *ap, int rc __unused) 5994 { 5995 struct vop_setacl_args *a; 5996 struct vnode *vp; 5997 5998 a = ap; 5999 vp = a->a_vp; 6000 vn_seqc_write_end(vp); 6001 } 6002 6003 void 6004 vop_setextattr_pre(void *ap) 6005 { 6006 struct vop_setextattr_args *a; 6007 struct vnode *vp; 6008 6009 a = ap; 6010 vp = a->a_vp; 6011 vn_seqc_write_begin(vp); 6012 } 6013 6014 void 6015 vop_setextattr_post(void *ap, int rc) 6016 { 6017 struct vop_setextattr_args *a; 6018 struct vnode *vp; 6019 6020 a = ap; 6021 vp = a->a_vp; 6022 vn_seqc_write_end(vp); 6023 if (!rc) 6024 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6025 } 6026 6027 void 6028 vop_symlink_pre(void *ap) 6029 { 6030 struct vop_symlink_args *a; 6031 struct vnode *dvp; 6032 6033 a = ap; 6034 dvp = a->a_dvp; 6035 vn_seqc_write_begin(dvp); 6036 } 6037 6038 void 6039 vop_symlink_post(void *ap, int rc) 6040 { 6041 struct vop_symlink_args *a; 6042 struct vnode *dvp; 6043 6044 a = ap; 6045 dvp = a->a_dvp; 6046 vn_seqc_write_end(dvp); 6047 if (!rc) 6048 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6049 } 6050 6051 void 6052 vop_open_post(void *ap, int rc) 6053 { 6054 struct vop_open_args *a = ap; 6055 6056 if (!rc) 6057 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6058 } 6059 6060 void 6061 vop_close_post(void *ap, int rc) 6062 { 6063 struct vop_close_args *a = ap; 6064 6065 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6066 !VN_IS_DOOMED(a->a_vp))) { 6067 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6068 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6069 } 6070 } 6071 6072 void 6073 vop_read_post(void *ap, int rc) 6074 { 6075 struct vop_read_args *a = ap; 6076 6077 if (!rc) 6078 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6079 } 6080 6081 void 6082 vop_read_pgcache_post(void *ap, int rc) 6083 { 6084 struct vop_read_pgcache_args *a = ap; 6085 6086 if (!rc) 6087 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6088 } 6089 6090 void 6091 vop_readdir_post(void *ap, int rc) 6092 { 6093 struct vop_readdir_args *a = ap; 6094 6095 if (!rc) 6096 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6097 } 6098 6099 static struct knlist fs_knlist; 6100 6101 static void 6102 vfs_event_init(void *arg) 6103 { 6104 knlist_init_mtx(&fs_knlist, NULL); 6105 } 6106 /* XXX - correct order? 
*/ 6107 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6108 6109 void 6110 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6111 { 6112 6113 KNOTE_UNLOCKED(&fs_knlist, event); 6114 } 6115 6116 static int filt_fsattach(struct knote *kn); 6117 static void filt_fsdetach(struct knote *kn); 6118 static int filt_fsevent(struct knote *kn, long hint); 6119 6120 struct filterops fs_filtops = { 6121 .f_isfd = 0, 6122 .f_attach = filt_fsattach, 6123 .f_detach = filt_fsdetach, 6124 .f_event = filt_fsevent 6125 }; 6126 6127 static int 6128 filt_fsattach(struct knote *kn) 6129 { 6130 6131 kn->kn_flags |= EV_CLEAR; 6132 knlist_add(&fs_knlist, kn, 0); 6133 return (0); 6134 } 6135 6136 static void 6137 filt_fsdetach(struct knote *kn) 6138 { 6139 6140 knlist_remove(&fs_knlist, kn, 0); 6141 } 6142 6143 static int 6144 filt_fsevent(struct knote *kn, long hint) 6145 { 6146 6147 kn->kn_fflags |= kn->kn_sfflags & hint; 6148 6149 return (kn->kn_fflags != 0); 6150 } 6151 6152 static int 6153 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6154 { 6155 struct vfsidctl vc; 6156 int error; 6157 struct mount *mp; 6158 6159 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6160 if (error) 6161 return (error); 6162 if (vc.vc_vers != VFS_CTL_VERS1) 6163 return (EINVAL); 6164 mp = vfs_getvfs(&vc.vc_fsid); 6165 if (mp == NULL) 6166 return (ENOENT); 6167 /* ensure that a specific sysctl goes to the right filesystem. */ 6168 if (strcmp(vc.vc_fstypename, "*") != 0 && 6169 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6170 vfs_rel(mp); 6171 return (EINVAL); 6172 } 6173 VCTLTOREQ(&vc, req); 6174 error = VFS_SYSCTL(mp, vc.vc_op, req); 6175 vfs_rel(mp); 6176 return (error); 6177 } 6178 6179 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6180 NULL, 0, sysctl_vfs_ctl, "", 6181 "Sysctl by fsid"); 6182 6183 /* 6184 * Function to initialize a va_filerev field sensibly. 6185 * XXX: Wouldn't a random number make a lot more sense ?? 
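 *
 * The returned value packs the boot-relative seconds into the upper
 * 32 bits and the top half of the bintime fraction into the lower 32
 * bits, i.e. filerev = (sec << 32) | (frac >> 32), which increases
 * monotonically within a single boot.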
6186 */ 6187 u_quad_t 6188 init_va_filerev(void) 6189 { 6190 struct bintime bt; 6191 6192 getbinuptime(&bt); 6193 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6194 } 6195 6196 static int filt_vfsread(struct knote *kn, long hint); 6197 static int filt_vfswrite(struct knote *kn, long hint); 6198 static int filt_vfsvnode(struct knote *kn, long hint); 6199 static void filt_vfsdetach(struct knote *kn); 6200 static struct filterops vfsread_filtops = { 6201 .f_isfd = 1, 6202 .f_detach = filt_vfsdetach, 6203 .f_event = filt_vfsread 6204 }; 6205 static struct filterops vfswrite_filtops = { 6206 .f_isfd = 1, 6207 .f_detach = filt_vfsdetach, 6208 .f_event = filt_vfswrite 6209 }; 6210 static struct filterops vfsvnode_filtops = { 6211 .f_isfd = 1, 6212 .f_detach = filt_vfsdetach, 6213 .f_event = filt_vfsvnode 6214 }; 6215 6216 static void 6217 vfs_knllock(void *arg) 6218 { 6219 struct vnode *vp = arg; 6220 6221 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6222 } 6223 6224 static void 6225 vfs_knlunlock(void *arg) 6226 { 6227 struct vnode *vp = arg; 6228 6229 VOP_UNLOCK(vp); 6230 } 6231 6232 static void 6233 vfs_knl_assert_lock(void *arg, int what) 6234 { 6235 #ifdef DEBUG_VFS_LOCKS 6236 struct vnode *vp = arg; 6237 6238 if (what == LA_LOCKED) 6239 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6240 else 6241 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6242 #endif 6243 } 6244 6245 int 6246 vfs_kqfilter(struct vop_kqfilter_args *ap) 6247 { 6248 struct vnode *vp = ap->a_vp; 6249 struct knote *kn = ap->a_kn; 6250 struct knlist *knl; 6251 6252 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6253 kn->kn_filter != EVFILT_WRITE), 6254 ("READ/WRITE filter on a FIFO leaked through")); 6255 switch (kn->kn_filter) { 6256 case EVFILT_READ: 6257 kn->kn_fop = &vfsread_filtops; 6258 break; 6259 case EVFILT_WRITE: 6260 kn->kn_fop = &vfswrite_filtops; 6261 break; 6262 case EVFILT_VNODE: 6263 kn->kn_fop = &vfsvnode_filtops; 6264 break; 6265 default: 6266 return (EINVAL); 6267 } 6268 6269 kn->kn_hook = (caddr_t)vp; 6270 6271 v_addpollinfo(vp); 6272 if (vp->v_pollinfo == NULL) 6273 return (ENOMEM); 6274 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6275 vhold(vp); 6276 knlist_add(knl, kn, 0); 6277 6278 return (0); 6279 } 6280 6281 /* 6282 * Detach knote from vnode 6283 */ 6284 static void 6285 filt_vfsdetach(struct knote *kn) 6286 { 6287 struct vnode *vp = (struct vnode *)kn->kn_hook; 6288 6289 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6290 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6291 vdrop(vp); 6292 } 6293 6294 /*ARGSUSED*/ 6295 static int 6296 filt_vfsread(struct knote *kn, long hint) 6297 { 6298 struct vnode *vp = (struct vnode *)kn->kn_hook; 6299 off_t size; 6300 int res; 6301 6302 /* 6303 * filesystem is gone, so set the EOF flag and schedule 6304 * the knote for deletion. 
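 * A hint of 0 means we are being called from a kevent scan rather than from an event notification.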
6305 */ 6306 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6307 VI_LOCK(vp); 6308 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6309 VI_UNLOCK(vp); 6310 return (1); 6311 } 6312 6313 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6314 return (0); 6315 6316 VI_LOCK(vp); 6317 kn->kn_data = size - kn->kn_fp->f_offset; 6318 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6319 VI_UNLOCK(vp); 6320 return (res); 6321 } 6322 6323 /*ARGSUSED*/ 6324 static int 6325 filt_vfswrite(struct knote *kn, long hint) 6326 { 6327 struct vnode *vp = (struct vnode *)kn->kn_hook; 6328 6329 VI_LOCK(vp); 6330 6331 /* 6332 * filesystem is gone, so set the EOF flag and schedule 6333 * the knote for deletion. 6334 */ 6335 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6336 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6337 6338 kn->kn_data = 0; 6339 VI_UNLOCK(vp); 6340 return (1); 6341 } 6342 6343 static int 6344 filt_vfsvnode(struct knote *kn, long hint) 6345 { 6346 struct vnode *vp = (struct vnode *)kn->kn_hook; 6347 int res; 6348 6349 VI_LOCK(vp); 6350 if (kn->kn_sfflags & hint) 6351 kn->kn_fflags |= hint; 6352 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6353 kn->kn_flags |= EV_EOF; 6354 VI_UNLOCK(vp); 6355 return (1); 6356 } 6357 res = (kn->kn_fflags != 0); 6358 VI_UNLOCK(vp); 6359 return (res); 6360 } 6361 6362 /* 6363 * Returns whether the directory is empty or not. 6364 * If it is empty, the return value is 0; otherwise 6365 * the return value is an error value (which may 6366 * be ENOTEMPTY). 6367 */ 6368 int 6369 vfs_emptydir(struct vnode *vp) 6370 { 6371 struct uio uio; 6372 struct iovec iov; 6373 struct dirent *dirent, *dp, *endp; 6374 int error, eof; 6375 6376 error = 0; 6377 eof = 0; 6378 6379 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6380 VNPASS(vp->v_type == VDIR, vp); 6381 6382 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6383 iov.iov_base = dirent; 6384 iov.iov_len = sizeof(struct dirent); 6385 6386 uio.uio_iov = &iov; 6387 uio.uio_iovcnt = 1; 6388 uio.uio_offset = 0; 6389 uio.uio_resid = sizeof(struct dirent); 6390 uio.uio_segflg = UIO_SYSSPACE; 6391 uio.uio_rw = UIO_READ; 6392 uio.uio_td = curthread; 6393 6394 while (eof == 0 && error == 0) { 6395 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6396 NULL, NULL); 6397 if (error != 0) 6398 break; 6399 endp = (void *)((uint8_t *)dirent + 6400 sizeof(struct dirent) - uio.uio_resid); 6401 for (dp = dirent; dp < endp; 6402 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6403 if (dp->d_type == DT_WHT) 6404 continue; 6405 if (dp->d_namlen == 0) 6406 continue; 6407 if (dp->d_type != DT_DIR && 6408 dp->d_type != DT_UNKNOWN) { 6409 error = ENOTEMPTY; 6410 break; 6411 } 6412 if (dp->d_namlen > 2) { 6413 error = ENOTEMPTY; 6414 break; 6415 } 6416 if (dp->d_namlen == 1 && 6417 dp->d_name[0] != '.') { 6418 error = ENOTEMPTY; 6419 break; 6420 } 6421 if (dp->d_namlen == 2 && 6422 dp->d_name[1] != '.') { 6423 error = ENOTEMPTY; 6424 break; 6425 } 6426 uio.uio_resid = sizeof(struct dirent); 6427 } 6428 } 6429 free(dirent, M_TEMP); 6430 return (error); 6431 } 6432 6433 int 6434 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6435 { 6436 int error; 6437 6438 if (dp->d_reclen > ap->a_uio->uio_resid) 6439 return (ENAMETOOLONG); 6440 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6441 if (error) { 6442 if (ap->a_ncookies != NULL) { 6443 if (ap->a_cookies != NULL) 6444 free(ap->a_cookies, M_TEMP); 6445 ap->a_cookies = NULL; 6446 *ap->a_ncookies = 0; 6447 } 
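/* Cookie state was reset above; just hand the error back to the caller. */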
6448 return (error); 6449 } 6450 if (ap->a_ncookies == NULL) 6451 return (0); 6452 6453 KASSERT(ap->a_cookies, 6454 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6455 6456 *ap->a_cookies = realloc(*ap->a_cookies, 6457 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6458 (*ap->a_cookies)[*ap->a_ncookies] = off; 6459 *ap->a_ncookies += 1; 6460 return (0); 6461 } 6462 6463 /* 6464 * The purpose of this routine is to remove granularity from accmode_t, 6465 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6466 * VADMIN and VAPPEND. 6467 * 6468 * If it returns 0, the caller is supposed to continue with the usual 6469 * access checks using 'accmode' as modified by this routine. If it 6470 * returns nonzero value, the caller is supposed to return that value 6471 * as errno. 6472 * 6473 * Note that after this routine runs, accmode may be zero. 6474 */ 6475 int 6476 vfs_unixify_accmode(accmode_t *accmode) 6477 { 6478 /* 6479 * There is no way to specify explicit "deny" rule using 6480 * file mode or POSIX.1e ACLs. 6481 */ 6482 if (*accmode & VEXPLICIT_DENY) { 6483 *accmode = 0; 6484 return (0); 6485 } 6486 6487 /* 6488 * None of these can be translated into usual access bits. 6489 * Also, the common case for NFSv4 ACLs is to not contain 6490 * either of these bits. Caller should check for VWRITE 6491 * on the containing directory instead. 6492 */ 6493 if (*accmode & (VDELETE_CHILD | VDELETE)) 6494 return (EPERM); 6495 6496 if (*accmode & VADMIN_PERMS) { 6497 *accmode &= ~VADMIN_PERMS; 6498 *accmode |= VADMIN; 6499 } 6500 6501 /* 6502 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6503 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6504 */ 6505 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6506 6507 return (0); 6508 } 6509 6510 /* 6511 * Clear out a doomed vnode (if any) and replace it with a new one as long 6512 * as the fs is not being unmounted. Return the root vnode to the caller. 6513 */ 6514 static int __noinline 6515 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6516 { 6517 struct vnode *vp; 6518 int error; 6519 6520 restart: 6521 if (mp->mnt_rootvnode != NULL) { 6522 MNT_ILOCK(mp); 6523 vp = mp->mnt_rootvnode; 6524 if (vp != NULL) { 6525 if (!VN_IS_DOOMED(vp)) { 6526 vrefact(vp); 6527 MNT_IUNLOCK(mp); 6528 error = vn_lock(vp, flags); 6529 if (error == 0) { 6530 *vpp = vp; 6531 return (0); 6532 } 6533 vrele(vp); 6534 goto restart; 6535 } 6536 /* 6537 * Clear the old one. 
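 * The cached root is doomed: clear the pointer, wait for any lockless vfs_cache_root() callers to drain past the barrier, then drop the reference.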
6538 */ 6539 mp->mnt_rootvnode = NULL; 6540 } 6541 MNT_IUNLOCK(mp); 6542 if (vp != NULL) { 6543 vfs_op_barrier_wait(mp); 6544 vrele(vp); 6545 } 6546 } 6547 error = VFS_CACHEDROOT(mp, flags, vpp); 6548 if (error != 0) 6549 return (error); 6550 if (mp->mnt_vfs_ops == 0) { 6551 MNT_ILOCK(mp); 6552 if (mp->mnt_vfs_ops != 0) { 6553 MNT_IUNLOCK(mp); 6554 return (0); 6555 } 6556 if (mp->mnt_rootvnode == NULL) { 6557 vrefact(*vpp); 6558 mp->mnt_rootvnode = *vpp; 6559 } else { 6560 if (mp->mnt_rootvnode != *vpp) { 6561 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6562 panic("%s: mismatch between vnode returned " 6563 " by VFS_CACHEDROOT and the one cached " 6564 " (%p != %p)", 6565 __func__, *vpp, mp->mnt_rootvnode); 6566 } 6567 } 6568 } 6569 MNT_IUNLOCK(mp); 6570 } 6571 return (0); 6572 } 6573 6574 int 6575 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6576 { 6577 struct mount_pcpu *mpcpu; 6578 struct vnode *vp; 6579 int error; 6580 6581 if (!vfs_op_thread_enter(mp, mpcpu)) 6582 return (vfs_cache_root_fallback(mp, flags, vpp)); 6583 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6584 if (vp == NULL || VN_IS_DOOMED(vp)) { 6585 vfs_op_thread_exit(mp, mpcpu); 6586 return (vfs_cache_root_fallback(mp, flags, vpp)); 6587 } 6588 vrefact(vp); 6589 vfs_op_thread_exit(mp, mpcpu); 6590 error = vn_lock(vp, flags); 6591 if (error != 0) { 6592 vrele(vp); 6593 return (vfs_cache_root_fallback(mp, flags, vpp)); 6594 } 6595 *vpp = vp; 6596 return (0); 6597 } 6598 6599 struct vnode * 6600 vfs_cache_root_clear(struct mount *mp) 6601 { 6602 struct vnode *vp; 6603 6604 /* 6605 * ops > 0 guarantees there is nobody who can see this vnode 6606 */ 6607 MPASS(mp->mnt_vfs_ops > 0); 6608 vp = mp->mnt_rootvnode; 6609 if (vp != NULL) 6610 vn_seqc_write_begin(vp); 6611 mp->mnt_rootvnode = NULL; 6612 return (vp); 6613 } 6614 6615 void 6616 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6617 { 6618 6619 MPASS(mp->mnt_vfs_ops > 0); 6620 vrefact(vp); 6621 mp->mnt_rootvnode = vp; 6622 } 6623 6624 /* 6625 * These are helper functions for filesystems to traverse all 6626 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6627 * 6628 * This interface replaces MNT_VNODE_FOREACH. 6629 */ 6630 6631 struct vnode * 6632 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6633 { 6634 struct vnode *vp; 6635 6636 maybe_yield(); 6637 MNT_ILOCK(mp); 6638 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6639 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6640 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6641 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6642 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6643 continue; 6644 VI_LOCK(vp); 6645 if (VN_IS_DOOMED(vp)) { 6646 VI_UNLOCK(vp); 6647 continue; 6648 } 6649 break; 6650 } 6651 if (vp == NULL) { 6652 __mnt_vnode_markerfree_all(mvp, mp); 6653 /* MNT_IUNLOCK(mp); -- done in above function */ 6654 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6655 return (NULL); 6656 } 6657 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6658 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6659 MNT_IUNLOCK(mp); 6660 return (vp); 6661 } 6662 6663 struct vnode * 6664 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6665 { 6666 struct vnode *vp; 6667 6668 *mvp = vn_alloc_marker(mp); 6669 MNT_ILOCK(mp); 6670 MNT_REF(mp); 6671 6672 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6673 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 6674 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6675 continue; 6676 VI_LOCK(vp); 6677 if (VN_IS_DOOMED(vp)) { 6678 VI_UNLOCK(vp); 6679 continue; 6680 } 6681 break; 6682 } 6683 if (vp == NULL) { 6684 MNT_REL(mp); 6685 MNT_IUNLOCK(mp); 6686 vn_free_marker(*mvp); 6687 *mvp = NULL; 6688 return (NULL); 6689 } 6690 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6691 MNT_IUNLOCK(mp); 6692 return (vp); 6693 } 6694 6695 void 6696 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6697 { 6698 6699 if (*mvp == NULL) { 6700 MNT_IUNLOCK(mp); 6701 return; 6702 } 6703 6704 mtx_assert(MNT_MTX(mp), MA_OWNED); 6705 6706 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6707 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6708 MNT_REL(mp); 6709 MNT_IUNLOCK(mp); 6710 vn_free_marker(*mvp); 6711 *mvp = NULL; 6712 } 6713 6714 /* 6715 * These are helper functions for filesystems to traverse their 6716 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6717 */ 6718 static void 6719 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6720 { 6721 6722 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6723 6724 MNT_ILOCK(mp); 6725 MNT_REL(mp); 6726 MNT_IUNLOCK(mp); 6727 vn_free_marker(*mvp); 6728 *mvp = NULL; 6729 } 6730 6731 /* 6732 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6733 * conventional lock order during mnt_vnode_next_lazy iteration. 6734 * 6735 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6736 * The list lock is dropped and reacquired. On success, both locks are held. 6737 * On failure, the mount vnode list lock is held but the vnode interlock is 6738 * not, and the procedure may have yielded. 6739 */ 6740 static bool 6741 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6742 struct vnode *vp) 6743 { 6744 6745 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6746 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6747 ("%s: bad marker", __func__)); 6748 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6749 ("%s: inappropriate vnode", __func__)); 6750 ASSERT_VI_UNLOCKED(vp, __func__); 6751 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6752 6753 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6754 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6755 6756 /* 6757 * Note we may be racing against vdrop which transitioned the hold 6758 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6759 * if we are the only user after we get the interlock we will just 6760 * vdrop. 6761 */ 6762 vhold(vp); 6763 mtx_unlock(&mp->mnt_listmtx); 6764 VI_LOCK(vp); 6765 if (VN_IS_DOOMED(vp)) { 6766 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6767 goto out_lost; 6768 } 6769 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6770 /* 6771 * There is nothing to do if we are the last user. 
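 * If the release below fails we held the last reference; drop it via vdropl() and report failure so the caller restarts the iteration.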
6772 */ 6773 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6774 goto out_lost; 6775 mtx_lock(&mp->mnt_listmtx); 6776 return (true); 6777 out_lost: 6778 vdropl(vp); 6779 maybe_yield(); 6780 mtx_lock(&mp->mnt_listmtx); 6781 return (false); 6782 } 6783 6784 static struct vnode * 6785 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6786 void *cbarg) 6787 { 6788 struct vnode *vp; 6789 6790 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6791 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6792 restart: 6793 vp = TAILQ_NEXT(*mvp, v_lazylist); 6794 while (vp != NULL) { 6795 if (vp->v_type == VMARKER) { 6796 vp = TAILQ_NEXT(vp, v_lazylist); 6797 continue; 6798 } 6799 /* 6800 * See if we want to process the vnode. Note we may encounter a 6801 * long string of vnodes we don't care about and hog the list 6802 * as a result. Check for it and requeue the marker. 6803 */ 6804 VNPASS(!VN_IS_DOOMED(vp), vp); 6805 if (!cb(vp, cbarg)) { 6806 if (!should_yield()) { 6807 vp = TAILQ_NEXT(vp, v_lazylist); 6808 continue; 6809 } 6810 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6811 v_lazylist); 6812 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6813 v_lazylist); 6814 mtx_unlock(&mp->mnt_listmtx); 6815 kern_yield(PRI_USER); 6816 mtx_lock(&mp->mnt_listmtx); 6817 goto restart; 6818 } 6819 /* 6820 * Try-lock because this is the wrong lock order. 6821 */ 6822 if (!VI_TRYLOCK(vp) && 6823 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6824 goto restart; 6825 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6826 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6827 ("alien vnode on the lazy list %p %p", vp, mp)); 6828 VNPASS(vp->v_mount == mp, vp); 6829 VNPASS(!VN_IS_DOOMED(vp), vp); 6830 break; 6831 } 6832 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6833 6834 /* Check if we are done */ 6835 if (vp == NULL) { 6836 mtx_unlock(&mp->mnt_listmtx); 6837 mnt_vnode_markerfree_lazy(mvp, mp); 6838 return (NULL); 6839 } 6840 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 6841 mtx_unlock(&mp->mnt_listmtx); 6842 ASSERT_VI_LOCKED(vp, "lazy iter"); 6843 return (vp); 6844 } 6845 6846 struct vnode * 6847 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6848 void *cbarg) 6849 { 6850 6851 maybe_yield(); 6852 mtx_lock(&mp->mnt_listmtx); 6853 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6854 } 6855 6856 struct vnode * 6857 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6858 void *cbarg) 6859 { 6860 struct vnode *vp; 6861 6862 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 6863 return (NULL); 6864 6865 *mvp = vn_alloc_marker(mp); 6866 MNT_ILOCK(mp); 6867 MNT_REF(mp); 6868 MNT_IUNLOCK(mp); 6869 6870 mtx_lock(&mp->mnt_listmtx); 6871 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 6872 if (vp == NULL) { 6873 mtx_unlock(&mp->mnt_listmtx); 6874 mnt_vnode_markerfree_lazy(mvp, mp); 6875 return (NULL); 6876 } 6877 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 6878 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 6879 } 6880 6881 void 6882 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6883 { 6884 6885 if (*mvp == NULL) 6886 return; 6887 6888 mtx_lock(&mp->mnt_listmtx); 6889 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 6890 mtx_unlock(&mp->mnt_listmtx); 6891 mnt_vnode_markerfree_lazy(mvp, mp); 6892 } 6893 6894 int 6895 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 6896 { 6897 6898 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 6899 cnp->cn_flags &= ~NOEXECCHECK; 6900 
return (0); 6901 } 6902 6903 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 6904 } 6905 6906 /* 6907 * Do not use this variant unless you have means other than the hold count 6908 * to prevent the vnode from getting freed. 6909 */ 6910 void 6911 vn_seqc_write_begin_locked(struct vnode *vp) 6912 { 6913 6914 ASSERT_VI_LOCKED(vp, __func__); 6915 VNPASS(vp->v_holdcnt > 0, vp); 6916 VNPASS(vp->v_seqc_users >= 0, vp); 6917 vp->v_seqc_users++; 6918 if (vp->v_seqc_users == 1) 6919 seqc_sleepable_write_begin(&vp->v_seqc); 6920 } 6921 6922 void 6923 vn_seqc_write_begin(struct vnode *vp) 6924 { 6925 6926 VI_LOCK(vp); 6927 vn_seqc_write_begin_locked(vp); 6928 VI_UNLOCK(vp); 6929 } 6930 6931 void 6932 vn_seqc_write_end_locked(struct vnode *vp) 6933 { 6934 6935 ASSERT_VI_LOCKED(vp, __func__); 6936 VNPASS(vp->v_seqc_users > 0, vp); 6937 vp->v_seqc_users--; 6938 if (vp->v_seqc_users == 0) 6939 seqc_sleepable_write_end(&vp->v_seqc); 6940 } 6941 6942 void 6943 vn_seqc_write_end(struct vnode *vp) 6944 { 6945 6946 VI_LOCK(vp); 6947 vn_seqc_write_end_locked(vp); 6948 VI_UNLOCK(vp); 6949 } 6950 6951 /* 6952 * Special case handling for allocating and freeing vnodes. 6953 * 6954 * The counter remains unchanged on free so that a doomed vnode will 6955 * keep testing as in modify as long as it is accessible with SMR. 6956 */ 6957 static void 6958 vn_seqc_init(struct vnode *vp) 6959 { 6960 6961 vp->v_seqc = 0; 6962 vp->v_seqc_users = 0; 6963 } 6964 6965 static void 6966 vn_seqc_write_end_free(struct vnode *vp) 6967 { 6968 6969 VNPASS(seqc_in_modify(vp->v_seqc), vp); 6970 VNPASS(vp->v_seqc_users == 1, vp); 6971 } 6972 6973 void 6974 vn_irflag_set_locked(struct vnode *vp, short toset) 6975 { 6976 short flags; 6977 6978 ASSERT_VI_LOCKED(vp, __func__); 6979 flags = vn_irflag_read(vp); 6980 VNASSERT((flags & toset) == 0, vp, 6981 ("%s: some of the passed flags already set (have %d, passed %d)\n", 6982 __func__, flags, toset)); 6983 atomic_store_short(&vp->v_irflag, flags | toset); 6984 } 6985 6986 void 6987 vn_irflag_set(struct vnode *vp, short toset) 6988 { 6989 6990 VI_LOCK(vp); 6991 vn_irflag_set_locked(vp, toset); 6992 VI_UNLOCK(vp); 6993 } 6994 6995 void 6996 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 6997 { 6998 short flags; 6999 7000 ASSERT_VI_LOCKED(vp, __func__); 7001 flags = vn_irflag_read(vp); 7002 atomic_store_short(&vp->v_irflag, flags | toset); 7003 } 7004 7005 void 7006 vn_irflag_set_cond(struct vnode *vp, short toset) 7007 { 7008 7009 VI_LOCK(vp); 7010 vn_irflag_set_cond_locked(vp, toset); 7011 VI_UNLOCK(vp); 7012 } 7013 7014 void 7015 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7016 { 7017 short flags; 7018 7019 ASSERT_VI_LOCKED(vp, __func__); 7020 flags = vn_irflag_read(vp); 7021 VNASSERT((flags & tounset) == tounset, vp, 7022 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7023 __func__, flags, tounset)); 7024 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7025 } 7026 7027 void 7028 vn_irflag_unset(struct vnode *vp, short tounset) 7029 { 7030 7031 VI_LOCK(vp); 7032 vn_irflag_unset_locked(vp, tounset); 7033 VI_UNLOCK(vp); 7034 } 7035 7036 int 7037 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7038 { 7039 struct vattr vattr; 7040 int error; 7041 7042 ASSERT_VOP_LOCKED(vp, __func__); 7043 error = VOP_GETATTR(vp, &vattr, cred); 7044 if (__predict_true(error == 0)) { 7045 if (vattr.va_size <= OFF_MAX) 7046 *size = vattr.va_size; 7047 else 7048 error = EFBIG; 7049 } 7050 return (error); 7051 } 7052 7053 int 7054 
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7055 { 7056 int error; 7057 7058 VOP_LOCK(vp, LK_SHARED); 7059 error = vn_getsize_locked(vp, size, cred); 7060 VOP_UNLOCK(vp); 7061 return (error); 7062 } 7063 7064 #ifdef INVARIANTS 7065 void 7066 vn_set_state_validate(struct vnode *vp, enum vstate state) 7067 { 7068 7069 switch (vp->v_state) { 7070 case VSTATE_UNINITIALIZED: 7071 switch (state) { 7072 case VSTATE_CONSTRUCTED: 7073 case VSTATE_DESTROYING: 7074 return; 7075 default: 7076 break; 7077 } 7078 break; 7079 case VSTATE_CONSTRUCTED: 7080 ASSERT_VOP_ELOCKED(vp, __func__); 7081 switch (state) { 7082 case VSTATE_DESTROYING: 7083 return; 7084 default: 7085 break; 7086 } 7087 break; 7088 case VSTATE_DESTROYING: 7089 ASSERT_VOP_ELOCKED(vp, __func__); 7090 switch (state) { 7091 case VSTATE_DEAD: 7092 return; 7093 default: 7094 break; 7095 } 7096 break; 7097 case VSTATE_DEAD: 7098 switch (state) { 7099 case VSTATE_UNINITIALIZED: 7100 return; 7101 default: 7102 break; 7103 } 7104 break; 7105 } 7106 7107 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7108 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7109 } 7110 #endif 7111
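/*
 * Summary of the state transitions accepted by vn_set_state_validate() above
 * (documentation only):
 *
 *	VSTATE_UNINITIALIZED -> VSTATE_CONSTRUCTED or VSTATE_DESTROYING
 *	VSTATE_CONSTRUCTED   -> VSTATE_DESTROYING  (vnode exclusively locked)
 *	VSTATE_DESTROYING    -> VSTATE_DEAD        (vnode exclusively locked)
 *	VSTATE_DEAD          -> VSTATE_UNINITIALIZED
 *
 * Anything else prints the vnode and panics with both states.
 *
 * A sketch of how a filesystem might walk its vnodes with the
 * MNT_VNODE_FOREACH_ALL() iterator built on the helpers defined earlier in
 * this file (see sys/mount.h for the macro; each vnode is returned with its
 * interlock held and must be unlocked by the body):
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... per-vnode work, then VI_UNLOCK(vp) ...
 *	}
 *
 * Breaking out of the loop early requires MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp)
 * to free the marker vnode.
 */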