1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smr.h> 80 #include <sys/smp.h> 81 #include <sys/stat.h> 82 #include <sys/sysctl.h> 83 #include <sys/syslog.h> 84 #include <sys/vmmeter.h> 85 #include <sys/vnode.h> 86 #include <sys/watchdog.h> 87 88 #include <machine/stdarg.h> 89 90 #include <security/mac/mac_framework.h> 91 92 #include <vm/vm.h> 93 #include <vm/vm_object.h> 94 #include <vm/vm_extern.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_page.h> 98 #include <vm/vm_kern.h> 99 #include <vm/uma.h> 100 101 #ifdef DDB 102 #include <ddb/ddb.h> 103 #endif 104 105 static void delmntque(struct vnode *vp); 106 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 107 int slpflag, int slptimeo); 108 static void syncer_shutdown(void *arg, int howto); 109 static int vtryrecycle(struct vnode *vp); 110 static void v_init_counters(struct vnode *); 111 static void v_incr_devcount(struct vnode *); 112 static void v_decr_devcount(struct vnode *); 113 static void vgonel(struct vnode *); 114 static void vfs_knllock(void *arg); 115 static void vfs_knlunlock(void *arg); 116 static void vfs_knl_assert_locked(void *arg); 117 static void vfs_knl_assert_unlocked(void *arg); 118 static void destroy_vpollinfo(struct vpollinfo *vi); 119 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 120 daddr_t startlbn, daddr_t endlbn); 121 static void vnlru_recalc(void); 122 123 /* 124 * These fences are intended for cases where some synchronization is 125 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 126 * and v_usecount) updates. Access to v_iflags is generally synchronized 127 * by the interlock, but we have some internal assertions that check vnode 128 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 129 * for now. 130 */ 131 #ifdef INVARIANTS 132 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 133 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 134 #else 135 #define VNODE_REFCOUNT_FENCE_ACQ() 136 #define VNODE_REFCOUNT_FENCE_REL() 137 #endif 138 139 /* 140 * Number of vnodes in existence. Increased whenever getnewvnode() 141 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
142 */ 143 static u_long __exclusive_cache_line numvnodes; 144 145 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 146 "Number of vnodes in existence"); 147 148 static counter_u64_t vnodes_created; 149 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 150 "Number of vnodes created by getnewvnode"); 151 152 /* 153 * Conversion tables for conversion from vnode types to inode formats 154 * and back. 155 */ 156 enum vtype iftovt_tab[16] = { 157 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 158 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 159 }; 160 int vttoif_tab[10] = { 161 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 162 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 163 }; 164 165 /* 166 * List of allocates vnodes in the system. 167 */ 168 static TAILQ_HEAD(freelst, vnode) vnode_list; 169 static struct vnode *vnode_list_free_marker; 170 static struct vnode *vnode_list_reclaim_marker; 171 172 /* 173 * "Free" vnode target. Free vnodes are rarely completely free, but are 174 * just ones that are cheap to recycle. Usually they are for files which 175 * have been stat'd but not read; these usually have inode and namecache 176 * data attached to them. This target is the preferred minimum size of a 177 * sub-cache consisting mostly of such files. The system balances the size 178 * of this sub-cache with its complement to try to prevent either from 179 * thrashing while the other is relatively inactive. The targets express 180 * a preference for the best balance. 181 * 182 * "Above" this target there are 2 further targets (watermarks) related 183 * to recyling of free vnodes. In the best-operating case, the cache is 184 * exactly full, the free list has size between vlowat and vhiwat above the 185 * free target, and recycling from it and normal use maintains this state. 186 * Sometimes the free list is below vlowat or even empty, but this state 187 * is even better for immediate use provided the cache is not full. 188 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 189 * ones) to reach one of these states. The watermarks are currently hard- 190 * coded as 4% and 9% of the available space higher. These and the default 191 * of 25% for wantfreevnodes are too large if the memory size is large. 192 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 193 * whenever vnlru_proc() becomes active. 194 */ 195 static long wantfreevnodes; 196 static long __exclusive_cache_line freevnodes; 197 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 198 &freevnodes, 0, "Number of \"free\" vnodes"); 199 static long freevnodes_old; 200 201 static counter_u64_t recycles_count; 202 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 203 "Number of vnodes recycled to meet vnode cache targets"); 204 205 static counter_u64_t recycles_free_count; 206 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 207 "Number of free vnodes recycled to meet vnode cache targets"); 208 209 /* 210 * Various variables used for debugging the new implementation of 211 * reassignbuf(). 212 * XXX these are probably of (very) limited utility now. 
213 */ 214 static int reassignbufcalls; 215 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS, 216 &reassignbufcalls, 0, "Number of calls to reassignbuf"); 217 218 static counter_u64_t deferred_inact; 219 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 220 "Number of times inactive processing was deferred"); 221 222 /* To keep more than one thread at a time from running vfs_getnewfsid */ 223 static struct mtx mntid_mtx; 224 225 /* 226 * Lock for any access to the following: 227 * vnode_list 228 * numvnodes 229 * freevnodes 230 */ 231 static struct mtx __exclusive_cache_line vnode_list_mtx; 232 233 /* Publicly exported FS */ 234 struct nfs_public nfs_pub; 235 236 static uma_zone_t buf_trie_zone; 237 static smr_t buf_trie_smr; 238 239 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 240 static uma_zone_t vnode_zone; 241 static uma_zone_t vnodepoll_zone; 242 243 __read_frequently smr_t vfs_smr; 244 245 /* 246 * The workitem queue. 247 * 248 * It is useful to delay writes of file data and filesystem metadata 249 * for tens of seconds so that quickly created and deleted files need 250 * not waste disk bandwidth being created and removed. To realize this, 251 * we append vnodes to a "workitem" queue. When running with a soft 252 * updates implementation, most pending metadata dependencies should 253 * not wait for more than a few seconds. Thus, mounted on block devices 254 * are delayed only about a half the time that file data is delayed. 255 * Similarly, directory updates are more critical, so are only delayed 256 * about a third the time that file data is delayed. Thus, there are 257 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 258 * one each second (driven off the filesystem syncer process). The 259 * syncer_delayno variable indicates the next queue that is to be processed. 
260 * Items that need to be processed soon are placed in this queue: 261 * 262 * syncer_workitem_pending[syncer_delayno] 263 * 264 * A delay of fifteen seconds is done by placing the request fifteen 265 * entries later in the queue: 266 * 267 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 268 * 269 */ 270 static int syncer_delayno; 271 static long syncer_mask; 272 LIST_HEAD(synclist, bufobj); 273 static struct synclist *syncer_workitem_pending; 274 /* 275 * The sync_mtx protects: 276 * bo->bo_synclist 277 * sync_vnode_count 278 * syncer_delayno 279 * syncer_state 280 * syncer_workitem_pending 281 * syncer_worklist_len 282 * rushjob 283 */ 284 static struct mtx sync_mtx; 285 static struct cv sync_wakeup; 286 287 #define SYNCER_MAXDELAY 32 288 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 289 static int syncdelay = 30; /* max time to delay syncing data */ 290 static int filedelay = 30; /* time to delay syncing files */ 291 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 292 "Time to delay syncing files (in seconds)"); 293 static int dirdelay = 29; /* time to delay syncing directories */ 294 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 295 "Time to delay syncing directories (in seconds)"); 296 static int metadelay = 28; /* time to delay syncing metadata */ 297 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 298 "Time to delay syncing metadata (in seconds)"); 299 static int rushjob; /* number of slots to run ASAP */ 300 static int stat_rush_requests; /* number of times I/O speeded up */ 301 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 302 "Number of times I/O speeded up (rush requests)"); 303 304 #define VDBATCH_SIZE 8 305 struct vdbatch { 306 u_int index; 307 long freevnodes; 308 struct mtx lock; 309 struct vnode *tab[VDBATCH_SIZE]; 310 }; 311 DPCPU_DEFINE_STATIC(struct vdbatch, vd); 312 313 static void vdbatch_dequeue(struct vnode *vp); 314 315 /* 316 * When shutting down the syncer, run it at four times normal speed. 317 */ 318 #define SYNCER_SHUTDOWN_SPEEDUP 4 319 static int sync_vnode_count; 320 static int syncer_worklist_len; 321 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 322 syncer_state; 323 324 /* Target for maximum number of vnodes. */ 325 u_long desiredvnodes; 326 static u_long gapvnodes; /* gap between wanted and desired */ 327 static u_long vhiwat; /* enough extras after expansion */ 328 static u_long vlowat; /* minimal extras before expansion */ 329 static u_long vstir; /* nonzero to stir non-free vnodes */ 330 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 331 332 static u_long vnlru_read_freevnodes(void); 333 334 /* 335 * Note that no attempt is made to sanitize these parameters. 336 */ 337 static int 338 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS) 339 { 340 u_long val; 341 int error; 342 343 val = desiredvnodes; 344 error = sysctl_handle_long(oidp, &val, 0, req); 345 if (error != 0 || req->newptr == NULL) 346 return (error); 347 348 if (val == desiredvnodes) 349 return (0); 350 mtx_lock(&vnode_list_mtx); 351 desiredvnodes = val; 352 wantfreevnodes = desiredvnodes / 4; 353 vnlru_recalc(); 354 mtx_unlock(&vnode_list_mtx); 355 /* 356 * XXX There is no protection against multiple threads changing 357 * desiredvnodes at the same time. Locking above only helps vnlru and 358 * getnewvnode. 
359 */ 360 vfs_hash_changesize(desiredvnodes); 361 cache_changesize(desiredvnodes); 362 return (0); 363 } 364 365 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 366 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes, 367 "LU", "Target for maximum number of vnodes"); 368 369 static int 370 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS) 371 { 372 u_long val; 373 int error; 374 375 val = wantfreevnodes; 376 error = sysctl_handle_long(oidp, &val, 0, req); 377 if (error != 0 || req->newptr == NULL) 378 return (error); 379 380 if (val == wantfreevnodes) 381 return (0); 382 mtx_lock(&vnode_list_mtx); 383 wantfreevnodes = val; 384 vnlru_recalc(); 385 mtx_unlock(&vnode_list_mtx); 386 return (0); 387 } 388 389 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes, 390 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes, 391 "LU", "Target for minimum number of \"free\" vnodes"); 392 393 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 394 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 395 static int vnlru_nowhere; 396 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 397 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 398 399 static int 400 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 401 { 402 struct vnode *vp; 403 struct nameidata nd; 404 char *buf; 405 unsigned long ndflags; 406 int error; 407 408 if (req->newptr == NULL) 409 return (EINVAL); 410 if (req->newlen >= PATH_MAX) 411 return (E2BIG); 412 413 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 414 error = SYSCTL_IN(req, buf, req->newlen); 415 if (error != 0) 416 goto out; 417 418 buf[req->newlen] = '\0'; 419 420 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME; 421 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 422 if ((error = namei(&nd)) != 0) 423 goto out; 424 vp = nd.ni_vp; 425 426 if (VN_IS_DOOMED(vp)) { 427 /* 428 * This vnode is being recycled. Return != 0 to let the caller 429 * know that the sysctl had no effect. Return EAGAIN because a 430 * subsequent call will likely succeed (since namei will create 431 * a new vnode if necessary) 432 */ 433 error = EAGAIN; 434 goto putvnode; 435 } 436 437 counter_u64_add(recycles_count, 1); 438 vgone(vp); 439 putvnode: 440 NDFREE(&nd, 0); 441 out: 442 free(buf, M_TEMP); 443 return (error); 444 } 445 446 static int 447 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 448 { 449 struct thread *td = curthread; 450 struct vnode *vp; 451 struct file *fp; 452 int error; 453 int fd; 454 455 if (req->newptr == NULL) 456 return (EBADF); 457 458 error = sysctl_handle_int(oidp, &fd, 0, req); 459 if (error != 0) 460 return (error); 461 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 462 if (error != 0) 463 return (error); 464 vp = fp->f_vnode; 465 466 error = vn_lock(vp, LK_EXCLUSIVE); 467 if (error != 0) 468 goto drop; 469 470 counter_u64_add(recycles_count, 1); 471 vgone(vp); 472 VOP_UNLOCK(vp); 473 drop: 474 fdrop(fp, td); 475 return (error); 476 } 477 478 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 479 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 480 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 481 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 482 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 483 sysctl_ftry_reclaim_vnode, "I", 484 "Try to reclaim a vnode by its file descriptor"); 485 486 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 487 static int vnsz2log; 488 489 /* 490 * Support for the bufobj clean & dirty pctrie. 
491 */ 492 static void * 493 buf_trie_alloc(struct pctrie *ptree) 494 { 495 return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT)); 496 } 497 498 static void 499 buf_trie_free(struct pctrie *ptree, void *node) 500 { 501 uma_zfree_smr(buf_trie_zone, node); 502 } 503 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free, 504 buf_trie_smr); 505 506 /* 507 * Initialize the vnode management data structures. 508 * 509 * Reevaluate the following cap on the number of vnodes after the physical 510 * memory size exceeds 512GB. In the limit, as the physical memory size 511 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 512 */ 513 #ifndef MAXVNODES_MAX 514 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 515 #endif 516 517 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 518 519 static struct vnode * 520 vn_alloc_marker(struct mount *mp) 521 { 522 struct vnode *vp; 523 524 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 525 vp->v_type = VMARKER; 526 vp->v_mount = mp; 527 528 return (vp); 529 } 530 531 static void 532 vn_free_marker(struct vnode *vp) 533 { 534 535 MPASS(vp->v_type == VMARKER); 536 free(vp, M_VNODE_MARKER); 537 } 538 539 /* 540 * Initialize a vnode as it first enters the zone. 541 */ 542 static int 543 vnode_init(void *mem, int size, int flags) 544 { 545 struct vnode *vp; 546 547 vp = mem; 548 bzero(vp, size); 549 /* 550 * Setup locks. 551 */ 552 vp->v_vnlock = &vp->v_lock; 553 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 554 /* 555 * By default, don't allow shared locks unless filesystems opt-in. 556 */ 557 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 558 LK_NOSHARE | LK_IS_VNODE); 559 /* 560 * Initialize bufobj. 561 */ 562 bufobj_init(&vp->v_bufobj, vp); 563 /* 564 * Initialize namecache. 565 */ 566 cache_vnode_init(vp); 567 /* 568 * Initialize rangelocks. 569 */ 570 rangelock_init(&vp->v_rl); 571 572 vp->v_dbatchcpu = NOCPU; 573 574 mtx_lock(&vnode_list_mtx); 575 TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist); 576 mtx_unlock(&vnode_list_mtx); 577 return (0); 578 } 579 580 /* 581 * Free a vnode when it is cleared from the zone. 582 */ 583 static void 584 vnode_fini(void *mem, int size) 585 { 586 struct vnode *vp; 587 struct bufobj *bo; 588 589 vp = mem; 590 vdbatch_dequeue(vp); 591 mtx_lock(&vnode_list_mtx); 592 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 593 mtx_unlock(&vnode_list_mtx); 594 rangelock_destroy(&vp->v_rl); 595 lockdestroy(vp->v_vnlock); 596 mtx_destroy(&vp->v_interlock); 597 bo = &vp->v_bufobj; 598 rw_destroy(BO_LOCKPTR(bo)); 599 } 600 601 /* 602 * Provide the size of NFS nclnode and NFS fh for calculation of the 603 * vnode memory consumption. The size is specified directly to 604 * eliminate dependency on NFS-private header. 605 * 606 * Other filesystems may use bigger or smaller (like UFS and ZFS) 607 * private inode data, but the NFS-based estimation is ample enough. 608 * Still, we care about differences in the size between 64- and 32-bit 609 * platforms. 610 * 611 * Namecache structure size is heuristically 612 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 
613 */ 614 #ifdef _LP64 615 #define NFS_NCLNODE_SZ (528 + 64) 616 #define NC_SZ 148 617 #else 618 #define NFS_NCLNODE_SZ (360 + 32) 619 #define NC_SZ 92 620 #endif 621 622 static void 623 vntblinit(void *dummy __unused) 624 { 625 struct vdbatch *vd; 626 int cpu, physvnodes, virtvnodes; 627 u_int i; 628 629 /* 630 * Desiredvnodes is a function of the physical memory size and the 631 * kernel's heap size. Generally speaking, it scales with the 632 * physical memory size. The ratio of desiredvnodes to the physical 633 * memory size is 1:16 until desiredvnodes exceeds 98,304. 634 * Thereafter, the 635 * marginal ratio of desiredvnodes to the physical memory size is 636 * 1:64. However, desiredvnodes is limited by the kernel's heap 637 * size. The memory required by desiredvnodes vnodes and vm objects 638 * must not exceed 1/10th of the kernel's heap size. 639 */ 640 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 641 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 642 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 643 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 644 desiredvnodes = min(physvnodes, virtvnodes); 645 if (desiredvnodes > MAXVNODES_MAX) { 646 if (bootverbose) 647 printf("Reducing kern.maxvnodes %lu -> %lu\n", 648 desiredvnodes, MAXVNODES_MAX); 649 desiredvnodes = MAXVNODES_MAX; 650 } 651 wantfreevnodes = desiredvnodes / 4; 652 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 653 TAILQ_INIT(&vnode_list); 654 mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF); 655 /* 656 * The lock is taken to appease WITNESS. 657 */ 658 mtx_lock(&vnode_list_mtx); 659 vnlru_recalc(); 660 mtx_unlock(&vnode_list_mtx); 661 vnode_list_free_marker = vn_alloc_marker(NULL); 662 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist); 663 vnode_list_reclaim_marker = vn_alloc_marker(NULL); 664 TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist); 665 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 666 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 667 uma_zone_set_smr(vnode_zone, vfs_smr); 668 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 669 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 670 /* 671 * Preallocate enough nodes to support one-per buf so that 672 * we can not fail an insert. reassignbuf() callers can not 673 * tolerate the insertion failure. 674 */ 675 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 676 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 677 UMA_ZONE_NOFREE | UMA_ZONE_SMR); 678 buf_trie_smr = uma_zone_get_smr(buf_trie_zone); 679 uma_prealloc(buf_trie_zone, nbuf); 680 681 vnodes_created = counter_u64_alloc(M_WAITOK); 682 recycles_count = counter_u64_alloc(M_WAITOK); 683 recycles_free_count = counter_u64_alloc(M_WAITOK); 684 deferred_inact = counter_u64_alloc(M_WAITOK); 685 686 /* 687 * Initialize the filesystem syncer. 688 */ 689 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 690 &syncer_mask); 691 syncer_maxdelay = syncer_mask + 1; 692 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 693 cv_init(&sync_wakeup, "syncer"); 694 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 695 vnsz2log++; 696 vnsz2log--; 697 698 CPU_FOREACH(cpu) { 699 vd = DPCPU_ID_PTR((cpu), vd); 700 bzero(vd, sizeof(*vd)); 701 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); 702 } 703 } 704 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 705 706 /* 707 * Mark a mount point as busy. Used to synchronize access and to delay 708 * unmounting. 
Eventually, mountlist_mtx is not released on failure. 709 * 710 * vfs_busy() is a custom lock, it can block the caller. 711 * vfs_busy() only sleeps if the unmount is active on the mount point. 712 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 713 * vnode belonging to mp. 714 * 715 * Lookup uses vfs_busy() to traverse mount points. 716 * root fs var fs 717 * / vnode lock A / vnode lock (/var) D 718 * /var vnode lock B /log vnode lock(/var/log) E 719 * vfs_busy lock C vfs_busy lock F 720 * 721 * Within each file system, the lock order is C->A->B and F->D->E. 722 * 723 * When traversing across mounts, the system follows that lock order: 724 * 725 * C->A->B 726 * | 727 * +->F->D->E 728 * 729 * The lookup() process for namei("/var") illustrates the process: 730 * VOP_LOOKUP() obtains B while A is held 731 * vfs_busy() obtains a shared lock on F while A and B are held 732 * vput() releases lock on B 733 * vput() releases lock on A 734 * VFS_ROOT() obtains lock on D while shared lock on F is held 735 * vfs_unbusy() releases shared lock on F 736 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 737 * Attempt to lock A (instead of vp_crossmp) while D is held would 738 * violate the global order, causing deadlocks. 739 * 740 * dounmount() locks B while F is drained. 741 */ 742 int 743 vfs_busy(struct mount *mp, int flags) 744 { 745 746 MPASS((flags & ~MBF_MASK) == 0); 747 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 748 749 if (vfs_op_thread_enter(mp)) { 750 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 751 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 752 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 753 vfs_mp_count_add_pcpu(mp, ref, 1); 754 vfs_mp_count_add_pcpu(mp, lockref, 1); 755 vfs_op_thread_exit(mp); 756 if (flags & MBF_MNTLSTLOCK) 757 mtx_unlock(&mountlist_mtx); 758 return (0); 759 } 760 761 MNT_ILOCK(mp); 762 vfs_assert_mount_counters(mp); 763 MNT_REF(mp); 764 /* 765 * If mount point is currently being unmounted, sleep until the 766 * mount point fate is decided. If thread doing the unmounting fails, 767 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 768 * that this mount point has survived the unmount attempt and vfs_busy 769 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 770 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 771 * about to be really destroyed. vfs_busy needs to release its 772 * reference on the mount point in this case and return with ENOENT, 773 * telling the caller that mount mount it tried to busy is no longer 774 * valid. 775 */ 776 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 777 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 778 MNT_REL(mp); 779 MNT_IUNLOCK(mp); 780 CTR1(KTR_VFS, "%s: failed busying before sleeping", 781 __func__); 782 return (ENOENT); 783 } 784 if (flags & MBF_MNTLSTLOCK) 785 mtx_unlock(&mountlist_mtx); 786 mp->mnt_kern_flag |= MNTK_MWAIT; 787 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 788 if (flags & MBF_MNTLSTLOCK) 789 mtx_lock(&mountlist_mtx); 790 MNT_ILOCK(mp); 791 } 792 if (flags & MBF_MNTLSTLOCK) 793 mtx_unlock(&mountlist_mtx); 794 mp->mnt_lockref++; 795 MNT_IUNLOCK(mp); 796 return (0); 797 } 798 799 /* 800 * Free a busy filesystem. 
801 */ 802 void 803 vfs_unbusy(struct mount *mp) 804 { 805 int c; 806 807 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 808 809 if (vfs_op_thread_enter(mp)) { 810 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 811 vfs_mp_count_sub_pcpu(mp, lockref, 1); 812 vfs_mp_count_sub_pcpu(mp, ref, 1); 813 vfs_op_thread_exit(mp); 814 return; 815 } 816 817 MNT_ILOCK(mp); 818 vfs_assert_mount_counters(mp); 819 MNT_REL(mp); 820 c = --mp->mnt_lockref; 821 if (mp->mnt_vfs_ops == 0) { 822 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 823 MNT_IUNLOCK(mp); 824 return; 825 } 826 if (c < 0) 827 vfs_dump_mount_counters(mp); 828 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 829 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 830 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 831 mp->mnt_kern_flag &= ~MNTK_DRAINING; 832 wakeup(&mp->mnt_lockref); 833 } 834 MNT_IUNLOCK(mp); 835 } 836 837 /* 838 * Lookup a mount point by filesystem identifier. 839 */ 840 struct mount * 841 vfs_getvfs(fsid_t *fsid) 842 { 843 struct mount *mp; 844 845 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 846 mtx_lock(&mountlist_mtx); 847 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 848 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 849 vfs_ref(mp); 850 mtx_unlock(&mountlist_mtx); 851 return (mp); 852 } 853 } 854 mtx_unlock(&mountlist_mtx); 855 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 856 return ((struct mount *) 0); 857 } 858 859 /* 860 * Lookup a mount point by filesystem identifier, busying it before 861 * returning. 862 * 863 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 864 * cache for popular filesystem identifiers. The cache is lockess, using 865 * the fact that struct mount's are never freed. In worst case we may 866 * get pointer to unmounted or even different filesystem, so we have to 867 * check what we got, and go slow way if so. 868 */ 869 struct mount * 870 vfs_busyfs(fsid_t *fsid) 871 { 872 #define FSID_CACHE_SIZE 256 873 typedef struct mount * volatile vmp_t; 874 static vmp_t cache[FSID_CACHE_SIZE]; 875 struct mount *mp; 876 int error; 877 uint32_t hash; 878 879 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 880 hash = fsid->val[0] ^ fsid->val[1]; 881 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 882 mp = cache[hash]; 883 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) 884 goto slow; 885 if (vfs_busy(mp, 0) != 0) { 886 cache[hash] = NULL; 887 goto slow; 888 } 889 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) 890 return (mp); 891 else 892 vfs_unbusy(mp); 893 894 slow: 895 mtx_lock(&mountlist_mtx); 896 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 897 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { 898 error = vfs_busy(mp, MBF_MNTLSTLOCK); 899 if (error) { 900 cache[hash] = NULL; 901 mtx_unlock(&mountlist_mtx); 902 return (NULL); 903 } 904 cache[hash] = mp; 905 return (mp); 906 } 907 } 908 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 909 mtx_unlock(&mountlist_mtx); 910 return ((struct mount *) 0); 911 } 912 913 /* 914 * Check if a user can access privileged mount options. 915 */ 916 int 917 vfs_suser(struct mount *mp, struct thread *td) 918 { 919 int error; 920 921 if (jailed(td->td_ucred)) { 922 /* 923 * If the jail of the calling thread lacks permission for 924 * this type of file system, deny immediately. 925 */ 926 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 927 return (EPERM); 928 929 /* 930 * If the file system was mounted outside the jail of the 931 * calling thread, deny immediately. 
932 */ 933 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 934 return (EPERM); 935 } 936 937 /* 938 * If file system supports delegated administration, we don't check 939 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 940 * by the file system itself. 941 * If this is not the user that did original mount, we check for 942 * the PRIV_VFS_MOUNT_OWNER privilege. 943 */ 944 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 945 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 946 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 947 return (error); 948 } 949 return (0); 950 } 951 952 /* 953 * Get a new unique fsid. Try to make its val[0] unique, since this value 954 * will be used to create fake device numbers for stat(). Also try (but 955 * not so hard) make its val[0] unique mod 2^16, since some emulators only 956 * support 16-bit device numbers. We end up with unique val[0]'s for the 957 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 958 * 959 * Keep in mind that several mounts may be running in parallel. Starting 960 * the search one past where the previous search terminated is both a 961 * micro-optimization and a defense against returning the same fsid to 962 * different mounts. 963 */ 964 void 965 vfs_getnewfsid(struct mount *mp) 966 { 967 static uint16_t mntid_base; 968 struct mount *nmp; 969 fsid_t tfsid; 970 int mtype; 971 972 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 973 mtx_lock(&mntid_mtx); 974 mtype = mp->mnt_vfc->vfc_typenum; 975 tfsid.val[1] = mtype; 976 mtype = (mtype & 0xFF) << 24; 977 for (;;) { 978 tfsid.val[0] = makedev(255, 979 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 980 mntid_base++; 981 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 982 break; 983 vfs_rel(nmp); 984 } 985 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 986 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 987 mtx_unlock(&mntid_mtx); 988 } 989 990 /* 991 * Knob to control the precision of file timestamps: 992 * 993 * 0 = seconds only; nanoseconds zeroed. 994 * 1 = seconds and nanoseconds, accurate within 1/HZ. 995 * 2 = seconds and nanoseconds, truncated to microseconds. 996 * >=3 = seconds and nanoseconds, maximum precision. 997 */ 998 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 999 1000 static int timestamp_precision = TSP_USEC; 1001 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 1002 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 1003 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 1004 "3+: sec + ns (max. precision))"); 1005 1006 /* 1007 * Get a current timestamp. 
1008 */ 1009 void 1010 vfs_timestamp(struct timespec *tsp) 1011 { 1012 struct timeval tv; 1013 1014 switch (timestamp_precision) { 1015 case TSP_SEC: 1016 tsp->tv_sec = time_second; 1017 tsp->tv_nsec = 0; 1018 break; 1019 case TSP_HZ: 1020 getnanotime(tsp); 1021 break; 1022 case TSP_USEC: 1023 microtime(&tv); 1024 TIMEVAL_TO_TIMESPEC(&tv, tsp); 1025 break; 1026 case TSP_NSEC: 1027 default: 1028 nanotime(tsp); 1029 break; 1030 } 1031 } 1032 1033 /* 1034 * Set vnode attributes to VNOVAL 1035 */ 1036 void 1037 vattr_null(struct vattr *vap) 1038 { 1039 1040 vap->va_type = VNON; 1041 vap->va_size = VNOVAL; 1042 vap->va_bytes = VNOVAL; 1043 vap->va_mode = VNOVAL; 1044 vap->va_nlink = VNOVAL; 1045 vap->va_uid = VNOVAL; 1046 vap->va_gid = VNOVAL; 1047 vap->va_fsid = VNOVAL; 1048 vap->va_fileid = VNOVAL; 1049 vap->va_blocksize = VNOVAL; 1050 vap->va_rdev = VNOVAL; 1051 vap->va_atime.tv_sec = VNOVAL; 1052 vap->va_atime.tv_nsec = VNOVAL; 1053 vap->va_mtime.tv_sec = VNOVAL; 1054 vap->va_mtime.tv_nsec = VNOVAL; 1055 vap->va_ctime.tv_sec = VNOVAL; 1056 vap->va_ctime.tv_nsec = VNOVAL; 1057 vap->va_birthtime.tv_sec = VNOVAL; 1058 vap->va_birthtime.tv_nsec = VNOVAL; 1059 vap->va_flags = VNOVAL; 1060 vap->va_gen = VNOVAL; 1061 vap->va_vaflags = 0; 1062 } 1063 1064 /* 1065 * Try to reduce the total number of vnodes. 1066 * 1067 * This routine (and its user) are buggy in at least the following ways: 1068 * - all parameters were picked years ago when RAM sizes were significantly 1069 * smaller 1070 * - it can pick vnodes based on pages used by the vm object, but filesystems 1071 * like ZFS don't use it making the pick broken 1072 * - since ZFS has its own aging policy it gets partially combated by this one 1073 * - a dedicated method should be provided for filesystems to let them decide 1074 * whether the vnode should be recycled 1075 * 1076 * This routine is called when we have too many vnodes. It attempts 1077 * to free <count> vnodes and will potentially free vnodes that still 1078 * have VM backing store (VM backing store is typically the cause 1079 * of a vnode blowout so we want to do this). Therefore, this operation 1080 * is not considered cheap. 1081 * 1082 * A number of conditions may prevent a vnode from being reclaimed. 1083 * the buffer cache may have references on the vnode, a directory 1084 * vnode may still have references due to the namei cache representing 1085 * underlying files, or the vnode may be in active use. It is not 1086 * desirable to reuse such vnodes. These conditions may cause the 1087 * number of vnodes to reach some minimum value regardless of what 1088 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 1089 * 1090 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1091 * entries if this argument is strue 1092 * @param trigger Only reclaim vnodes with fewer than this many resident 1093 * pages. 1094 * @param target How many vnodes to reclaim. 1095 * @return The number of vnodes that were reclaimed. 
1096 */ 1097 static int 1098 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target) 1099 { 1100 struct vnode *vp, *mvp; 1101 struct mount *mp; 1102 struct vm_object *object; 1103 u_long done; 1104 bool retried; 1105 1106 mtx_assert(&vnode_list_mtx, MA_OWNED); 1107 1108 retried = false; 1109 done = 0; 1110 1111 mvp = vnode_list_reclaim_marker; 1112 restart: 1113 vp = mvp; 1114 while (done < target) { 1115 vp = TAILQ_NEXT(vp, v_vnodelist); 1116 if (__predict_false(vp == NULL)) 1117 break; 1118 1119 if (__predict_false(vp->v_type == VMARKER)) 1120 continue; 1121 1122 /* 1123 * If it's been deconstructed already, it's still 1124 * referenced, or it exceeds the trigger, skip it. 1125 * Also skip free vnodes. We are trying to make space 1126 * to expand the free list, not reduce it. 1127 */ 1128 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1129 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) 1130 goto next_iter; 1131 1132 if (vp->v_type == VBAD || vp->v_type == VNON) 1133 goto next_iter; 1134 1135 if (!VI_TRYLOCK(vp)) 1136 goto next_iter; 1137 1138 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || 1139 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1140 VN_IS_DOOMED(vp) || vp->v_type == VNON) { 1141 VI_UNLOCK(vp); 1142 goto next_iter; 1143 } 1144 1145 object = atomic_load_ptr(&vp->v_object); 1146 if (object == NULL || object->resident_page_count > trigger) { 1147 VI_UNLOCK(vp); 1148 goto next_iter; 1149 } 1150 1151 vholdl(vp); 1152 VI_UNLOCK(vp); 1153 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1154 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1155 mtx_unlock(&vnode_list_mtx); 1156 1157 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 1158 vdrop(vp); 1159 goto next_iter_unlocked; 1160 } 1161 if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) { 1162 vdrop(vp); 1163 vn_finished_write(mp); 1164 goto next_iter_unlocked; 1165 } 1166 1167 VI_LOCK(vp); 1168 if (vp->v_usecount > 0 || 1169 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1170 (vp->v_object != NULL && 1171 vp->v_object->resident_page_count > trigger)) { 1172 VOP_UNLOCK(vp); 1173 vdropl(vp); 1174 vn_finished_write(mp); 1175 goto next_iter_unlocked; 1176 } 1177 counter_u64_add(recycles_count, 1); 1178 vgonel(vp); 1179 VOP_UNLOCK(vp); 1180 vdropl(vp); 1181 vn_finished_write(mp); 1182 done++; 1183 next_iter_unlocked: 1184 if (should_yield()) 1185 kern_yield(PRI_USER); 1186 mtx_lock(&vnode_list_mtx); 1187 goto restart; 1188 next_iter: 1189 MPASS(vp->v_type != VMARKER); 1190 if (!should_yield()) 1191 continue; 1192 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1193 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1194 mtx_unlock(&vnode_list_mtx); 1195 kern_yield(PRI_USER); 1196 mtx_lock(&vnode_list_mtx); 1197 goto restart; 1198 } 1199 if (done == 0 && !retried) { 1200 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1201 TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist); 1202 retried = true; 1203 goto restart; 1204 } 1205 return (done); 1206 } 1207 1208 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1209 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1210 0, 1211 "limit on vnode free requests per call to the vnlru_free routine"); 1212 1213 /* 1214 * Attempt to reduce the free list by the requested amount. 
1215 */ 1216 static int 1217 vnlru_free_locked(int count, struct vfsops *mnt_op) 1218 { 1219 struct vnode *vp, *mvp; 1220 struct mount *mp; 1221 int ocount; 1222 1223 mtx_assert(&vnode_list_mtx, MA_OWNED); 1224 if (count > max_vnlru_free) 1225 count = max_vnlru_free; 1226 ocount = count; 1227 mvp = vnode_list_free_marker; 1228 restart: 1229 vp = mvp; 1230 while (count > 0) { 1231 vp = TAILQ_NEXT(vp, v_vnodelist); 1232 if (__predict_false(vp == NULL)) { 1233 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1234 TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist); 1235 break; 1236 } 1237 if (__predict_false(vp->v_type == VMARKER)) 1238 continue; 1239 1240 /* 1241 * Don't recycle if our vnode is from different type 1242 * of mount point. Note that mp is type-safe, the 1243 * check does not reach unmapped address even if 1244 * vnode is reclaimed. 1245 * Don't recycle if we can't get the interlock without 1246 * blocking. 1247 */ 1248 if (vp->v_holdcnt > 0 || (mnt_op != NULL && (mp = vp->v_mount) != NULL && 1249 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { 1250 continue; 1251 } 1252 TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist); 1253 TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist); 1254 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { 1255 VI_UNLOCK(vp); 1256 continue; 1257 } 1258 vholdl(vp); 1259 count--; 1260 mtx_unlock(&vnode_list_mtx); 1261 VI_UNLOCK(vp); 1262 vtryrecycle(vp); 1263 vdrop(vp); 1264 mtx_lock(&vnode_list_mtx); 1265 goto restart; 1266 } 1267 return (ocount - count); 1268 } 1269 1270 void 1271 vnlru_free(int count, struct vfsops *mnt_op) 1272 { 1273 1274 mtx_lock(&vnode_list_mtx); 1275 vnlru_free_locked(count, mnt_op); 1276 mtx_unlock(&vnode_list_mtx); 1277 } 1278 1279 static void 1280 vnlru_recalc(void) 1281 { 1282 1283 mtx_assert(&vnode_list_mtx, MA_OWNED); 1284 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1285 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1286 vlowat = vhiwat / 2; 1287 } 1288 1289 /* 1290 * Attempt to recycle vnodes in a context that is always safe to block. 1291 * Calling vlrurecycle() from the bowels of filesystem code has some 1292 * interesting deadlock problems. 1293 */ 1294 static struct proc *vnlruproc; 1295 static int vnlruproc_sig; 1296 1297 /* 1298 * The main freevnodes counter is only updated when threads requeue their vnode 1299 * batches. CPUs are conditionally walked to compute a more accurate total. 1300 * 1301 * Limit how much of a slop are we willing to tolerate. Note: the actual value 1302 * at any given moment can still exceed slop, but it should not be by significant 1303 * margin in practice. 1304 */ 1305 #define VNLRU_FREEVNODES_SLOP 128 1306 1307 static u_long 1308 vnlru_read_freevnodes(void) 1309 { 1310 struct vdbatch *vd; 1311 long slop; 1312 int cpu; 1313 1314 mtx_assert(&vnode_list_mtx, MA_OWNED); 1315 if (freevnodes > freevnodes_old) 1316 slop = freevnodes - freevnodes_old; 1317 else 1318 slop = freevnodes_old - freevnodes; 1319 if (slop < VNLRU_FREEVNODES_SLOP) 1320 return (freevnodes >= 0 ? freevnodes : 0); 1321 freevnodes_old = freevnodes; 1322 CPU_FOREACH(cpu) { 1323 vd = DPCPU_ID_PTR((cpu), vd); 1324 freevnodes_old += vd->freevnodes; 1325 } 1326 return (freevnodes_old >= 0 ? 
freevnodes_old : 0); 1327 } 1328 1329 static bool 1330 vnlru_under(u_long rnumvnodes, u_long limit) 1331 { 1332 u_long rfreevnodes, space; 1333 1334 if (__predict_false(rnumvnodes > desiredvnodes)) 1335 return (true); 1336 1337 space = desiredvnodes - rnumvnodes; 1338 if (space < limit) { 1339 rfreevnodes = vnlru_read_freevnodes(); 1340 if (rfreevnodes > wantfreevnodes) 1341 space += rfreevnodes - wantfreevnodes; 1342 } 1343 return (space < limit); 1344 } 1345 1346 static bool 1347 vnlru_under_unlocked(u_long rnumvnodes, u_long limit) 1348 { 1349 long rfreevnodes, space; 1350 1351 if (__predict_false(rnumvnodes > desiredvnodes)) 1352 return (true); 1353 1354 space = desiredvnodes - rnumvnodes; 1355 if (space < limit) { 1356 rfreevnodes = atomic_load_long(&freevnodes); 1357 if (rfreevnodes > wantfreevnodes) 1358 space += rfreevnodes - wantfreevnodes; 1359 } 1360 return (space < limit); 1361 } 1362 1363 static void 1364 vnlru_kick(void) 1365 { 1366 1367 mtx_assert(&vnode_list_mtx, MA_OWNED); 1368 if (vnlruproc_sig == 0) { 1369 vnlruproc_sig = 1; 1370 wakeup(vnlruproc); 1371 } 1372 } 1373 1374 static void 1375 vnlru_proc(void) 1376 { 1377 u_long rnumvnodes, rfreevnodes, target; 1378 unsigned long onumvnodes; 1379 int done, force, trigger, usevnodes; 1380 bool reclaim_nc_src, want_reread; 1381 1382 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1383 SHUTDOWN_PRI_FIRST); 1384 1385 force = 0; 1386 want_reread = false; 1387 for (;;) { 1388 kproc_suspend_check(vnlruproc); 1389 mtx_lock(&vnode_list_mtx); 1390 rnumvnodes = atomic_load_long(&numvnodes); 1391 1392 if (want_reread) { 1393 force = vnlru_under(numvnodes, vhiwat) ? 1 : 0; 1394 want_reread = false; 1395 } 1396 1397 /* 1398 * If numvnodes is too large (due to desiredvnodes being 1399 * adjusted using its sysctl, or emergency growth), first 1400 * try to reduce it by discarding from the free list. 1401 */ 1402 if (rnumvnodes > desiredvnodes) { 1403 vnlru_free_locked(rnumvnodes - desiredvnodes, NULL); 1404 rnumvnodes = atomic_load_long(&numvnodes); 1405 } 1406 /* 1407 * Sleep if the vnode cache is in a good state. This is 1408 * when it is not over-full and has space for about a 4% 1409 * or 9% expansion (by growing its size or inexcessively 1410 * reducing its free list). Otherwise, try to reclaim 1411 * space for a 10% expansion. 1412 */ 1413 if (vstir && force == 0) { 1414 force = 1; 1415 vstir = 0; 1416 } 1417 if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) { 1418 vnlruproc_sig = 0; 1419 wakeup(&vnlruproc_sig); 1420 msleep(vnlruproc, &vnode_list_mtx, 1421 PVFS|PDROP, "vlruwt", hz); 1422 continue; 1423 } 1424 rfreevnodes = vnlru_read_freevnodes(); 1425 1426 onumvnodes = rnumvnodes; 1427 /* 1428 * Calculate parameters for recycling. These are the same 1429 * throughout the loop to give some semblance of fairness. 1430 * The trigger point is to avoid recycling vnodes with lots 1431 * of resident pages. We aren't trying to free memory; we 1432 * are trying to recycle or at least free vnodes. 1433 */ 1434 if (rnumvnodes <= desiredvnodes) 1435 usevnodes = rnumvnodes - rfreevnodes; 1436 else 1437 usevnodes = rnumvnodes; 1438 if (usevnodes <= 0) 1439 usevnodes = 1; 1440 /* 1441 * The trigger value is is chosen to give a conservatively 1442 * large value to ensure that it alone doesn't prevent 1443 * making progress. The value can easily be so large that 1444 * it is effectively infinite in some congested and 1445 * misconfigured cases, and this is necessary. 
Normally 1446 * it is about 8 to 100 (pages), which is quite large. 1447 */ 1448 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1449 if (force < 2) 1450 trigger = vsmalltrigger; 1451 reclaim_nc_src = force >= 3; 1452 target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1453 target = target / 10 + 1; 1454 done = vlrureclaim(reclaim_nc_src, trigger, target); 1455 mtx_unlock(&vnode_list_mtx); 1456 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1457 uma_reclaim(UMA_RECLAIM_DRAIN); 1458 if (done == 0) { 1459 if (force == 0 || force == 1) { 1460 force = 2; 1461 continue; 1462 } 1463 if (force == 2) { 1464 force = 3; 1465 continue; 1466 } 1467 want_reread = true; 1468 force = 0; 1469 vnlru_nowhere++; 1470 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1471 } else { 1472 want_reread = true; 1473 kern_yield(PRI_USER); 1474 } 1475 } 1476 } 1477 1478 static struct kproc_desc vnlru_kp = { 1479 "vnlru", 1480 vnlru_proc, 1481 &vnlruproc 1482 }; 1483 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1484 &vnlru_kp); 1485 1486 /* 1487 * Routines having to do with the management of the vnode table. 1488 */ 1489 1490 /* 1491 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1492 * before we actually vgone(). This function must be called with the vnode 1493 * held to prevent the vnode from being returned to the free list midway 1494 * through vgone(). 1495 */ 1496 static int 1497 vtryrecycle(struct vnode *vp) 1498 { 1499 struct mount *vnmp; 1500 1501 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1502 VNASSERT(vp->v_holdcnt, vp, 1503 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1504 /* 1505 * This vnode may found and locked via some other list, if so we 1506 * can't recycle it yet. 1507 */ 1508 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1509 CTR2(KTR_VFS, 1510 "%s: impossible to recycle, vp %p lock is already held", 1511 __func__, vp); 1512 return (EWOULDBLOCK); 1513 } 1514 /* 1515 * Don't recycle if its filesystem is being suspended. 1516 */ 1517 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1518 VOP_UNLOCK(vp); 1519 CTR2(KTR_VFS, 1520 "%s: impossible to recycle, cannot start the write for %p", 1521 __func__, vp); 1522 return (EBUSY); 1523 } 1524 /* 1525 * If we got this far, we need to acquire the interlock and see if 1526 * anyone picked up this vnode from another list. If not, we will 1527 * mark it with DOOMED via vgonel() so that anyone who does find it 1528 * will skip over it. 1529 */ 1530 VI_LOCK(vp); 1531 if (vp->v_usecount) { 1532 VOP_UNLOCK(vp); 1533 VI_UNLOCK(vp); 1534 vn_finished_write(vnmp); 1535 CTR2(KTR_VFS, 1536 "%s: impossible to recycle, %p is already referenced", 1537 __func__, vp); 1538 return (EBUSY); 1539 } 1540 if (!VN_IS_DOOMED(vp)) { 1541 counter_u64_add(recycles_free_count, 1); 1542 vgonel(vp); 1543 } 1544 VOP_UNLOCK(vp); 1545 VI_UNLOCK(vp); 1546 vn_finished_write(vnmp); 1547 return (0); 1548 } 1549 1550 /* 1551 * Allocate a new vnode. 1552 * 1553 * The operation never returns an error. Returning an error was disabled 1554 * in r145385 (dated 2005) with the following comment: 1555 * 1556 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1557 * 1558 * Given the age of this commit (almost 15 years at the time of writing this 1559 * comment) restoring the ability to fail requires a significant audit of 1560 * all codepaths. 
1561 * 1562 * The routine can try to free a vnode or stall for up to 1 second waiting for 1563 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1564 */ 1565 static u_long vn_alloc_cyclecount; 1566 1567 static struct vnode * __noinline 1568 vn_alloc_hard(struct mount *mp) 1569 { 1570 u_long rnumvnodes, rfreevnodes; 1571 1572 mtx_lock(&vnode_list_mtx); 1573 rnumvnodes = atomic_load_long(&numvnodes); 1574 if (rnumvnodes + 1 < desiredvnodes) { 1575 vn_alloc_cyclecount = 0; 1576 goto alloc; 1577 } 1578 rfreevnodes = vnlru_read_freevnodes(); 1579 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1580 vn_alloc_cyclecount = 0; 1581 vstir = 1; 1582 } 1583 /* 1584 * Grow the vnode cache if it will not be above its target max 1585 * after growing. Otherwise, if the free list is nonempty, try 1586 * to reclaim 1 item from it before growing the cache (possibly 1587 * above its target max if the reclamation failed or is delayed). 1588 * Otherwise, wait for some space. In all cases, schedule 1589 * vnlru_proc() if we are getting short of space. The watermarks 1590 * should be chosen so that we never wait or even reclaim from 1591 * the free list to below its target minimum. 1592 */ 1593 if (vnlru_free_locked(1, NULL) > 0) 1594 goto alloc; 1595 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1596 /* 1597 * Wait for space for a new vnode. 1598 */ 1599 vnlru_kick(); 1600 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1601 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1602 vnlru_read_freevnodes() > 1) 1603 vnlru_free_locked(1, NULL); 1604 } 1605 alloc: 1606 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1607 if (vnlru_under(rnumvnodes, vlowat)) 1608 vnlru_kick(); 1609 mtx_unlock(&vnode_list_mtx); 1610 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1611 } 1612 1613 static struct vnode * 1614 vn_alloc(struct mount *mp) 1615 { 1616 u_long rnumvnodes; 1617 1618 if (__predict_false(vn_alloc_cyclecount != 0)) 1619 return (vn_alloc_hard(mp)); 1620 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1621 if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) { 1622 atomic_subtract_long(&numvnodes, 1); 1623 return (vn_alloc_hard(mp)); 1624 } 1625 1626 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1627 } 1628 1629 static void 1630 vn_free(struct vnode *vp) 1631 { 1632 1633 atomic_subtract_long(&numvnodes, 1); 1634 uma_zfree_smr(vnode_zone, vp); 1635 } 1636 1637 /* 1638 * Return the next vnode from the free list. 1639 */ 1640 int 1641 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1642 struct vnode **vpp) 1643 { 1644 struct vnode *vp; 1645 struct thread *td; 1646 struct lock_object *lo; 1647 1648 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1649 1650 KASSERT(vops->registered, 1651 ("%s: not registered vector op %p\n", __func__, vops)); 1652 1653 td = curthread; 1654 if (td->td_vp_reserved != NULL) { 1655 vp = td->td_vp_reserved; 1656 td->td_vp_reserved = NULL; 1657 } else { 1658 vp = vn_alloc(mp); 1659 } 1660 counter_u64_add(vnodes_created, 1); 1661 /* 1662 * Locks are given the generic name "vnode" when created. 1663 * Follow the historic practice of using the filesystem 1664 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1665 * 1666 * Locks live in a witness group keyed on their name. Thus, 1667 * when a lock is renamed, it must also move from the witness 1668 * group of its old name to the witness group of its new name. 
1669 * 1670 * The change only needs to be made when the vnode moves 1671 * from one filesystem type to another. We ensure that each 1672 * filesystem use a single static name pointer for its tag so 1673 * that we can compare pointers rather than doing a strcmp(). 1674 */ 1675 lo = &vp->v_vnlock->lock_object; 1676 #ifdef WITNESS 1677 if (lo->lo_name != tag) { 1678 #endif 1679 lo->lo_name = tag; 1680 #ifdef WITNESS 1681 WITNESS_DESTROY(lo); 1682 WITNESS_INIT(lo, tag); 1683 } 1684 #endif 1685 /* 1686 * By default, don't allow shared locks unless filesystems opt-in. 1687 */ 1688 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1689 /* 1690 * Finalize various vnode identity bits. 1691 */ 1692 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1693 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1694 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1695 vp->v_type = VNON; 1696 vp->v_op = vops; 1697 v_init_counters(vp); 1698 vp->v_bufobj.bo_ops = &buf_ops_bio; 1699 #ifdef DIAGNOSTIC 1700 if (mp == NULL && vops != &dead_vnodeops) 1701 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1702 #endif 1703 #ifdef MAC 1704 mac_vnode_init(vp); 1705 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1706 mac_vnode_associate_singlelabel(mp, vp); 1707 #endif 1708 if (mp != NULL) { 1709 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1710 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1711 vp->v_vflag |= VV_NOKNOTE; 1712 } 1713 1714 /* 1715 * For the filesystems which do not use vfs_hash_insert(), 1716 * still initialize v_hash to have vfs_hash_index() useful. 1717 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1718 * its own hashing. 1719 */ 1720 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1721 1722 *vpp = vp; 1723 return (0); 1724 } 1725 1726 void 1727 getnewvnode_reserve(void) 1728 { 1729 struct thread *td; 1730 1731 td = curthread; 1732 MPASS(td->td_vp_reserved == NULL); 1733 td->td_vp_reserved = vn_alloc(NULL); 1734 } 1735 1736 void 1737 getnewvnode_drop_reserve(void) 1738 { 1739 struct thread *td; 1740 1741 td = curthread; 1742 if (td->td_vp_reserved != NULL) { 1743 vn_free(td->td_vp_reserved); 1744 td->td_vp_reserved = NULL; 1745 } 1746 } 1747 1748 static void 1749 freevnode(struct vnode *vp) 1750 { 1751 struct bufobj *bo; 1752 1753 /* 1754 * The vnode has been marked for destruction, so free it. 1755 * 1756 * The vnode will be returned to the zone where it will 1757 * normally remain until it is needed for another vnode. We 1758 * need to cleanup (or verify that the cleanup has already 1759 * been done) any residual data left from its current use 1760 * so as not to contaminate the freshly allocated vnode. 1761 */ 1762 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1763 /* 1764 * Paired with vgone. 
1765 */ 1766 vn_seqc_write_end_locked(vp); 1767 VNPASS(vp->v_seqc_users == 0, vp); 1768 1769 bo = &vp->v_bufobj; 1770 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1771 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 1772 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1773 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1774 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1775 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1776 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1777 ("clean blk trie not empty")); 1778 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1779 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1780 ("dirty blk trie not empty")); 1781 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1782 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1783 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1784 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1785 ("Dangling rangelock waiters")); 1786 VI_UNLOCK(vp); 1787 #ifdef MAC 1788 mac_vnode_destroy(vp); 1789 #endif 1790 if (vp->v_pollinfo != NULL) { 1791 destroy_vpollinfo(vp->v_pollinfo); 1792 vp->v_pollinfo = NULL; 1793 } 1794 #ifdef INVARIANTS 1795 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 1796 vp->v_op = NULL; 1797 #endif 1798 vp->v_mountedhere = NULL; 1799 vp->v_unpcb = NULL; 1800 vp->v_rdev = NULL; 1801 vp->v_fifoinfo = NULL; 1802 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 1803 vp->v_irflag = 0; 1804 vp->v_iflag = 0; 1805 vp->v_vflag = 0; 1806 bo->bo_flag = 0; 1807 vn_free(vp); 1808 } 1809 1810 /* 1811 * Delete from old mount point vnode list, if on one. 1812 */ 1813 static void 1814 delmntque(struct vnode *vp) 1815 { 1816 struct mount *mp; 1817 1818 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 1819 1820 mp = vp->v_mount; 1821 if (mp == NULL) 1822 return; 1823 MNT_ILOCK(mp); 1824 VI_LOCK(vp); 1825 vp->v_mount = NULL; 1826 VI_UNLOCK(vp); 1827 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1828 ("bad mount point vnode list size")); 1829 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1830 mp->mnt_nvnodelistsize--; 1831 MNT_REL(mp); 1832 MNT_IUNLOCK(mp); 1833 } 1834 1835 static void 1836 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1837 { 1838 1839 vp->v_data = NULL; 1840 vp->v_op = &dead_vnodeops; 1841 vgone(vp); 1842 vput(vp); 1843 } 1844 1845 /* 1846 * Insert into list of vnodes for the new mount point, if available. 1847 */ 1848 int 1849 insmntque1(struct vnode *vp, struct mount *mp, 1850 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1851 { 1852 1853 KASSERT(vp->v_mount == NULL, 1854 ("insmntque: vnode already on per mount vnode list")); 1855 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1856 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1857 1858 /* 1859 * We acquire the vnode interlock early to ensure that the 1860 * vnode cannot be recycled by another process releasing a 1861 * holdcnt on it before we get it on both the vnode list 1862 * and the active vnode list. The mount mutex protects only 1863 * manipulation of the vnode list and the vnode freelist 1864 * mutex protects only manipulation of the active vnode list. 1865 * Hence the need to hold the vnode interlock throughout. 
1866 */ 1867 MNT_ILOCK(mp); 1868 VI_LOCK(vp); 1869 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1870 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1871 mp->mnt_nvnodelistsize == 0)) && 1872 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1873 VI_UNLOCK(vp); 1874 MNT_IUNLOCK(mp); 1875 if (dtr != NULL) 1876 dtr(vp, dtr_arg); 1877 return (EBUSY); 1878 } 1879 vp->v_mount = mp; 1880 MNT_REF(mp); 1881 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1882 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1883 ("neg mount point vnode list size")); 1884 mp->mnt_nvnodelistsize++; 1885 VI_UNLOCK(vp); 1886 MNT_IUNLOCK(mp); 1887 return (0); 1888 } 1889 1890 int 1891 insmntque(struct vnode *vp, struct mount *mp) 1892 { 1893 1894 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1895 } 1896 1897 /* 1898 * Flush out and invalidate all buffers associated with a bufobj 1899 * Called with the underlying object locked. 1900 */ 1901 int 1902 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1903 { 1904 int error; 1905 1906 BO_LOCK(bo); 1907 if (flags & V_SAVE) { 1908 error = bufobj_wwait(bo, slpflag, slptimeo); 1909 if (error) { 1910 BO_UNLOCK(bo); 1911 return (error); 1912 } 1913 if (bo->bo_dirty.bv_cnt > 0) { 1914 BO_UNLOCK(bo); 1915 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1916 return (error); 1917 /* 1918 * XXX We could save a lock/unlock if this was only 1919 * enabled under INVARIANTS 1920 */ 1921 BO_LOCK(bo); 1922 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1923 panic("vinvalbuf: dirty bufs"); 1924 } 1925 } 1926 /* 1927 * If you alter this loop please notice that interlock is dropped and 1928 * reacquired in flushbuflist. Special care is needed to ensure that 1929 * no race conditions occur from this. 1930 */ 1931 do { 1932 error = flushbuflist(&bo->bo_clean, 1933 flags, bo, slpflag, slptimeo); 1934 if (error == 0 && !(flags & V_CLEANONLY)) 1935 error = flushbuflist(&bo->bo_dirty, 1936 flags, bo, slpflag, slptimeo); 1937 if (error != 0 && error != EAGAIN) { 1938 BO_UNLOCK(bo); 1939 return (error); 1940 } 1941 } while (error != 0); 1942 1943 /* 1944 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1945 * have write I/O in-progress but if there is a VM object then the 1946 * VM object can also have read-I/O in-progress. 1947 */ 1948 do { 1949 bufobj_wwait(bo, 0, 0); 1950 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1951 BO_UNLOCK(bo); 1952 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1953 BO_LOCK(bo); 1954 } 1955 } while (bo->bo_numoutput > 0); 1956 BO_UNLOCK(bo); 1957 1958 /* 1959 * Destroy the copy in the VM cache, too. 1960 */ 1961 if (bo->bo_object != NULL && 1962 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1963 VM_OBJECT_WLOCK(bo->bo_object); 1964 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1965 OBJPR_CLEANONLY : 0); 1966 VM_OBJECT_WUNLOCK(bo->bo_object); 1967 } 1968 1969 #ifdef INVARIANTS 1970 BO_LOCK(bo); 1971 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1972 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1973 bo->bo_clean.bv_cnt > 0)) 1974 panic("vinvalbuf: flush failed"); 1975 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1976 bo->bo_dirty.bv_cnt > 0) 1977 panic("vinvalbuf: flush dirty failed"); 1978 BO_UNLOCK(bo); 1979 #endif 1980 return (0); 1981 } 1982 1983 /* 1984 * Flush out and invalidate all buffers associated with a vnode. 1985 * Called with the underlying object locked. 
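 *
 * For example, vgonel() below first tries to preserve dirty data and
 * falls back to discarding everything if the sync fails:
 *
 *	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
 *		while (vinvalbuf(vp, 0, 0, 0) != 0)
 *			;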
1986 */ 1987 int 1988 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1989 { 1990 1991 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1992 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1993 if (vp->v_object != NULL && vp->v_object->handle != vp) 1994 return (0); 1995 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1996 } 1997 1998 /* 1999 * Flush out buffers on the specified list. 2000 * 2001 */ 2002 static int 2003 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2004 int slptimeo) 2005 { 2006 struct buf *bp, *nbp; 2007 int retval, error; 2008 daddr_t lblkno; 2009 b_xflags_t xflags; 2010 2011 ASSERT_BO_WLOCKED(bo); 2012 2013 retval = 0; 2014 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2015 /* 2016 * If we are flushing both V_NORMAL and V_ALT buffers then 2017 * do not skip any buffers. If we are flushing only V_NORMAL 2018 * buffers then skip buffers marked as BX_ALTDATA. If we are 2019 * flushing only V_ALT buffers then skip buffers not marked 2020 * as BX_ALTDATA. 2021 */ 2022 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2023 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2024 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2025 continue; 2026 } 2027 if (nbp != NULL) { 2028 lblkno = nbp->b_lblkno; 2029 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2030 } 2031 retval = EAGAIN; 2032 error = BUF_TIMELOCK(bp, 2033 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2034 "flushbuf", slpflag, slptimeo); 2035 if (error) { 2036 BO_LOCK(bo); 2037 return (error != ENOLCK ? error : EAGAIN); 2038 } 2039 KASSERT(bp->b_bufobj == bo, 2040 ("bp %p wrong b_bufobj %p should be %p", 2041 bp, bp->b_bufobj, bo)); 2042 /* 2043 * XXX Since there are no node locks for NFS, I 2044 * believe there is a slight chance that a delayed 2045 * write will occur while sleeping just above, so 2046 * check for it. 2047 */ 2048 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2049 (flags & V_SAVE)) { 2050 bremfree(bp); 2051 bp->b_flags |= B_ASYNC; 2052 bwrite(bp); 2053 BO_LOCK(bo); 2054 return (EAGAIN); /* XXX: why not loop ? */ 2055 } 2056 bremfree(bp); 2057 bp->b_flags |= (B_INVAL | B_RELBUF); 2058 bp->b_flags &= ~B_ASYNC; 2059 brelse(bp); 2060 BO_LOCK(bo); 2061 if (nbp == NULL) 2062 break; 2063 nbp = gbincore(bo, lblkno); 2064 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2065 != xflags) 2066 break; /* nbp invalid */ 2067 } 2068 return (retval); 2069 } 2070 2071 int 2072 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2073 { 2074 struct buf *bp; 2075 int error; 2076 daddr_t lblkno; 2077 2078 ASSERT_BO_LOCKED(bo); 2079 2080 for (lblkno = startn;;) { 2081 again: 2082 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2083 if (bp == NULL || bp->b_lblkno >= endn || 2084 bp->b_lblkno < startn) 2085 break; 2086 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2087 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2088 if (error != 0) { 2089 BO_RLOCK(bo); 2090 if (error == ENOLCK) 2091 goto again; 2092 return (error); 2093 } 2094 KASSERT(bp->b_bufobj == bo, 2095 ("bp %p wrong b_bufobj %p should be %p", 2096 bp, bp->b_bufobj, bo)); 2097 lblkno = bp->b_lblkno + 1; 2098 if ((bp->b_flags & B_MANAGED) == 0) 2099 bremfree(bp); 2100 bp->b_flags |= B_RELBUF; 2101 /* 2102 * In the VMIO case, use the B_NOREUSE flag to hint that the 2103 * pages backing each buffer in the range are unlikely to be 2104 * reused. 
Dirty buffers will have the hint applied once 2105 * they've been written. 2106 */ 2107 if ((bp->b_flags & B_VMIO) != 0) 2108 bp->b_flags |= B_NOREUSE; 2109 brelse(bp); 2110 BO_RLOCK(bo); 2111 } 2112 return (0); 2113 } 2114 2115 /* 2116 * Truncate a file's buffer and pages to a specified length. This 2117 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2118 * sync activity. 2119 */ 2120 int 2121 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2122 { 2123 struct buf *bp, *nbp; 2124 struct bufobj *bo; 2125 daddr_t startlbn; 2126 2127 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2128 vp, blksize, (uintmax_t)length); 2129 2130 /* 2131 * Round up to the *next* lbn. 2132 */ 2133 startlbn = howmany(length, blksize); 2134 2135 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2136 2137 bo = &vp->v_bufobj; 2138 restart_unlocked: 2139 BO_LOCK(bo); 2140 2141 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2142 ; 2143 2144 if (length > 0) { 2145 restartsync: 2146 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2147 if (bp->b_lblkno > 0) 2148 continue; 2149 /* 2150 * Since we hold the vnode lock this should only 2151 * fail if we're racing with the buf daemon. 2152 */ 2153 if (BUF_LOCK(bp, 2154 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2155 BO_LOCKPTR(bo)) == ENOLCK) 2156 goto restart_unlocked; 2157 2158 VNASSERT((bp->b_flags & B_DELWRI), vp, 2159 ("buf(%p) on dirty queue without DELWRI", bp)); 2160 2161 bremfree(bp); 2162 bawrite(bp); 2163 BO_LOCK(bo); 2164 goto restartsync; 2165 } 2166 } 2167 2168 bufobj_wwait(bo, 0, 0); 2169 BO_UNLOCK(bo); 2170 vnode_pager_setsize(vp, length); 2171 2172 return (0); 2173 } 2174 2175 /* 2176 * Invalidate the cached pages of a file's buffer within the range of block 2177 * numbers [startlbn, endlbn). 
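 *
 * The block range maps to byte offsets [startlbn * blksize,
 * endlbn * blksize). As a worked example, truncating a file with
 * 32768-byte blocks to length 100000 has vtruncbuf() start at
 * lbn howmany(100000, 32768) == 4, so everything from byte offset
 * 131072 on is invalidated while the partially valid block 3 is kept.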
2178 */ 2179 void 2180 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2181 int blksize) 2182 { 2183 struct bufobj *bo; 2184 off_t start, end; 2185 2186 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2187 2188 start = blksize * startlbn; 2189 end = blksize * endlbn; 2190 2191 bo = &vp->v_bufobj; 2192 BO_LOCK(bo); 2193 MPASS(blksize == bo->bo_bsize); 2194 2195 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2196 ; 2197 2198 BO_UNLOCK(bo); 2199 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2200 } 2201 2202 static int 2203 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2204 daddr_t startlbn, daddr_t endlbn) 2205 { 2206 struct buf *bp, *nbp; 2207 bool anyfreed; 2208 2209 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2210 ASSERT_BO_LOCKED(bo); 2211 2212 do { 2213 anyfreed = false; 2214 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2215 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2216 continue; 2217 if (BUF_LOCK(bp, 2218 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2219 BO_LOCKPTR(bo)) == ENOLCK) { 2220 BO_LOCK(bo); 2221 return (EAGAIN); 2222 } 2223 2224 bremfree(bp); 2225 bp->b_flags |= B_INVAL | B_RELBUF; 2226 bp->b_flags &= ~B_ASYNC; 2227 brelse(bp); 2228 anyfreed = true; 2229 2230 BO_LOCK(bo); 2231 if (nbp != NULL && 2232 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2233 nbp->b_vp != vp || 2234 (nbp->b_flags & B_DELWRI) != 0)) 2235 return (EAGAIN); 2236 } 2237 2238 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2239 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2240 continue; 2241 if (BUF_LOCK(bp, 2242 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2243 BO_LOCKPTR(bo)) == ENOLCK) { 2244 BO_LOCK(bo); 2245 return (EAGAIN); 2246 } 2247 bremfree(bp); 2248 bp->b_flags |= B_INVAL | B_RELBUF; 2249 bp->b_flags &= ~B_ASYNC; 2250 brelse(bp); 2251 anyfreed = true; 2252 2253 BO_LOCK(bo); 2254 if (nbp != NULL && 2255 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2256 (nbp->b_vp != vp) || 2257 (nbp->b_flags & B_DELWRI) == 0)) 2258 return (EAGAIN); 2259 } 2260 } while (anyfreed); 2261 return (0); 2262 } 2263 2264 static void 2265 buf_vlist_remove(struct buf *bp) 2266 { 2267 struct bufv *bv; 2268 2269 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2270 ASSERT_BO_WLOCKED(bp->b_bufobj); 2271 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2272 (BX_VNDIRTY|BX_VNCLEAN), 2273 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2274 if (bp->b_xflags & BX_VNDIRTY) 2275 bv = &bp->b_bufobj->bo_dirty; 2276 else 2277 bv = &bp->b_bufobj->bo_clean; 2278 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2279 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2280 bv->bv_cnt--; 2281 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2282 } 2283 2284 /* 2285 * Add the buffer to the sorted clean or dirty block list. 2286 * 2287 * NOTE: xflags is passed as a constant, optimizing this inline function! 
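 *
 * The insertion below handles three cases: an empty list or a buffer
 * past the current tail (the common append path), a buffer smaller
 * than every existing lblkno (new head), and the general case of
 * splicing in after the largest smaller-or-equal neighbor returned
 * by the pctrie lookup.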
2288 */ 2289 static void 2290 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2291 { 2292 struct bufv *bv; 2293 struct buf *n; 2294 int error; 2295 2296 ASSERT_BO_WLOCKED(bo); 2297 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2298 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2299 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2300 ("dead bo %p", bo)); 2301 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2302 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2303 bp->b_xflags |= xflags; 2304 if (xflags & BX_VNDIRTY) 2305 bv = &bo->bo_dirty; 2306 else 2307 bv = &bo->bo_clean; 2308 2309 /* 2310 * Keep the list ordered. Optimize empty list insertion. Assume 2311 * we tend to grow at the tail so lookup_le should usually be cheaper 2312 * than _ge. 2313 */ 2314 if (bv->bv_cnt == 0 || 2315 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2316 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2317 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2318 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2319 else 2320 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2321 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2322 if (error) 2323 panic("buf_vlist_add: Preallocated nodes insufficient."); 2324 bv->bv_cnt++; 2325 } 2326 2327 /* 2328 * Look up a buffer using the buffer tries. 2329 */ 2330 struct buf * 2331 gbincore(struct bufobj *bo, daddr_t lblkno) 2332 { 2333 struct buf *bp; 2334 2335 ASSERT_BO_LOCKED(bo); 2336 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2337 if (bp != NULL) 2338 return (bp); 2339 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2340 } 2341 2342 /* 2343 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2344 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2345 * stability of the result. Like other lockless lookups, the found buf may 2346 * already be invalid by the time this function returns. 2347 */ 2348 struct buf * 2349 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2350 { 2351 struct buf *bp; 2352 2353 ASSERT_BO_UNLOCKED(bo); 2354 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2355 if (bp != NULL) 2356 return (bp); 2357 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2358 } 2359 2360 /* 2361 * Associate a buffer with a vnode. 2362 */ 2363 void 2364 bgetvp(struct vnode *vp, struct buf *bp) 2365 { 2366 struct bufobj *bo; 2367 2368 bo = &vp->v_bufobj; 2369 ASSERT_BO_WLOCKED(bo); 2370 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2371 2372 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2373 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2374 ("bgetvp: bp already attached! %p", bp)); 2375 2376 vhold(vp); 2377 bp->b_vp = vp; 2378 bp->b_bufobj = bo; 2379 /* 2380 * Insert onto list for new vnode. 2381 */ 2382 buf_vlist_add(bp, bo, BX_VNCLEAN); 2383 } 2384 2385 /* 2386 * Disassociate a buffer from a vnode. 2387 */ 2388 void 2389 brelvp(struct buf *bp) 2390 { 2391 struct bufobj *bo; 2392 struct vnode *vp; 2393 2394 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2395 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2396 2397 /* 2398 * Delete from old vnode list, if on one. 
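	 *
	 * This undoes bgetvp(): the buffer leaves the clean or dirty
	 * list, the bufobj is taken off the syncer worklist if no
	 * dirty buffers remain, and the hold reference bgetvp() took
	 * with vhold() is released via vdrop() at the end.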
2399 */ 2400 vp = bp->b_vp; /* XXX */ 2401 bo = bp->b_bufobj; 2402 BO_LOCK(bo); 2403 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2404 buf_vlist_remove(bp); 2405 else 2406 panic("brelvp: Buffer %p not on queue.", bp); 2407 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2408 bo->bo_flag &= ~BO_ONWORKLST; 2409 mtx_lock(&sync_mtx); 2410 LIST_REMOVE(bo, bo_synclist); 2411 syncer_worklist_len--; 2412 mtx_unlock(&sync_mtx); 2413 } 2414 bp->b_vp = NULL; 2415 bp->b_bufobj = NULL; 2416 BO_UNLOCK(bo); 2417 vdrop(vp); 2418 } 2419 2420 /* 2421 * Add an item to the syncer work queue. 2422 */ 2423 static void 2424 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2425 { 2426 int slot; 2427 2428 ASSERT_BO_WLOCKED(bo); 2429 2430 mtx_lock(&sync_mtx); 2431 if (bo->bo_flag & BO_ONWORKLST) 2432 LIST_REMOVE(bo, bo_synclist); 2433 else { 2434 bo->bo_flag |= BO_ONWORKLST; 2435 syncer_worklist_len++; 2436 } 2437 2438 if (delay > syncer_maxdelay - 2) 2439 delay = syncer_maxdelay - 2; 2440 slot = (syncer_delayno + delay) & syncer_mask; 2441 2442 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2443 mtx_unlock(&sync_mtx); 2444 } 2445 2446 static int 2447 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2448 { 2449 int error, len; 2450 2451 mtx_lock(&sync_mtx); 2452 len = syncer_worklist_len - sync_vnode_count; 2453 mtx_unlock(&sync_mtx); 2454 error = SYSCTL_OUT(req, &len, sizeof(len)); 2455 return (error); 2456 } 2457 2458 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2459 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2460 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2461 2462 static struct proc *updateproc; 2463 static void sched_sync(void); 2464 static struct kproc_desc up_kp = { 2465 "syncer", 2466 sched_sync, 2467 &updateproc 2468 }; 2469 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2470 2471 static int 2472 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2473 { 2474 struct vnode *vp; 2475 struct mount *mp; 2476 2477 *bo = LIST_FIRST(slp); 2478 if (*bo == NULL) 2479 return (0); 2480 vp = bo2vnode(*bo); 2481 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2482 return (1); 2483 /* 2484 * We use vhold in case the vnode does not 2485 * successfully sync. vhold prevents the vnode from 2486 * going away when we unlock the sync_mtx so that 2487 * we can acquire the vnode interlock. 2488 */ 2489 vholdl(vp); 2490 mtx_unlock(&sync_mtx); 2491 VI_UNLOCK(vp); 2492 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2493 vdrop(vp); 2494 mtx_lock(&sync_mtx); 2495 return (*bo == LIST_FIRST(slp)); 2496 } 2497 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2498 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2499 VOP_UNLOCK(vp); 2500 vn_finished_write(mp); 2501 BO_LOCK(*bo); 2502 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2503 /* 2504 * Put us back on the worklist. The worklist 2505 * routine will remove us from our current 2506 * position and then add us back in at a later 2507 * position. 2508 */ 2509 vn_syncer_add_to_worklist(*bo, syncdelay); 2510 } 2511 BO_UNLOCK(*bo); 2512 vdrop(vp); 2513 mtx_lock(&sync_mtx); 2514 return (0); 2515 } 2516 2517 static int first_printf = 1; 2518 2519 /* 2520 * System filesystem synchronizer daemon. 
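 *
 * The syncer keeps a wheel of worklist slots, and
 * vn_syncer_add_to_worklist() above hashes a bufobj into a slot
 * roughly "delay" seconds ahead:
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *
 * Once per second this daemon advances syncer_delayno and drains the
 * slot it lands on, so dirty vnodes are flushed on an approximately
 * age-based schedule.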
2521 */ 2522 static void 2523 sched_sync(void) 2524 { 2525 struct synclist *next, *slp; 2526 struct bufobj *bo; 2527 long starttime; 2528 struct thread *td = curthread; 2529 int last_work_seen; 2530 int net_worklist_len; 2531 int syncer_final_iter; 2532 int error; 2533 2534 last_work_seen = 0; 2535 syncer_final_iter = 0; 2536 syncer_state = SYNCER_RUNNING; 2537 starttime = time_uptime; 2538 td->td_pflags |= TDP_NORUNNINGBUF; 2539 2540 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2541 SHUTDOWN_PRI_LAST); 2542 2543 mtx_lock(&sync_mtx); 2544 for (;;) { 2545 if (syncer_state == SYNCER_FINAL_DELAY && 2546 syncer_final_iter == 0) { 2547 mtx_unlock(&sync_mtx); 2548 kproc_suspend_check(td->td_proc); 2549 mtx_lock(&sync_mtx); 2550 } 2551 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2552 if (syncer_state != SYNCER_RUNNING && 2553 starttime != time_uptime) { 2554 if (first_printf) { 2555 printf("\nSyncing disks, vnodes remaining... "); 2556 first_printf = 0; 2557 } 2558 printf("%d ", net_worklist_len); 2559 } 2560 starttime = time_uptime; 2561 2562 /* 2563 * Push files whose dirty time has expired. Be careful 2564 * of interrupt race on slp queue. 2565 * 2566 * Skip over empty worklist slots when shutting down. 2567 */ 2568 do { 2569 slp = &syncer_workitem_pending[syncer_delayno]; 2570 syncer_delayno += 1; 2571 if (syncer_delayno == syncer_maxdelay) 2572 syncer_delayno = 0; 2573 next = &syncer_workitem_pending[syncer_delayno]; 2574 /* 2575 * If the worklist has wrapped since the 2576 * it was emptied of all but syncer vnodes, 2577 * switch to the FINAL_DELAY state and run 2578 * for one more second. 2579 */ 2580 if (syncer_state == SYNCER_SHUTTING_DOWN && 2581 net_worklist_len == 0 && 2582 last_work_seen == syncer_delayno) { 2583 syncer_state = SYNCER_FINAL_DELAY; 2584 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2585 } 2586 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2587 syncer_worklist_len > 0); 2588 2589 /* 2590 * Keep track of the last time there was anything 2591 * on the worklist other than syncer vnodes. 2592 * Return to the SHUTTING_DOWN state if any 2593 * new work appears. 2594 */ 2595 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2596 last_work_seen = syncer_delayno; 2597 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2598 syncer_state = SYNCER_SHUTTING_DOWN; 2599 while (!LIST_EMPTY(slp)) { 2600 error = sync_vnode(slp, &bo, td); 2601 if (error == 1) { 2602 LIST_REMOVE(bo, bo_synclist); 2603 LIST_INSERT_HEAD(next, bo, bo_synclist); 2604 continue; 2605 } 2606 2607 if (first_printf == 0) { 2608 /* 2609 * Drop the sync mutex, because some watchdog 2610 * drivers need to sleep while patting 2611 */ 2612 mtx_unlock(&sync_mtx); 2613 wdog_kern_pat(WD_LASTVAL); 2614 mtx_lock(&sync_mtx); 2615 } 2616 2617 } 2618 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2619 syncer_final_iter--; 2620 /* 2621 * The variable rushjob allows the kernel to speed up the 2622 * processing of the filesystem syncer process. A rushjob 2623 * value of N tells the filesystem syncer to process the next 2624 * N seconds worth of work on its queue ASAP. Currently rushjob 2625 * is used by the soft update code to speed up the filesystem 2626 * syncer process when the incore state is getting so far 2627 * ahead of the disk that the kernel memory pool is being 2628 * threatened with exhaustion. 
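	 *
	 * For example, each call to speedup_syncer() below increments
	 * rushjob by one, causing this loop to skip its one-second
	 * cv_timedwait() and immediately process the next slot, i.e.
	 * one second of the wheel is consumed per increment.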
2629 */ 2630 if (rushjob > 0) { 2631 rushjob -= 1; 2632 continue; 2633 } 2634 /* 2635 * Just sleep for a short period of time between 2636 * iterations when shutting down to allow some I/O 2637 * to happen. 2638 * 2639 * If it has taken us less than a second to process the 2640 * current work, then wait. Otherwise start right over 2641 * again. We can still lose time if any single round 2642 * takes more than two seconds, but it does not really 2643 * matter as we are just trying to generally pace the 2644 * filesystem activity. 2645 */ 2646 if (syncer_state != SYNCER_RUNNING || 2647 time_uptime == starttime) { 2648 thread_lock(td); 2649 sched_prio(td, PPAUSE); 2650 thread_unlock(td); 2651 } 2652 if (syncer_state != SYNCER_RUNNING) 2653 cv_timedwait(&sync_wakeup, &sync_mtx, 2654 hz / SYNCER_SHUTDOWN_SPEEDUP); 2655 else if (time_uptime == starttime) 2656 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2657 } 2658 } 2659 2660 /* 2661 * Request the syncer daemon to speed up its work. 2662 * We never push it to speed up more than half of its 2663 * normal turn time, otherwise it could take over the cpu. 2664 */ 2665 int 2666 speedup_syncer(void) 2667 { 2668 int ret = 0; 2669 2670 mtx_lock(&sync_mtx); 2671 if (rushjob < syncdelay / 2) { 2672 rushjob += 1; 2673 stat_rush_requests += 1; 2674 ret = 1; 2675 } 2676 mtx_unlock(&sync_mtx); 2677 cv_broadcast(&sync_wakeup); 2678 return (ret); 2679 } 2680 2681 /* 2682 * Tell the syncer to speed up its work and run though its work 2683 * list several times, then tell it to shut down. 2684 */ 2685 static void 2686 syncer_shutdown(void *arg, int howto) 2687 { 2688 2689 if (howto & RB_NOSYNC) 2690 return; 2691 mtx_lock(&sync_mtx); 2692 syncer_state = SYNCER_SHUTTING_DOWN; 2693 rushjob = 0; 2694 mtx_unlock(&sync_mtx); 2695 cv_broadcast(&sync_wakeup); 2696 kproc_shutdown(arg, howto); 2697 } 2698 2699 void 2700 syncer_suspend(void) 2701 { 2702 2703 syncer_shutdown(updateproc, 0); 2704 } 2705 2706 void 2707 syncer_resume(void) 2708 { 2709 2710 mtx_lock(&sync_mtx); 2711 first_printf = 1; 2712 syncer_state = SYNCER_RUNNING; 2713 mtx_unlock(&sync_mtx); 2714 cv_broadcast(&sync_wakeup); 2715 kproc_resume(updateproc); 2716 } 2717 2718 /* 2719 * Reassign a buffer from one vnode to another. 2720 * Used to assign file specific control information 2721 * (indirect blocks) to the vnode to which they belong. 2722 */ 2723 void 2724 reassignbuf(struct buf *bp) 2725 { 2726 struct vnode *vp; 2727 struct bufobj *bo; 2728 int delay; 2729 #ifdef INVARIANTS 2730 struct bufv *bv; 2731 #endif 2732 2733 vp = bp->b_vp; 2734 bo = bp->b_bufobj; 2735 ++reassignbufcalls; 2736 2737 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2738 bp, bp->b_vp, bp->b_flags); 2739 /* 2740 * B_PAGING flagged buffers cannot be reassigned because their vp 2741 * is not fully linked in. 2742 */ 2743 if (bp->b_flags & B_PAGING) 2744 panic("cannot reassign paging buffer"); 2745 2746 /* 2747 * Delete from old vnode list, if on one. 2748 */ 2749 BO_LOCK(bo); 2750 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2751 buf_vlist_remove(bp); 2752 else 2753 panic("reassignbuf: Buffer %p not on queue.", bp); 2754 /* 2755 * If dirty, put on list of dirty buffers; otherwise insert onto list 2756 * of clean buffers. 
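	 *
	 * The delay chosen below biases the flushing order: with the
	 * default tunables, device metadata (metadelay) ages out
	 * before directories (dirdelay), which age out before regular
	 * file data (filedelay), so namespace and metadata changes
	 * tend to reach the disk sooner.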
2757 */ 2758 if (bp->b_flags & B_DELWRI) { 2759 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2760 switch (vp->v_type) { 2761 case VDIR: 2762 delay = dirdelay; 2763 break; 2764 case VCHR: 2765 delay = metadelay; 2766 break; 2767 default: 2768 delay = filedelay; 2769 } 2770 vn_syncer_add_to_worklist(bo, delay); 2771 } 2772 buf_vlist_add(bp, bo, BX_VNDIRTY); 2773 } else { 2774 buf_vlist_add(bp, bo, BX_VNCLEAN); 2775 2776 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2777 mtx_lock(&sync_mtx); 2778 LIST_REMOVE(bo, bo_synclist); 2779 syncer_worklist_len--; 2780 mtx_unlock(&sync_mtx); 2781 bo->bo_flag &= ~BO_ONWORKLST; 2782 } 2783 } 2784 #ifdef INVARIANTS 2785 bv = &bo->bo_clean; 2786 bp = TAILQ_FIRST(&bv->bv_hd); 2787 KASSERT(bp == NULL || bp->b_bufobj == bo, 2788 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2789 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2790 KASSERT(bp == NULL || bp->b_bufobj == bo, 2791 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2792 bv = &bo->bo_dirty; 2793 bp = TAILQ_FIRST(&bv->bv_hd); 2794 KASSERT(bp == NULL || bp->b_bufobj == bo, 2795 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2796 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2797 KASSERT(bp == NULL || bp->b_bufobj == bo, 2798 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2799 #endif 2800 BO_UNLOCK(bo); 2801 } 2802 2803 static void 2804 v_init_counters(struct vnode *vp) 2805 { 2806 2807 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2808 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2809 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2810 2811 refcount_init(&vp->v_holdcnt, 1); 2812 refcount_init(&vp->v_usecount, 1); 2813 } 2814 2815 /* 2816 * Increment si_usecount of the associated device, if any. 2817 */ 2818 static void 2819 v_incr_devcount(struct vnode *vp) 2820 { 2821 2822 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2823 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2824 dev_lock(); 2825 vp->v_rdev->si_usecount++; 2826 dev_unlock(); 2827 } 2828 } 2829 2830 /* 2831 * Decrement si_usecount of the associated device, if any. 2832 * 2833 * The caller is required to hold the interlock when transitioning a VCHR use 2834 * count to zero. This prevents a race with devfs_reclaim_vchr() that would 2835 * leak a si_usecount reference. The vnode lock will also prevent this race 2836 * if it is held while dropping the last ref. 2837 * 2838 * The race is: 2839 * 2840 * CPU1 CPU2 2841 * devfs_reclaim_vchr 2842 * make v_usecount == 0 2843 * VI_LOCK 2844 * sees v_usecount == 0, no updates 2845 * vp->v_rdev = NULL; 2846 * ... 2847 * VI_UNLOCK 2848 * VI_LOCK 2849 * v_decr_devcount 2850 * sees v_rdev == NULL, no updates 2851 * 2852 * In this scenario si_devcount decrement is not performed. 2853 */ 2854 static void 2855 v_decr_devcount(struct vnode *vp) 2856 { 2857 2858 ASSERT_VOP_LOCKED(vp, __func__); 2859 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2860 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2861 dev_lock(); 2862 VNPASS(vp->v_rdev->si_usecount > 0, vp); 2863 vp->v_rdev->si_usecount--; 2864 dev_unlock(); 2865 } 2866 } 2867 2868 /* 2869 * Grab a particular vnode from the free list, increment its 2870 * reference count and lock it. VIRF_DOOMED is set if the vnode 2871 * is being destroyed. Only callers who specify LK_RETRY will 2872 * see doomed vnodes. If inactive processing was delayed in 2873 * vput try to do it here. 2874 * 2875 * usecount is manipulated using atomics without holding any locks. 
2876 * 2877 * holdcnt can be manipulated using atomics without holding any locks, 2878 * except when transitioning 1<->0, in which case the interlock is held. 2879 * 2880 * Consumers which don't guarantee liveness of the vnode can use SMR to 2881 * try to get a reference. Note this operation can fail since the vnode 2882 * may be awaiting getting freed by the time they get to it. 2883 */ 2884 enum vgetstate 2885 vget_prep_smr(struct vnode *vp) 2886 { 2887 enum vgetstate vs; 2888 2889 VFS_SMR_ASSERT_ENTERED(); 2890 2891 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2892 vs = VGET_USECOUNT; 2893 } else { 2894 if (vhold_smr(vp)) 2895 vs = VGET_HOLDCNT; 2896 else 2897 vs = VGET_NONE; 2898 } 2899 return (vs); 2900 } 2901 2902 enum vgetstate 2903 vget_prep(struct vnode *vp) 2904 { 2905 enum vgetstate vs; 2906 2907 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2908 vs = VGET_USECOUNT; 2909 } else { 2910 vhold(vp); 2911 vs = VGET_HOLDCNT; 2912 } 2913 return (vs); 2914 } 2915 2916 void 2917 vget_abort(struct vnode *vp, enum vgetstate vs) 2918 { 2919 2920 switch (vs) { 2921 case VGET_USECOUNT: 2922 vrele(vp); 2923 break; 2924 case VGET_HOLDCNT: 2925 vdrop(vp); 2926 break; 2927 default: 2928 __assert_unreachable(); 2929 } 2930 } 2931 2932 int 2933 vget(struct vnode *vp, int flags, struct thread *td) 2934 { 2935 enum vgetstate vs; 2936 2937 MPASS(td == curthread); 2938 2939 vs = vget_prep(vp); 2940 return (vget_finish(vp, flags, vs)); 2941 } 2942 2943 static void __noinline 2944 vget_finish_vchr(struct vnode *vp) 2945 { 2946 2947 VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)")); 2948 2949 /* 2950 * See the comment in vget_finish before usecount bump. 2951 */ 2952 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2953 #ifdef INVARIANTS 2954 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2955 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); 2956 #else 2957 refcount_release(&vp->v_holdcnt); 2958 #endif 2959 return; 2960 } 2961 2962 VI_LOCK(vp); 2963 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2964 #ifdef INVARIANTS 2965 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2966 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2967 #else 2968 refcount_release(&vp->v_holdcnt); 2969 #endif 2970 VI_UNLOCK(vp); 2971 return; 2972 } 2973 v_incr_devcount(vp); 2974 refcount_acquire(&vp->v_usecount); 2975 VI_UNLOCK(vp); 2976 } 2977 2978 int 2979 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2980 { 2981 int error; 2982 2983 if ((flags & LK_INTERLOCK) != 0) 2984 ASSERT_VI_LOCKED(vp, __func__); 2985 else 2986 ASSERT_VI_UNLOCKED(vp, __func__); 2987 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2988 VNPASS(vp->v_holdcnt > 0, vp); 2989 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2990 2991 error = vn_lock(vp, flags); 2992 if (__predict_false(error != 0)) { 2993 vget_abort(vp, vs); 2994 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2995 vp); 2996 return (error); 2997 } 2998 2999 vget_finish_ref(vp, vs); 3000 return (0); 3001 } 3002 3003 void 3004 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3005 { 3006 int old; 3007 3008 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3009 VNPASS(vp->v_holdcnt > 0, vp); 3010 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3011 3012 if (vs == VGET_USECOUNT) 3013 return; 3014 3015 if (__predict_false(vp->v_type == VCHR)) { 3016 vget_finish_vchr(vp); 3017 return; 3018 } 3019 3020 /* 3021 * We hold the vnode. 
If the usecount is 0 it will be utilized to keep
	 * the vnode around. Otherwise someone else lent their hold count and
	 * we have to drop ours.
	 */
	old = atomic_fetchadd_int(&vp->v_usecount, 1);
	VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
	if (old != 0) {
#ifdef INVARIANTS
		old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
		VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
#else
		refcount_release(&vp->v_holdcnt);
#endif
	}
}

/*
 * Increase the reference (use) and hold count of a vnode.
 * This will also remove the vnode from the free list if it is presently free.
 */
static void __noinline
vref_vchr(struct vnode *vp, bool interlock)
{

	/*
	 * See the comment in vget_finish before usecount bump.
	 */
	if (!interlock) {
		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
			VNODE_REFCOUNT_FENCE_ACQ();
			VNASSERT(vp->v_holdcnt > 0, vp,
			    ("%s: active vnode not held", __func__));
			return;
		}
		VI_LOCK(vp);
		/*
		 * By the time we get here the vnode might have been doomed,
		 * at which point the 0->1 use count transition is no longer
		 * protected by the interlock. Since it can't bounce back to
		 * VCHR and requires vref semantics, punt it back.
		 */
		if (__predict_false(vp->v_type == VBAD)) {
			VI_UNLOCK(vp);
			vref(vp);
			return;
		}
	}
	VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR"));
	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
		VNODE_REFCOUNT_FENCE_ACQ();
		VNASSERT(vp->v_holdcnt > 0, vp,
		    ("%s: active vnode not held", __func__));
		if (!interlock)
			VI_UNLOCK(vp);
		return;
	}
	vhold(vp);
	v_incr_devcount(vp);
	refcount_acquire(&vp->v_usecount);
	if (!interlock)
		VI_UNLOCK(vp);
	return;
}

void
vref(struct vnode *vp)
{
	int old;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	if (__predict_false(vp->v_type == VCHR)) {
		vref_vchr(vp, false);
		return;
	}

	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
		VNODE_REFCOUNT_FENCE_ACQ();
		VNASSERT(vp->v_holdcnt > 0, vp,
		    ("%s: active vnode not held", __func__));
		return;
	}
	vhold(vp);
	/*
	 * See the comment in vget_finish.
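	 *
	 * As in vget_finish_ref(): if the pre-increment usecount was
	 * zero, the hold reference acquired above is consumed to back
	 * the new use reference; otherwise an active user already
	 * provided one and ours is given back.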
3105 */ 3106 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3107 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3108 if (old != 0) { 3109 #ifdef INVARIANTS 3110 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3111 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3112 #else 3113 refcount_release(&vp->v_holdcnt); 3114 #endif 3115 } 3116 } 3117 3118 void 3119 vrefl(struct vnode *vp) 3120 { 3121 3122 ASSERT_VI_LOCKED(vp, __func__); 3123 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3124 if (__predict_false(vp->v_type == VCHR)) { 3125 vref_vchr(vp, true); 3126 return; 3127 } 3128 vref(vp); 3129 } 3130 3131 void 3132 vrefact(struct vnode *vp) 3133 { 3134 3135 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3136 #ifdef INVARIANTS 3137 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3138 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3139 #else 3140 refcount_acquire(&vp->v_usecount); 3141 #endif 3142 } 3143 3144 void 3145 vrefactn(struct vnode *vp, u_int n) 3146 { 3147 3148 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3149 #ifdef INVARIANTS 3150 int old = atomic_fetchadd_int(&vp->v_usecount, n); 3151 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3152 #else 3153 atomic_add_int(&vp->v_usecount, n); 3154 #endif 3155 } 3156 3157 /* 3158 * Return reference count of a vnode. 3159 * 3160 * The results of this call are only guaranteed when some mechanism is used to 3161 * stop other processes from gaining references to the vnode. This may be the 3162 * case if the caller holds the only reference. This is also useful when stale 3163 * data is acceptable as race conditions may be accounted for by some other 3164 * means. 3165 */ 3166 int 3167 vrefcnt(struct vnode *vp) 3168 { 3169 3170 return (vp->v_usecount); 3171 } 3172 3173 void 3174 vlazy(struct vnode *vp) 3175 { 3176 struct mount *mp; 3177 3178 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3179 3180 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3181 return; 3182 /* 3183 * We may get here for inactive routines after the vnode got doomed. 3184 */ 3185 if (VN_IS_DOOMED(vp)) 3186 return; 3187 mp = vp->v_mount; 3188 mtx_lock(&mp->mnt_listmtx); 3189 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3190 vp->v_mflag |= VMP_LAZYLIST; 3191 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3192 mp->mnt_lazyvnodelistsize++; 3193 } 3194 mtx_unlock(&mp->mnt_listmtx); 3195 } 3196 3197 /* 3198 * This routine is only meant to be called from vgonel prior to dooming 3199 * the vnode. 
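 *
 * vgonel() invokes it while holding both the vnode lock and the
 * interlock, immediately before setting VIRF_DOOMED, which ensures a
 * doomed vnode is never left linked on the per-mount lazy list.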
3200 */ 3201 static void 3202 vunlazy_gone(struct vnode *vp) 3203 { 3204 struct mount *mp; 3205 3206 ASSERT_VOP_ELOCKED(vp, __func__); 3207 ASSERT_VI_LOCKED(vp, __func__); 3208 VNPASS(!VN_IS_DOOMED(vp), vp); 3209 3210 if (vp->v_mflag & VMP_LAZYLIST) { 3211 mp = vp->v_mount; 3212 mtx_lock(&mp->mnt_listmtx); 3213 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3214 vp->v_mflag &= ~VMP_LAZYLIST; 3215 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3216 mp->mnt_lazyvnodelistsize--; 3217 mtx_unlock(&mp->mnt_listmtx); 3218 } 3219 } 3220 3221 static void 3222 vdefer_inactive(struct vnode *vp) 3223 { 3224 3225 ASSERT_VI_LOCKED(vp, __func__); 3226 VNASSERT(vp->v_holdcnt > 0, vp, 3227 ("%s: vnode without hold count", __func__)); 3228 if (VN_IS_DOOMED(vp)) { 3229 vdropl(vp); 3230 return; 3231 } 3232 if (vp->v_iflag & VI_DEFINACT) { 3233 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3234 vdropl(vp); 3235 return; 3236 } 3237 if (vp->v_usecount > 0) { 3238 vp->v_iflag &= ~VI_OWEINACT; 3239 vdropl(vp); 3240 return; 3241 } 3242 vlazy(vp); 3243 vp->v_iflag |= VI_DEFINACT; 3244 VI_UNLOCK(vp); 3245 counter_u64_add(deferred_inact, 1); 3246 } 3247 3248 static void 3249 vdefer_inactive_unlocked(struct vnode *vp) 3250 { 3251 3252 VI_LOCK(vp); 3253 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3254 vdropl(vp); 3255 return; 3256 } 3257 vdefer_inactive(vp); 3258 } 3259 3260 enum vput_op { VRELE, VPUT, VUNREF }; 3261 3262 /* 3263 * Handle ->v_usecount transitioning to 0. 3264 * 3265 * By releasing the last usecount we take ownership of the hold count which 3266 * provides liveness of the vnode, meaning we have to vdrop. 3267 * 3268 * If the vnode is of type VCHR we may need to decrement si_usecount, see 3269 * v_decr_devcount for details. 3270 * 3271 * For all vnodes we may need to perform inactive processing. It requires an 3272 * exclusive lock on the vnode, while it is legal to call here with only a 3273 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3274 * inactive processing gets deferred to the syncer. 3275 * 3276 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3277 * on the lock being held all the way until VOP_INACTIVE. This in particular 3278 * happens with UFS which adds half-constructed vnodes to the hash, where they 3279 * can be found by other code. 3280 */ 3281 static void 3282 vput_final(struct vnode *vp, enum vput_op func) 3283 { 3284 int error; 3285 bool want_unlock; 3286 3287 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3288 VNPASS(vp->v_holdcnt > 0, vp); 3289 3290 VI_LOCK(vp); 3291 if (__predict_false(vp->v_type == VCHR && func != VRELE)) 3292 v_decr_devcount(vp); 3293 3294 /* 3295 * By the time we got here someone else might have transitioned 3296 * the count back to > 0. 3297 */ 3298 if (vp->v_usecount > 0) 3299 goto out; 3300 3301 /* 3302 * If the vnode is doomed vgone already performed inactive processing 3303 * (if needed). 3304 */ 3305 if (VN_IS_DOOMED(vp)) 3306 goto out; 3307 3308 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3309 goto out; 3310 3311 if (vp->v_iflag & VI_DOINGINACT) 3312 goto out; 3313 3314 /* 3315 * Locking operations here will drop the interlock and possibly the 3316 * vnode lock, opening a window where the vnode can get doomed all the 3317 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3318 * perform inactive. 
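	 *
	 * Lock state on entry, per operation:
	 *
	 *	VRELE  - unlocked; an exclusive lock is taken here if
	 *		 inactive processing is needed
	 *	VPUT   - locked; a non-blocking upgrade is attempted if
	 *		 the lock is not already exclusive, and the
	 *		 vnode is unlocked on return
	 *	VUNREF - locked; a try-upgrade is attempted and the
	 *		 vnode remains locked on return
	 *
	 * Whenever the exclusive lock cannot be obtained, inactive
	 * processing is deferred to the syncer via vdefer_inactive().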
3319 */ 3320 vp->v_iflag |= VI_OWEINACT; 3321 want_unlock = false; 3322 error = 0; 3323 switch (func) { 3324 case VRELE: 3325 switch (VOP_ISLOCKED(vp)) { 3326 case LK_EXCLUSIVE: 3327 break; 3328 case LK_EXCLOTHER: 3329 case 0: 3330 want_unlock = true; 3331 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3332 VI_LOCK(vp); 3333 break; 3334 default: 3335 /* 3336 * The lock has at least one sharer, but we have no way 3337 * to conclude whether this is us. Play it safe and 3338 * defer processing. 3339 */ 3340 error = EAGAIN; 3341 break; 3342 } 3343 break; 3344 case VPUT: 3345 want_unlock = true; 3346 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3347 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3348 LK_NOWAIT); 3349 VI_LOCK(vp); 3350 } 3351 break; 3352 case VUNREF: 3353 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3354 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3355 VI_LOCK(vp); 3356 } 3357 break; 3358 } 3359 if (error == 0) { 3360 vinactive(vp); 3361 if (want_unlock) 3362 VOP_UNLOCK(vp); 3363 vdropl(vp); 3364 } else { 3365 vdefer_inactive(vp); 3366 } 3367 return; 3368 out: 3369 if (func == VPUT) 3370 VOP_UNLOCK(vp); 3371 vdropl(vp); 3372 } 3373 3374 /* 3375 * Decrement ->v_usecount for a vnode. 3376 * 3377 * Releasing the last use count requires additional processing, see vput_final 3378 * above for details. 3379 * 3380 * Note that releasing use count without the vnode lock requires special casing 3381 * for VCHR, see v_decr_devcount for details. 3382 * 3383 * Comment above each variant denotes lock state on entry and exit. 3384 */ 3385 3386 static void __noinline 3387 vrele_vchr(struct vnode *vp) 3388 { 3389 3390 if (refcount_release_if_not_last(&vp->v_usecount)) 3391 return; 3392 VI_LOCK(vp); 3393 if (!refcount_release(&vp->v_usecount)) { 3394 VI_UNLOCK(vp); 3395 return; 3396 } 3397 v_decr_devcount(vp); 3398 VI_UNLOCK(vp); 3399 vput_final(vp, VRELE); 3400 } 3401 3402 /* 3403 * in: any 3404 * out: same as passed in 3405 */ 3406 void 3407 vrele(struct vnode *vp) 3408 { 3409 3410 ASSERT_VI_UNLOCKED(vp, __func__); 3411 if (__predict_false(vp->v_type == VCHR)) { 3412 vrele_vchr(vp); 3413 return; 3414 } 3415 if (!refcount_release(&vp->v_usecount)) 3416 return; 3417 vput_final(vp, VRELE); 3418 } 3419 3420 /* 3421 * in: locked 3422 * out: unlocked 3423 */ 3424 void 3425 vput(struct vnode *vp) 3426 { 3427 3428 ASSERT_VOP_LOCKED(vp, __func__); 3429 ASSERT_VI_UNLOCKED(vp, __func__); 3430 if (!refcount_release(&vp->v_usecount)) { 3431 VOP_UNLOCK(vp); 3432 return; 3433 } 3434 vput_final(vp, VPUT); 3435 } 3436 3437 /* 3438 * in: locked 3439 * out: locked 3440 */ 3441 void 3442 vunref(struct vnode *vp) 3443 { 3444 3445 ASSERT_VOP_LOCKED(vp, __func__); 3446 ASSERT_VI_UNLOCKED(vp, __func__); 3447 if (!refcount_release(&vp->v_usecount)) 3448 return; 3449 vput_final(vp, VUNREF); 3450 } 3451 3452 void 3453 vhold(struct vnode *vp) 3454 { 3455 struct vdbatch *vd; 3456 int old; 3457 3458 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3459 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3460 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3461 ("%s: wrong hold count %d", __func__, old)); 3462 if (old != 0) 3463 return; 3464 critical_enter(); 3465 vd = DPCPU_PTR(vd); 3466 vd->freevnodes--; 3467 critical_exit(); 3468 } 3469 3470 void 3471 vholdl(struct vnode *vp) 3472 { 3473 3474 ASSERT_VI_LOCKED(vp, __func__); 3475 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3476 vhold(vp); 3477 } 3478 3479 void 3480 vholdnz(struct vnode *vp) 3481 { 3482 3483 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3484 #ifdef INVARIANTS 3485 
int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3486 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3487 ("%s: wrong hold count %d", __func__, old)); 3488 #else 3489 atomic_add_int(&vp->v_holdcnt, 1); 3490 #endif 3491 } 3492 3493 /* 3494 * Grab a hold count unless the vnode is freed. 3495 * 3496 * Only use this routine if vfs smr is the only protection you have against 3497 * freeing the vnode. 3498 * 3499 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3500 * is not set. After the flag is set the vnode becomes immutable to anyone but 3501 * the thread which managed to set the flag. 3502 * 3503 * It may be tempting to replace the loop with: 3504 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3505 * if (count & VHOLD_NO_SMR) { 3506 * backpedal and error out; 3507 * } 3508 * 3509 * However, while this is more performant, it hinders debugging by eliminating 3510 * the previously mentioned invariant. 3511 */ 3512 bool 3513 vhold_smr(struct vnode *vp) 3514 { 3515 int count; 3516 3517 VFS_SMR_ASSERT_ENTERED(); 3518 3519 count = atomic_load_int(&vp->v_holdcnt); 3520 for (;;) { 3521 if (count & VHOLD_NO_SMR) { 3522 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3523 ("non-zero hold count with flags %d\n", count)); 3524 return (false); 3525 } 3526 3527 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3528 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) 3529 return (true); 3530 } 3531 } 3532 3533 static void __noinline 3534 vdbatch_process(struct vdbatch *vd) 3535 { 3536 struct vnode *vp; 3537 int i; 3538 3539 mtx_assert(&vd->lock, MA_OWNED); 3540 MPASS(curthread->td_pinned > 0); 3541 MPASS(vd->index == VDBATCH_SIZE); 3542 3543 mtx_lock(&vnode_list_mtx); 3544 critical_enter(); 3545 freevnodes += vd->freevnodes; 3546 for (i = 0; i < VDBATCH_SIZE; i++) { 3547 vp = vd->tab[i]; 3548 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3549 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3550 MPASS(vp->v_dbatchcpu != NOCPU); 3551 vp->v_dbatchcpu = NOCPU; 3552 } 3553 mtx_unlock(&vnode_list_mtx); 3554 vd->freevnodes = 0; 3555 bzero(vd->tab, sizeof(vd->tab)); 3556 vd->index = 0; 3557 critical_exit(); 3558 } 3559 3560 static void 3561 vdbatch_enqueue(struct vnode *vp) 3562 { 3563 struct vdbatch *vd; 3564 3565 ASSERT_VI_LOCKED(vp, __func__); 3566 VNASSERT(!VN_IS_DOOMED(vp), vp, 3567 ("%s: deferring requeue of a doomed vnode", __func__)); 3568 3569 critical_enter(); 3570 vd = DPCPU_PTR(vd); 3571 vd->freevnodes++; 3572 if (vp->v_dbatchcpu != NOCPU) { 3573 VI_UNLOCK(vp); 3574 critical_exit(); 3575 return; 3576 } 3577 3578 sched_pin(); 3579 critical_exit(); 3580 mtx_lock(&vd->lock); 3581 MPASS(vd->index < VDBATCH_SIZE); 3582 MPASS(vd->tab[vd->index] == NULL); 3583 /* 3584 * A hack: we depend on being pinned so that we know what to put in 3585 * ->v_dbatchcpu. 3586 */ 3587 vp->v_dbatchcpu = curcpu; 3588 vd->tab[vd->index] = vp; 3589 vd->index++; 3590 VI_UNLOCK(vp); 3591 if (vd->index == VDBATCH_SIZE) 3592 vdbatch_process(vd); 3593 mtx_unlock(&vd->lock); 3594 sched_unpin(); 3595 } 3596 3597 /* 3598 * This routine must only be called for vnodes which are about to be 3599 * deallocated. Supporting dequeue for arbitrary vndoes would require 3600 * validating that the locked batch matches. 
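 *
 * Without this removal a freed vnode could linger in some CPU's
 * pending batch, and a later vdbatch_process() would walk and requeue
 * memory that no longer represents a vnode.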
3601 */ 3602 static void 3603 vdbatch_dequeue(struct vnode *vp) 3604 { 3605 struct vdbatch *vd; 3606 int i; 3607 short cpu; 3608 3609 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3610 ("%s: called for a used vnode\n", __func__)); 3611 3612 cpu = vp->v_dbatchcpu; 3613 if (cpu == NOCPU) 3614 return; 3615 3616 vd = DPCPU_ID_PTR(cpu, vd); 3617 mtx_lock(&vd->lock); 3618 for (i = 0; i < vd->index; i++) { 3619 if (vd->tab[i] != vp) 3620 continue; 3621 vp->v_dbatchcpu = NOCPU; 3622 vd->index--; 3623 vd->tab[i] = vd->tab[vd->index]; 3624 vd->tab[vd->index] = NULL; 3625 break; 3626 } 3627 mtx_unlock(&vd->lock); 3628 /* 3629 * Either we dequeued the vnode above or the target CPU beat us to it. 3630 */ 3631 MPASS(vp->v_dbatchcpu == NOCPU); 3632 } 3633 3634 /* 3635 * Drop the hold count of the vnode. If this is the last reference to 3636 * the vnode we place it on the free list unless it has been vgone'd 3637 * (marked VIRF_DOOMED) in which case we will free it. 3638 * 3639 * Because the vnode vm object keeps a hold reference on the vnode if 3640 * there is at least one resident non-cached page, the vnode cannot 3641 * leave the active list without the page cleanup done. 3642 */ 3643 static void 3644 vdrop_deactivate(struct vnode *vp) 3645 { 3646 struct mount *mp; 3647 3648 ASSERT_VI_LOCKED(vp, __func__); 3649 /* 3650 * Mark a vnode as free: remove it from its active list 3651 * and put it up for recycling on the freelist. 3652 */ 3653 VNASSERT(!VN_IS_DOOMED(vp), vp, 3654 ("vdrop: returning doomed vnode")); 3655 VNASSERT(vp->v_op != NULL, vp, 3656 ("vdrop: vnode already reclaimed.")); 3657 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3658 ("vnode with VI_OWEINACT set")); 3659 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3660 ("vnode with VI_DEFINACT set")); 3661 if (vp->v_mflag & VMP_LAZYLIST) { 3662 mp = vp->v_mount; 3663 mtx_lock(&mp->mnt_listmtx); 3664 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3665 /* 3666 * Don't remove the vnode from the lazy list if another thread 3667 * has increased the hold count. It may have re-enqueued the 3668 * vnode to the lazy list and is now responsible for its 3669 * removal. 3670 */ 3671 if (vp->v_holdcnt == 0) { 3672 vp->v_mflag &= ~VMP_LAZYLIST; 3673 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3674 mp->mnt_lazyvnodelistsize--; 3675 } 3676 mtx_unlock(&mp->mnt_listmtx); 3677 } 3678 vdbatch_enqueue(vp); 3679 } 3680 3681 void 3682 vdrop(struct vnode *vp) 3683 { 3684 3685 ASSERT_VI_UNLOCKED(vp, __func__); 3686 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3687 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3688 return; 3689 VI_LOCK(vp); 3690 vdropl(vp); 3691 } 3692 3693 void 3694 vdropl(struct vnode *vp) 3695 { 3696 3697 ASSERT_VI_LOCKED(vp, __func__); 3698 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3699 if (!refcount_release(&vp->v_holdcnt)) { 3700 VI_UNLOCK(vp); 3701 return; 3702 } 3703 if (!VN_IS_DOOMED(vp)) { 3704 vdrop_deactivate(vp); 3705 return; 3706 } 3707 /* 3708 * We may be racing against vhold_smr. 3709 * 3710 * If they win we can just pretend we never got this far, they will 3711 * vdrop later. 3712 */ 3713 if (!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR)) { 3714 /* 3715 * We lost the aforementioned race. Note that any subsequent 3716 * access is invalid as they might have managed to vdropl on 3717 * their own. 3718 */ 3719 return; 3720 } 3721 freevnode(vp); 3722 } 3723 3724 /* 3725 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3726 * flags. 
DOINGINACT prevents us from recursing in calls to vinactive. 3727 */ 3728 static void 3729 vinactivef(struct vnode *vp) 3730 { 3731 struct vm_object *obj; 3732 3733 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3734 ASSERT_VI_LOCKED(vp, "vinactive"); 3735 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3736 ("vinactive: recursed on VI_DOINGINACT")); 3737 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3738 vp->v_iflag |= VI_DOINGINACT; 3739 vp->v_iflag &= ~VI_OWEINACT; 3740 VI_UNLOCK(vp); 3741 /* 3742 * Before moving off the active list, we must be sure that any 3743 * modified pages are converted into the vnode's dirty 3744 * buffers, since these will no longer be checked once the 3745 * vnode is on the inactive list. 3746 * 3747 * The write-out of the dirty pages is asynchronous. At the 3748 * point that VOP_INACTIVE() is called, there could still be 3749 * pending I/O and dirty pages in the object. 3750 */ 3751 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3752 vm_object_mightbedirty(obj)) { 3753 VM_OBJECT_WLOCK(obj); 3754 vm_object_page_clean(obj, 0, 0, 0); 3755 VM_OBJECT_WUNLOCK(obj); 3756 } 3757 VOP_INACTIVE(vp, curthread); 3758 VI_LOCK(vp); 3759 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3760 ("vinactive: lost VI_DOINGINACT")); 3761 vp->v_iflag &= ~VI_DOINGINACT; 3762 } 3763 3764 void 3765 vinactive(struct vnode *vp) 3766 { 3767 3768 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3769 ASSERT_VI_LOCKED(vp, "vinactive"); 3770 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3771 3772 if ((vp->v_iflag & VI_OWEINACT) == 0) 3773 return; 3774 if (vp->v_iflag & VI_DOINGINACT) 3775 return; 3776 if (vp->v_usecount > 0) { 3777 vp->v_iflag &= ~VI_OWEINACT; 3778 return; 3779 } 3780 vinactivef(vp); 3781 } 3782 3783 /* 3784 * Remove any vnodes in the vnode table belonging to mount point mp. 3785 * 3786 * If FORCECLOSE is not specified, there should not be any active ones, 3787 * return error if any are found (nb: this is a user error, not a 3788 * system error). If FORCECLOSE is specified, detach any active vnodes 3789 * that are found. 3790 * 3791 * If WRITECLOSE is set, only flush out regular file vnodes open for 3792 * writing. 3793 * 3794 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3795 * 3796 * `rootrefs' specifies the base reference count for the root vnode 3797 * of this filesystem. The root vnode is considered busy if its 3798 * v_usecount exceeds this value. On a successful return, vflush(, td) 3799 * will call vrele() on the root vnode exactly rootrefs times. 3800 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3801 * be zero. 3802 */ 3803 #ifdef DIAGNOSTIC 3804 static int busyprt = 0; /* print out busy vnodes */ 3805 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3806 #endif 3807 3808 int 3809 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3810 { 3811 struct vnode *vp, *mvp, *rootvp = NULL; 3812 struct vattr vattr; 3813 int busy = 0, error; 3814 3815 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3816 rootrefs, flags); 3817 if (rootrefs > 0) { 3818 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3819 ("vflush: bad args")); 3820 /* 3821 * Get the filesystem root vnode. We can vput() it 3822 * immediately, since with rootrefs > 0, it won't go away. 
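		 *
		 * An unmount path, for instance, passes rootrefs equal
		 * to the number of long-lived references it holds on
		 * the root vnode, so that the vrele() loop at the end
		 * can release exactly those once everything else has
		 * been flushed.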
3823 */ 3824 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3825 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3826 __func__, error); 3827 return (error); 3828 } 3829 vput(rootvp); 3830 } 3831 loop: 3832 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3833 vholdl(vp); 3834 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3835 if (error) { 3836 vdrop(vp); 3837 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3838 goto loop; 3839 } 3840 /* 3841 * Skip over a vnodes marked VV_SYSTEM. 3842 */ 3843 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3844 VOP_UNLOCK(vp); 3845 vdrop(vp); 3846 continue; 3847 } 3848 /* 3849 * If WRITECLOSE is set, flush out unlinked but still open 3850 * files (even if open only for reading) and regular file 3851 * vnodes open for writing. 3852 */ 3853 if (flags & WRITECLOSE) { 3854 if (vp->v_object != NULL) { 3855 VM_OBJECT_WLOCK(vp->v_object); 3856 vm_object_page_clean(vp->v_object, 0, 0, 0); 3857 VM_OBJECT_WUNLOCK(vp->v_object); 3858 } 3859 error = VOP_FSYNC(vp, MNT_WAIT, td); 3860 if (error != 0) { 3861 VOP_UNLOCK(vp); 3862 vdrop(vp); 3863 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3864 return (error); 3865 } 3866 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3867 VI_LOCK(vp); 3868 3869 if ((vp->v_type == VNON || 3870 (error == 0 && vattr.va_nlink > 0)) && 3871 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3872 VOP_UNLOCK(vp); 3873 vdropl(vp); 3874 continue; 3875 } 3876 } else 3877 VI_LOCK(vp); 3878 /* 3879 * With v_usecount == 0, all we need to do is clear out the 3880 * vnode data structures and we are done. 3881 * 3882 * If FORCECLOSE is set, forcibly close the vnode. 3883 */ 3884 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3885 vgonel(vp); 3886 } else { 3887 busy++; 3888 #ifdef DIAGNOSTIC 3889 if (busyprt) 3890 vn_printf(vp, "vflush: busy vnode "); 3891 #endif 3892 } 3893 VOP_UNLOCK(vp); 3894 vdropl(vp); 3895 } 3896 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3897 /* 3898 * If just the root vnode is busy, and if its refcount 3899 * is equal to `rootrefs', then go ahead and kill it. 3900 */ 3901 VI_LOCK(rootvp); 3902 KASSERT(busy > 0, ("vflush: not busy")); 3903 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3904 ("vflush: usecount %d < rootrefs %d", 3905 rootvp->v_usecount, rootrefs)); 3906 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3907 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3908 vgone(rootvp); 3909 VOP_UNLOCK(rootvp); 3910 busy = 0; 3911 } else 3912 VI_UNLOCK(rootvp); 3913 } 3914 if (busy) { 3915 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3916 busy); 3917 return (EBUSY); 3918 } 3919 for (; rootrefs > 0; rootrefs--) 3920 vrele(rootvp); 3921 return (0); 3922 } 3923 3924 /* 3925 * Recycle an unused vnode to the front of the free list. 3926 */ 3927 int 3928 vrecycle(struct vnode *vp) 3929 { 3930 int recycled; 3931 3932 VI_LOCK(vp); 3933 recycled = vrecyclel(vp); 3934 VI_UNLOCK(vp); 3935 return (recycled); 3936 } 3937 3938 /* 3939 * vrecycle, with the vp interlock held. 3940 */ 3941 int 3942 vrecyclel(struct vnode *vp) 3943 { 3944 int recycled; 3945 3946 ASSERT_VOP_ELOCKED(vp, __func__); 3947 ASSERT_VI_LOCKED(vp, __func__); 3948 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3949 recycled = 0; 3950 if (vp->v_usecount == 0) { 3951 recycled = 1; 3952 vgonel(vp); 3953 } 3954 return (recycled); 3955 } 3956 3957 /* 3958 * Eliminate all activity associated with a vnode 3959 * in preparation for reuse. 
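 *
 * For example, insmntque_stddtr() above disposes of a vnode that lost
 * the race with unmount using this sequence:
 *
 *	vp->v_data = NULL;
 *	vp->v_op = &dead_vnodeops;
 *	vgone(vp);
 *	vput(vp);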
3960 */ 3961 void 3962 vgone(struct vnode *vp) 3963 { 3964 VI_LOCK(vp); 3965 vgonel(vp); 3966 VI_UNLOCK(vp); 3967 } 3968 3969 static void 3970 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3971 struct vnode *lowervp __unused) 3972 { 3973 } 3974 3975 /* 3976 * Notify upper mounts about reclaimed or unlinked vnode. 3977 */ 3978 void 3979 vfs_notify_upper(struct vnode *vp, int event) 3980 { 3981 static struct vfsops vgonel_vfsops = { 3982 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3983 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3984 }; 3985 struct mount *mp, *ump, *mmp; 3986 3987 mp = vp->v_mount; 3988 if (mp == NULL) 3989 return; 3990 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3991 return; 3992 3993 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3994 mmp->mnt_op = &vgonel_vfsops; 3995 mmp->mnt_kern_flag |= MNTK_MARKER; 3996 MNT_ILOCK(mp); 3997 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3998 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3999 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 4000 ump = TAILQ_NEXT(ump, mnt_upper_link); 4001 continue; 4002 } 4003 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 4004 MNT_IUNLOCK(mp); 4005 switch (event) { 4006 case VFS_NOTIFY_UPPER_RECLAIM: 4007 VFS_RECLAIM_LOWERVP(ump, vp); 4008 break; 4009 case VFS_NOTIFY_UPPER_UNLINK: 4010 VFS_UNLINK_LOWERVP(ump, vp); 4011 break; 4012 default: 4013 KASSERT(0, ("invalid event %d", event)); 4014 break; 4015 } 4016 MNT_ILOCK(mp); 4017 ump = TAILQ_NEXT(mmp, mnt_upper_link); 4018 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 4019 } 4020 free(mmp, M_TEMP); 4021 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 4022 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 4023 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 4024 wakeup(&mp->mnt_uppers); 4025 } 4026 MNT_IUNLOCK(mp); 4027 } 4028 4029 /* 4030 * vgone, with the vp interlock held. 4031 */ 4032 static void 4033 vgonel(struct vnode *vp) 4034 { 4035 struct thread *td; 4036 struct mount *mp; 4037 vm_object_t object; 4038 bool active, oweinact; 4039 4040 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4041 ASSERT_VI_LOCKED(vp, "vgonel"); 4042 VNASSERT(vp->v_holdcnt, vp, 4043 ("vgonel: vp %p has no reference.", vp)); 4044 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4045 td = curthread; 4046 4047 /* 4048 * Don't vgonel if we're already doomed. 4049 */ 4050 if (vp->v_irflag & VIRF_DOOMED) 4051 return; 4052 /* 4053 * Paired with freevnode. 4054 */ 4055 vn_seqc_write_begin_locked(vp); 4056 vunlazy_gone(vp); 4057 vp->v_irflag |= VIRF_DOOMED; 4058 4059 /* 4060 * Check to see if the vnode is in use. If so, we have to call 4061 * VOP_CLOSE() and VOP_INACTIVE(). 4062 */ 4063 active = vp->v_usecount > 0; 4064 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4065 /* 4066 * If we need to do inactive VI_OWEINACT will be set. 4067 */ 4068 if (vp->v_iflag & VI_DEFINACT) { 4069 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4070 vp->v_iflag &= ~VI_DEFINACT; 4071 vdropl(vp); 4072 } else { 4073 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4074 VI_UNLOCK(vp); 4075 } 4076 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4077 4078 /* 4079 * If purging an active vnode, it must be closed and 4080 * deactivated before being reclaimed. 4081 */ 4082 if (active) 4083 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4084 if (oweinact || active) { 4085 VI_LOCK(vp); 4086 vinactivef(vp); 4087 VI_UNLOCK(vp); 4088 } 4089 if (vp->v_type == VSOCK) 4090 vfs_unp_reclaim(vp); 4091 4092 /* 4093 * Clean out any buffers associated with the vnode. 
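 * A V_SAVE pass below first attempts to write out any dirty buffers.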
4094 * If the flush fails, just toss the buffers. 4095 */ 4096 mp = NULL; 4097 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4098 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4099 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4100 while (vinvalbuf(vp, 0, 0, 0) != 0) 4101 ; 4102 } 4103 4104 BO_LOCK(&vp->v_bufobj); 4105 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4106 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4107 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4108 vp->v_bufobj.bo_clean.bv_cnt == 0, 4109 ("vp %p bufobj not invalidated", vp)); 4110 4111 /* 4112 * For VMIO bufobj, BO_DEAD is set later, or in 4113 * vm_object_terminate() after the object's page queue is 4114 * flushed. 4115 */ 4116 object = vp->v_bufobj.bo_object; 4117 if (object == NULL) 4118 vp->v_bufobj.bo_flag |= BO_DEAD; 4119 BO_UNLOCK(&vp->v_bufobj); 4120 4121 /* 4122 * Handle the VM part. Tmpfs handles v_object on its own (the 4123 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4124 * should not touch the object borrowed from the lower vnode 4125 * (the handle check). 4126 */ 4127 if (object != NULL && object->type == OBJT_VNODE && 4128 object->handle == vp) 4129 vnode_destroy_vobject(vp); 4130 4131 /* 4132 * Reclaim the vnode. 4133 */ 4134 if (VOP_RECLAIM(vp, td)) 4135 panic("vgone: cannot reclaim"); 4136 if (mp != NULL) 4137 vn_finished_secondary_write(mp); 4138 VNASSERT(vp->v_object == NULL, vp, 4139 ("vop_reclaim left v_object vp=%p", vp)); 4140 /* 4141 * Clear the advisory locks and wake up waiting threads. 4142 */ 4143 (void)VOP_ADVLOCKPURGE(vp); 4144 vp->v_lockf = NULL; 4145 /* 4146 * Delete from old mount point vnode list. 4147 */ 4148 delmntque(vp); 4149 cache_purge(vp); 4150 /* 4151 * Done with purge, reset to the standard lock and invalidate 4152 * the vnode. 4153 */ 4154 VI_LOCK(vp); 4155 vp->v_vnlock = &vp->v_lock; 4156 vp->v_op = &dead_vnodeops; 4157 vp->v_type = VBAD; 4158 } 4159 4160 /* 4161 * Calculate the total number of references to a special device. 4162 */ 4163 int 4164 vcount(struct vnode *vp) 4165 { 4166 int count; 4167 4168 dev_lock(); 4169 count = vp->v_rdev->si_usecount; 4170 dev_unlock(); 4171 return (count); 4172 } 4173 4174 /* 4175 * Print out a description of a vnode. 4176 */ 4177 static const char * const typename[] = 4178 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4179 "VMARKER"}; 4180 4181 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4182 "new hold count flag not added to vn_printf"); 4183 4184 void 4185 vn_printf(struct vnode *vp, const char *fmt, ...) 
{
4187 va_list ap;
4188 char buf[256], buf2[16];
4189 u_long flags;
4190 u_int holdcnt;
4191
4192 va_start(ap, fmt);
4193 vprintf(fmt, ap);
4194 va_end(ap);
4195 printf("%p: ", (void *)vp);
4196 printf("type %s\n", typename[vp->v_type]);
4197 holdcnt = atomic_load_int(&vp->v_holdcnt);
4198 printf(" usecount %d, writecount %d, refcount %d seqc users %d",
4199 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4200 vp->v_seqc_users);
4201 switch (vp->v_type) {
4202 case VDIR:
4203 printf(" mountedhere %p\n", vp->v_mountedhere);
4204 break;
4205 case VCHR:
4206 printf(" rdev %p\n", vp->v_rdev);
4207 break;
4208 case VSOCK:
4209 printf(" socket %p\n", vp->v_unpcb);
4210 break;
4211 case VFIFO:
4212 printf(" fifoinfo %p\n", vp->v_fifoinfo);
4213 break;
4214 default:
4215 printf("\n");
4216 break;
4217 }
4218 buf[0] = '\0';
4219 buf[1] = '\0';
4220 if (holdcnt & VHOLD_NO_SMR)
4221 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf));
4222 printf(" hold count flags (%s)\n", buf + 1);
4223
4224 buf[0] = '\0';
4225 buf[1] = '\0';
4226 if (vp->v_irflag & VIRF_DOOMED)
4227 strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
4228 flags = vp->v_irflag & ~(VIRF_DOOMED);
4229 if (flags != 0) {
4230 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
4231 strlcat(buf, buf2, sizeof(buf));
4232 }
4233 if (vp->v_vflag & VV_ROOT)
4234 strlcat(buf, "|VV_ROOT", sizeof(buf));
4235 if (vp->v_vflag & VV_ISTTY)
4236 strlcat(buf, "|VV_ISTTY", sizeof(buf));
4237 if (vp->v_vflag & VV_NOSYNC)
4238 strlcat(buf, "|VV_NOSYNC", sizeof(buf));
4239 if (vp->v_vflag & VV_ETERNALDEV)
4240 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
4241 if (vp->v_vflag & VV_CACHEDLABEL)
4242 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
4243 if (vp->v_vflag & VV_VMSIZEVNLOCK)
4244 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf));
4245 if (vp->v_vflag & VV_COPYONWRITE)
4246 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
4247 if (vp->v_vflag & VV_SYSTEM)
4248 strlcat(buf, "|VV_SYSTEM", sizeof(buf));
4249 if (vp->v_vflag & VV_PROCDEP)
4250 strlcat(buf, "|VV_PROCDEP", sizeof(buf));
4251 if (vp->v_vflag & VV_NOKNOTE)
4252 strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
4253 if (vp->v_vflag & VV_DELETED)
4254 strlcat(buf, "|VV_DELETED", sizeof(buf));
4255 if (vp->v_vflag & VV_MD)
4256 strlcat(buf, "|VV_MD", sizeof(buf));
4257 if (vp->v_vflag & VV_FORCEINSMQ)
4258 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
4259 if (vp->v_vflag & VV_READLINK)
4260 strlcat(buf, "|VV_READLINK", sizeof(buf));
4261 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4262 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM |
4263 VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK);
4264 if (flags != 0) {
4265 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
4266 strlcat(buf, buf2, sizeof(buf));
4267 }
4268 if (vp->v_iflag & VI_TEXT_REF)
4269 strlcat(buf, "|VI_TEXT_REF", sizeof(buf));
4270 if (vp->v_iflag & VI_MOUNT)
4271 strlcat(buf, "|VI_MOUNT", sizeof(buf));
4272 if (vp->v_iflag & VI_DOINGINACT)
4273 strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
4274 if (vp->v_iflag & VI_OWEINACT)
4275 strlcat(buf, "|VI_OWEINACT", sizeof(buf));
4276 if (vp->v_iflag & VI_DEFINACT)
4277 strlcat(buf, "|VI_DEFINACT", sizeof(buf));
4278 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT |
4279 VI_OWEINACT | VI_DEFINACT);
4280 if (flags != 0) {
4281 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
4282 strlcat(buf, buf2, sizeof(buf));
4283 }
4284 if (vp->v_mflag & VMP_LAZYLIST)
4285 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf));
4286 flags = vp->v_mflag &
~(VMP_LAZYLIST); 4287 if (flags != 0) { 4288 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4289 strlcat(buf, buf2, sizeof(buf)); 4290 } 4291 printf(" flags (%s)\n", buf + 1); 4292 if (mtx_owned(VI_MTX(vp))) 4293 printf(" VI_LOCKed"); 4294 if (vp->v_object != NULL) 4295 printf(" v_object %p ref %d pages %d " 4296 "cleanbuf %d dirtybuf %d\n", 4297 vp->v_object, vp->v_object->ref_count, 4298 vp->v_object->resident_page_count, 4299 vp->v_bufobj.bo_clean.bv_cnt, 4300 vp->v_bufobj.bo_dirty.bv_cnt); 4301 printf(" "); 4302 lockmgr_printinfo(vp->v_vnlock); 4303 if (vp->v_data != NULL) 4304 VOP_PRINT(vp); 4305 } 4306 4307 #ifdef DDB 4308 /* 4309 * List all of the locked vnodes in the system. 4310 * Called when debugging the kernel. 4311 */ 4312 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4313 { 4314 struct mount *mp; 4315 struct vnode *vp; 4316 4317 /* 4318 * Note: because this is DDB, we can't obey the locking semantics 4319 * for these structures, which means we could catch an inconsistent 4320 * state and dereference a nasty pointer. Not much to be done 4321 * about that. 4322 */ 4323 db_printf("Locked vnodes\n"); 4324 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4325 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4326 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4327 vn_printf(vp, "vnode "); 4328 } 4329 } 4330 } 4331 4332 /* 4333 * Show details about the given vnode. 4334 */ 4335 DB_SHOW_COMMAND(vnode, db_show_vnode) 4336 { 4337 struct vnode *vp; 4338 4339 if (!have_addr) 4340 return; 4341 vp = (struct vnode *)addr; 4342 vn_printf(vp, "vnode "); 4343 } 4344 4345 /* 4346 * Show details about the given mount point. 4347 */ 4348 DB_SHOW_COMMAND(mount, db_show_mount) 4349 { 4350 struct mount *mp; 4351 struct vfsopt *opt; 4352 struct statfs *sp; 4353 struct vnode *vp; 4354 char buf[512]; 4355 uint64_t mflags; 4356 u_int flags; 4357 4358 if (!have_addr) { 4359 /* No address given, print short info about all mount points. 
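 * One line per mount point, e.g. (illustrative):
 *   0xfffff80005a9c000 /dev/ada0p2 on / (ufs)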
*/ 4360 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4361 db_printf("%p %s on %s (%s)\n", mp, 4362 mp->mnt_stat.f_mntfromname, 4363 mp->mnt_stat.f_mntonname, 4364 mp->mnt_stat.f_fstypename); 4365 if (db_pager_quit) 4366 break; 4367 } 4368 db_printf("\nMore info: show mount <addr>\n"); 4369 return; 4370 } 4371 4372 mp = (struct mount *)addr; 4373 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4374 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4375 4376 buf[0] = '\0'; 4377 mflags = mp->mnt_flag; 4378 #define MNT_FLAG(flag) do { \ 4379 if (mflags & (flag)) { \ 4380 if (buf[0] != '\0') \ 4381 strlcat(buf, ", ", sizeof(buf)); \ 4382 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4383 mflags &= ~(flag); \ 4384 } \ 4385 } while (0) 4386 MNT_FLAG(MNT_RDONLY); 4387 MNT_FLAG(MNT_SYNCHRONOUS); 4388 MNT_FLAG(MNT_NOEXEC); 4389 MNT_FLAG(MNT_NOSUID); 4390 MNT_FLAG(MNT_NFS4ACLS); 4391 MNT_FLAG(MNT_UNION); 4392 MNT_FLAG(MNT_ASYNC); 4393 MNT_FLAG(MNT_SUIDDIR); 4394 MNT_FLAG(MNT_SOFTDEP); 4395 MNT_FLAG(MNT_NOSYMFOLLOW); 4396 MNT_FLAG(MNT_GJOURNAL); 4397 MNT_FLAG(MNT_MULTILABEL); 4398 MNT_FLAG(MNT_ACLS); 4399 MNT_FLAG(MNT_NOATIME); 4400 MNT_FLAG(MNT_NOCLUSTERR); 4401 MNT_FLAG(MNT_NOCLUSTERW); 4402 MNT_FLAG(MNT_SUJ); 4403 MNT_FLAG(MNT_EXRDONLY); 4404 MNT_FLAG(MNT_EXPORTED); 4405 MNT_FLAG(MNT_DEFEXPORTED); 4406 MNT_FLAG(MNT_EXPORTANON); 4407 MNT_FLAG(MNT_EXKERB); 4408 MNT_FLAG(MNT_EXPUBLIC); 4409 MNT_FLAG(MNT_LOCAL); 4410 MNT_FLAG(MNT_QUOTA); 4411 MNT_FLAG(MNT_ROOTFS); 4412 MNT_FLAG(MNT_USER); 4413 MNT_FLAG(MNT_IGNORE); 4414 MNT_FLAG(MNT_UPDATE); 4415 MNT_FLAG(MNT_DELEXPORT); 4416 MNT_FLAG(MNT_RELOAD); 4417 MNT_FLAG(MNT_FORCE); 4418 MNT_FLAG(MNT_SNAPSHOT); 4419 MNT_FLAG(MNT_BYFSID); 4420 #undef MNT_FLAG 4421 if (mflags != 0) { 4422 if (buf[0] != '\0') 4423 strlcat(buf, ", ", sizeof(buf)); 4424 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4425 "0x%016jx", mflags); 4426 } 4427 db_printf(" mnt_flag = %s\n", buf); 4428 4429 buf[0] = '\0'; 4430 flags = mp->mnt_kern_flag; 4431 #define MNT_KERN_FLAG(flag) do { \ 4432 if (flags & (flag)) { \ 4433 if (buf[0] != '\0') \ 4434 strlcat(buf, ", ", sizeof(buf)); \ 4435 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4436 flags &= ~(flag); \ 4437 } \ 4438 } while (0) 4439 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4440 MNT_KERN_FLAG(MNTK_ASYNC); 4441 MNT_KERN_FLAG(MNTK_SOFTDEP); 4442 MNT_KERN_FLAG(MNTK_DRAINING); 4443 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4444 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4445 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4446 MNT_KERN_FLAG(MNTK_NO_IOPF); 4447 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4448 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4449 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4450 MNT_KERN_FLAG(MNTK_MARKER); 4451 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4452 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4453 MNT_KERN_FLAG(MNTK_NOASYNC); 4454 MNT_KERN_FLAG(MNTK_UNMOUNT); 4455 MNT_KERN_FLAG(MNTK_MWAIT); 4456 MNT_KERN_FLAG(MNTK_SUSPEND); 4457 MNT_KERN_FLAG(MNTK_SUSPEND2); 4458 MNT_KERN_FLAG(MNTK_SUSPENDED); 4459 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4460 MNT_KERN_FLAG(MNTK_NOKNOTE); 4461 #undef MNT_KERN_FLAG 4462 if (flags != 0) { 4463 if (buf[0] != '\0') 4464 strlcat(buf, ", ", sizeof(buf)); 4465 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4466 "0x%08x", flags); 4467 } 4468 db_printf(" mnt_kern_flag = %s\n", buf); 4469 4470 db_printf(" mnt_opt = "); 4471 opt = TAILQ_FIRST(mp->mnt_opt); 4472 if (opt != NULL) { 4473 db_printf("%s", opt->name); 4474 opt = TAILQ_NEXT(opt, link); 4475 while (opt != NULL) { 4476 db_printf(", %s", opt->name); 4477 opt = TAILQ_NEXT(opt, link); 4478 } 4479 
} 4480 db_printf("\n"); 4481 4482 sp = &mp->mnt_stat; 4483 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4484 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4485 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4486 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4487 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4488 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4489 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4490 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4491 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4492 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4493 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4494 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4495 4496 db_printf(" mnt_cred = { uid=%u ruid=%u", 4497 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4498 if (jailed(mp->mnt_cred)) 4499 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4500 db_printf(" }\n"); 4501 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4502 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4503 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4504 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4505 db_printf(" mnt_lazyvnodelistsize = %d\n", 4506 mp->mnt_lazyvnodelistsize); 4507 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4508 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4509 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4510 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4511 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4512 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4513 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4514 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4515 db_printf(" mnt_secondary_accwrites = %d\n", 4516 mp->mnt_secondary_accwrites); 4517 db_printf(" mnt_gjprovider = %s\n", 4518 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4519 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4520 4521 db_printf("\n\nList of active vnodes\n"); 4522 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4523 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4524 vn_printf(vp, "vnode "); 4525 if (db_pager_quit) 4526 break; 4527 } 4528 } 4529 db_printf("\n\nList of inactive vnodes\n"); 4530 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4531 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4532 vn_printf(vp, "vnode "); 4533 if (db_pager_quit) 4534 break; 4535 } 4536 } 4537 } 4538 #endif /* DDB */ 4539 4540 /* 4541 * Fill in a struct xvfsconf based on a struct vfsconf. 4542 */ 4543 static int 4544 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4545 { 4546 struct xvfsconf xvfsp; 4547 4548 bzero(&xvfsp, sizeof(xvfsp)); 4549 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4550 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4551 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4552 xvfsp.vfc_flags = vfsp->vfc_flags; 4553 /* 4554 * These are unused in userland, we keep them 4555 * to not break binary compatibility. 
4556 */ 4557 xvfsp.vfc_vfsops = NULL; 4558 xvfsp.vfc_next = NULL; 4559 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4560 } 4561 4562 #ifdef COMPAT_FREEBSD32 4563 struct xvfsconf32 { 4564 uint32_t vfc_vfsops; 4565 char vfc_name[MFSNAMELEN]; 4566 int32_t vfc_typenum; 4567 int32_t vfc_refcount; 4568 int32_t vfc_flags; 4569 uint32_t vfc_next; 4570 }; 4571 4572 static int 4573 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4574 { 4575 struct xvfsconf32 xvfsp; 4576 4577 bzero(&xvfsp, sizeof(xvfsp)); 4578 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4579 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4580 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4581 xvfsp.vfc_flags = vfsp->vfc_flags; 4582 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4583 } 4584 #endif 4585 4586 /* 4587 * Top level filesystem related information gathering. 4588 */ 4589 static int 4590 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4591 { 4592 struct vfsconf *vfsp; 4593 int error; 4594 4595 error = 0; 4596 vfsconf_slock(); 4597 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4598 #ifdef COMPAT_FREEBSD32 4599 if (req->flags & SCTL_MASK32) 4600 error = vfsconf2x32(req, vfsp); 4601 else 4602 #endif 4603 error = vfsconf2x(req, vfsp); 4604 if (error) 4605 break; 4606 } 4607 vfsconf_sunlock(); 4608 return (error); 4609 } 4610 4611 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4612 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4613 "S,xvfsconf", "List of all configured filesystems"); 4614 4615 #ifndef BURN_BRIDGES 4616 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4617 4618 static int 4619 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4620 { 4621 int *name = (int *)arg1 - 1; /* XXX */ 4622 u_int namelen = arg2 + 1; /* XXX */ 4623 struct vfsconf *vfsp; 4624 4625 log(LOG_WARNING, "userland calling deprecated sysctl, " 4626 "please rebuild world\n"); 4627 4628 #if 1 || defined(COMPAT_PRELITE2) 4629 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
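 * A single-component name can only be the historic VFS_VFSCONF request,
 * so hand it to the old handler directly.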
*/ 4630 if (namelen == 1) 4631 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4632 #endif 4633 4634 switch (name[1]) { 4635 case VFS_MAXTYPENUM: 4636 if (namelen != 2) 4637 return (ENOTDIR); 4638 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4639 case VFS_CONF: 4640 if (namelen != 3) 4641 return (ENOTDIR); /* overloaded */ 4642 vfsconf_slock(); 4643 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4644 if (vfsp->vfc_typenum == name[2]) 4645 break; 4646 } 4647 vfsconf_sunlock(); 4648 if (vfsp == NULL) 4649 return (EOPNOTSUPP); 4650 #ifdef COMPAT_FREEBSD32 4651 if (req->flags & SCTL_MASK32) 4652 return (vfsconf2x32(req, vfsp)); 4653 else 4654 #endif 4655 return (vfsconf2x(req, vfsp)); 4656 } 4657 return (EOPNOTSUPP); 4658 } 4659 4660 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4661 CTLFLAG_MPSAFE, vfs_sysctl, 4662 "Generic filesystem"); 4663 4664 #if 1 || defined(COMPAT_PRELITE2) 4665 4666 static int 4667 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4668 { 4669 int error; 4670 struct vfsconf *vfsp; 4671 struct ovfsconf ovfs; 4672 4673 vfsconf_slock(); 4674 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4675 bzero(&ovfs, sizeof(ovfs)); 4676 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4677 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4678 ovfs.vfc_index = vfsp->vfc_typenum; 4679 ovfs.vfc_refcount = vfsp->vfc_refcount; 4680 ovfs.vfc_flags = vfsp->vfc_flags; 4681 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4682 if (error != 0) { 4683 vfsconf_sunlock(); 4684 return (error); 4685 } 4686 } 4687 vfsconf_sunlock(); 4688 return (0); 4689 } 4690 4691 #endif /* 1 || COMPAT_PRELITE2 */ 4692 #endif /* !BURN_BRIDGES */ 4693 4694 #define KINFO_VNODESLOP 10 4695 #ifdef notyet 4696 /* 4697 * Dump vnode list (via sysctl). 4698 */ 4699 /* ARGSUSED */ 4700 static int 4701 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4702 { 4703 struct xvnode *xvn; 4704 struct mount *mp; 4705 struct vnode *vp; 4706 int error, len, n; 4707 4708 /* 4709 * Stale numvnodes access is not fatal here. 4710 */ 4711 req->lock = 0; 4712 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4713 if (!req->oldptr) 4714 /* Make an estimate */ 4715 return (SYSCTL_OUT(req, 0, len)); 4716 4717 error = sysctl_wire_old_buffer(req, 0); 4718 if (error != 0) 4719 return (error); 4720 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4721 n = 0; 4722 mtx_lock(&mountlist_mtx); 4723 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4724 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4725 continue; 4726 MNT_ILOCK(mp); 4727 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4728 if (n == len) 4729 break; 4730 vref(vp); 4731 xvn[n].xv_size = sizeof *xvn; 4732 xvn[n].xv_vnode = vp; 4733 xvn[n].xv_id = 0; /* XXX compat */ 4734 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4735 XV_COPY(usecount); 4736 XV_COPY(writecount); 4737 XV_COPY(holdcnt); 4738 XV_COPY(mount); 4739 XV_COPY(numoutput); 4740 XV_COPY(type); 4741 #undef XV_COPY 4742 xvn[n].xv_flag = vp->v_vflag; 4743 4744 switch (vp->v_type) { 4745 case VREG: 4746 case VDIR: 4747 case VLNK: 4748 break; 4749 case VBLK: 4750 case VCHR: 4751 if (vp->v_rdev == NULL) { 4752 vrele(vp); 4753 continue; 4754 } 4755 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4756 break; 4757 case VSOCK: 4758 xvn[n].xv_socket = vp->v_socket; 4759 break; 4760 case VFIFO: 4761 xvn[n].xv_fifo = vp->v_fifoinfo; 4762 break; 4763 case VNON: 4764 case VBAD: 4765 default: 4766 /* shouldn't happen? 
*/ 4767 vrele(vp); 4768 continue; 4769 } 4770 vrele(vp); 4771 ++n; 4772 } 4773 MNT_IUNLOCK(mp); 4774 mtx_lock(&mountlist_mtx); 4775 vfs_unbusy(mp); 4776 if (n == len) 4777 break; 4778 } 4779 mtx_unlock(&mountlist_mtx); 4780 4781 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4782 free(xvn, M_TEMP); 4783 return (error); 4784 } 4785 4786 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4787 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4788 ""); 4789 #endif 4790 4791 static void 4792 unmount_or_warn(struct mount *mp) 4793 { 4794 int error; 4795 4796 error = dounmount(mp, MNT_FORCE, curthread); 4797 if (error != 0) { 4798 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4799 if (error == EBUSY) 4800 printf("BUSY)\n"); 4801 else 4802 printf("%d)\n", error); 4803 } 4804 } 4805 4806 /* 4807 * Unmount all filesystems. The list is traversed in reverse order 4808 * of mounting to avoid dependencies. 4809 */ 4810 void 4811 vfs_unmountall(void) 4812 { 4813 struct mount *mp, *tmp; 4814 4815 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4816 4817 /* 4818 * Since this only runs when rebooting, it is not interlocked. 4819 */ 4820 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4821 vfs_ref(mp); 4822 4823 /* 4824 * Forcibly unmounting "/dev" before "/" would prevent clean 4825 * unmount of the latter. 4826 */ 4827 if (mp == rootdevmp) 4828 continue; 4829 4830 unmount_or_warn(mp); 4831 } 4832 4833 if (rootdevmp != NULL) 4834 unmount_or_warn(rootdevmp); 4835 } 4836 4837 static void 4838 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4839 { 4840 4841 ASSERT_VI_LOCKED(vp, __func__); 4842 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4843 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4844 vdropl(vp); 4845 return; 4846 } 4847 if (vn_lock(vp, lkflags) == 0) { 4848 VI_LOCK(vp); 4849 vinactive(vp); 4850 VOP_UNLOCK(vp); 4851 vdropl(vp); 4852 return; 4853 } 4854 vdefer_inactive_unlocked(vp); 4855 } 4856 4857 static int 4858 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4859 { 4860 4861 return (vp->v_iflag & VI_DEFINACT); 4862 } 4863 4864 static void __noinline 4865 vfs_periodic_inactive(struct mount *mp, int flags) 4866 { 4867 struct vnode *vp, *mvp; 4868 int lkflags; 4869 4870 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4871 if (flags != MNT_WAIT) 4872 lkflags |= LK_NOWAIT; 4873 4874 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4875 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4876 VI_UNLOCK(vp); 4877 continue; 4878 } 4879 vp->v_iflag &= ~VI_DEFINACT; 4880 vfs_deferred_inactive(vp, lkflags); 4881 } 4882 } 4883 4884 static inline bool 4885 vfs_want_msync(struct vnode *vp) 4886 { 4887 struct vm_object *obj; 4888 4889 /* 4890 * This test may be performed without any locks held. 4891 * We rely on vm_object's type stability. 
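 * A stale answer is harmless: the caller re-checks v_object and
 * VV_NOSYNC once the vnode has been locked.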
4892 */ 4893 if (vp->v_vflag & VV_NOSYNC) 4894 return (false); 4895 obj = vp->v_object; 4896 return (obj != NULL && vm_object_mightbedirty(obj)); 4897 } 4898 4899 static int 4900 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4901 { 4902 4903 if (vp->v_vflag & VV_NOSYNC) 4904 return (false); 4905 if (vp->v_iflag & VI_DEFINACT) 4906 return (true); 4907 return (vfs_want_msync(vp)); 4908 } 4909 4910 static void __noinline 4911 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4912 { 4913 struct vnode *vp, *mvp; 4914 struct vm_object *obj; 4915 struct thread *td; 4916 int lkflags, objflags; 4917 bool seen_defer; 4918 4919 td = curthread; 4920 4921 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4922 if (flags != MNT_WAIT) { 4923 lkflags |= LK_NOWAIT; 4924 objflags = OBJPC_NOSYNC; 4925 } else { 4926 objflags = OBJPC_SYNC; 4927 } 4928 4929 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4930 seen_defer = false; 4931 if (vp->v_iflag & VI_DEFINACT) { 4932 vp->v_iflag &= ~VI_DEFINACT; 4933 seen_defer = true; 4934 } 4935 if (!vfs_want_msync(vp)) { 4936 if (seen_defer) 4937 vfs_deferred_inactive(vp, lkflags); 4938 else 4939 VI_UNLOCK(vp); 4940 continue; 4941 } 4942 if (vget(vp, lkflags, td) == 0) { 4943 obj = vp->v_object; 4944 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4945 VM_OBJECT_WLOCK(obj); 4946 vm_object_page_clean(obj, 0, 0, objflags); 4947 VM_OBJECT_WUNLOCK(obj); 4948 } 4949 vput(vp); 4950 if (seen_defer) 4951 vdrop(vp); 4952 } else { 4953 if (seen_defer) 4954 vdefer_inactive_unlocked(vp); 4955 } 4956 } 4957 } 4958 4959 void 4960 vfs_periodic(struct mount *mp, int flags) 4961 { 4962 4963 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4964 4965 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4966 vfs_periodic_inactive(mp, flags); 4967 else 4968 vfs_periodic_msync_inactive(mp, flags); 4969 } 4970 4971 static void 4972 destroy_vpollinfo_free(struct vpollinfo *vi) 4973 { 4974 4975 knlist_destroy(&vi->vpi_selinfo.si_note); 4976 mtx_destroy(&vi->vpi_lock); 4977 uma_zfree(vnodepoll_zone, vi); 4978 } 4979 4980 static void 4981 destroy_vpollinfo(struct vpollinfo *vi) 4982 { 4983 4984 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4985 seldrain(&vi->vpi_selinfo); 4986 destroy_vpollinfo_free(vi); 4987 } 4988 4989 /* 4990 * Initialize per-vnode helper structure to hold poll-related state. 4991 */ 4992 void 4993 v_addpollinfo(struct vnode *vp) 4994 { 4995 struct vpollinfo *vi; 4996 4997 if (vp->v_pollinfo != NULL) 4998 return; 4999 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 5000 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 5001 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 5002 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 5003 VI_LOCK(vp); 5004 if (vp->v_pollinfo != NULL) { 5005 VI_UNLOCK(vp); 5006 destroy_vpollinfo_free(vi); 5007 return; 5008 } 5009 vp->v_pollinfo = vi; 5010 VI_UNLOCK(vp); 5011 } 5012 5013 /* 5014 * Record a process's interest in events which might happen to 5015 * a vnode. Because poll uses the historic select-style interface 5016 * internally, this routine serves as both the ``check for any 5017 * pending events'' and the ``record my interest in future events'' 5018 * functions. (These are done together, while the lock is held, 5019 * to avoid race conditions.) 
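 *
 * For instance, a poll backend might do (sketch):
 *
 *	revents = vn_pollrecord(vp, td, events);
 *
 * where a non-zero result reports the already-pending events and zero
 * means the interest was recorded for a later selwakeup().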
5020 */
5021 int
5022 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5023 {
5024
5025 v_addpollinfo(vp);
5026 mtx_lock(&vp->v_pollinfo->vpi_lock);
5027 if (vp->v_pollinfo->vpi_revents & events) {
5028 /*
5029 * This leaves events we are not interested
5030 * in available for the other process which
5031 * presumably had requested them
5032 * (otherwise they would never have been
5033 * recorded).
5034 */
5035 events &= vp->v_pollinfo->vpi_revents;
5036 vp->v_pollinfo->vpi_revents &= ~events;
5037
5038 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5039 return (events);
5040 }
5041 vp->v_pollinfo->vpi_events |= events;
5042 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5043 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5044 return (0);
5045 }
5046
5047 /*
5048 * Routine to create and manage a filesystem syncer vnode.
5049 */
5050 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5051 static int sync_fsync(struct vop_fsync_args *);
5052 static int sync_inactive(struct vop_inactive_args *);
5053 static int sync_reclaim(struct vop_reclaim_args *);
5054
5055 static struct vop_vector sync_vnodeops = {
5056 .vop_bypass = VOP_EOPNOTSUPP,
5057 .vop_close = sync_close, /* close */
5058 .vop_fsync = sync_fsync, /* fsync */
5059 .vop_inactive = sync_inactive, /* inactive */
5060 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */
5061 .vop_reclaim = sync_reclaim, /* reclaim */
5062 .vop_lock1 = vop_stdlock, /* lock */
5063 .vop_unlock = vop_stdunlock, /* unlock */
5064 .vop_islocked = vop_stdislocked, /* islocked */
5065 };
5066 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5067
5068 /*
5069 * Create a new filesystem syncer vnode for the specified mount point.
5070 */
5071 void
5072 vfs_allocate_syncvnode(struct mount *mp)
5073 {
5074 struct vnode *vp;
5075 struct bufobj *bo;
5076 static long start, incr, next;
5077 int error;
5078
5079 /* Allocate a new vnode */
5080 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5081 if (error != 0)
5082 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5083 vp->v_type = VNON;
5084 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5085 vp->v_vflag |= VV_FORCEINSMQ;
5086 error = insmntque(vp, mp);
5087 if (error != 0)
5088 panic("vfs_allocate_syncvnode: insmntque() failed");
5089 vp->v_vflag &= ~VV_FORCEINSMQ;
5090 VOP_UNLOCK(vp);
5091 /*
5092 * Place the vnode onto the syncer worklist. We attempt to
5093 * scatter them about on the list so that they will go off
5094 * at evenly distributed times even if all the filesystems
5095 * are mounted at once.
5096 */
5097 next += incr;
5098 if (next == 0 || next > syncer_maxdelay) {
5099 start /= 2;
5100 incr /= 2;
5101 if (start == 0) {
5102 start = syncer_maxdelay / 2;
5103 incr = syncer_maxdelay;
5104 }
5105 next = start;
5106 }
5107 bo = &vp->v_bufobj;
5108 BO_LOCK(bo);
5109 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5110 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/
5111 mtx_lock(&sync_mtx);
5112 sync_vnode_count++;
5113 if (mp->mnt_syncer == NULL) {
5114 mp->mnt_syncer = vp;
5115 vp = NULL;
5116 }
5117 mtx_unlock(&sync_mtx);
5118 BO_UNLOCK(bo);
5119 if (vp != NULL) {
5120 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5121 vgone(vp);
5122 vput(vp);
5123 }
5124 }
5125
5126 void
5127 vfs_deallocate_syncvnode(struct mount *mp)
5128 {
5129 struct vnode *vp;
5130
5131 mtx_lock(&sync_mtx);
5132 vp = mp->mnt_syncer;
5133 if (vp != NULL)
5134 mp->mnt_syncer = NULL;
5135 mtx_unlock(&sync_mtx);
5136 if (vp != NULL)
5137 vrele(vp);
5138 }
5139
5140 /*
5141 * Do a lazy sync of the filesystem.
5142 */
5143 static int
5144 sync_fsync(struct vop_fsync_args *ap)
5145 {
5146 struct vnode *syncvp = ap->a_vp;
5147 struct mount *mp = syncvp->v_mount;
5148 int error, save;
5149 struct bufobj *bo;
5150
5151 /*
5152 * We only need to do something if this is a lazy evaluation.
5153 */
5154 if (ap->a_waitfor != MNT_LAZY)
5155 return (0);
5156
5157 /*
5158 * Move ourselves to the back of the sync list.
5159 */
5160 bo = &syncvp->v_bufobj;
5161 BO_LOCK(bo);
5162 vn_syncer_add_to_worklist(bo, syncdelay);
5163 BO_UNLOCK(bo);
5164
5165 /*
5166 * Walk the list of vnodes pushing all that are dirty and
5167 * not already on the sync list.
5168 */
5169 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5170 return (0);
5171 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
5172 vfs_unbusy(mp);
5173 return (0);
5174 }
5175 save = curthread_pflags_set(TDP_SYNCIO);
5176 /*
5177 * The filesystem at hand may be idle with free vnodes stored in the
5178 * batch. Return them instead of letting them stay there indefinitely.
5179 */
5180 vfs_periodic(mp, MNT_NOWAIT);
5181 error = VFS_SYNC(mp, MNT_LAZY);
5182 curthread_pflags_restore(save);
5183 vn_finished_write(mp);
5184 vfs_unbusy(mp);
5185 return (error);
5186 }
5187
5188 /*
5189 * The syncer vnode is no longer referenced.
5190 */
5191 static int
5192 sync_inactive(struct vop_inactive_args *ap)
5193 {
5194
5195 vgone(ap->a_vp);
5196 return (0);
5197 }
5198
5199 /*
5200 * The syncer vnode is no longer needed and is being decommissioned.
5201 *
5202 * Modifications to the worklist must be protected by sync_mtx.
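 * This is reached through vgone(), e.g. from sync_inactive() above when
 * the last reference to the syncer vnode is dropped.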
5203 */ 5204 static int 5205 sync_reclaim(struct vop_reclaim_args *ap) 5206 { 5207 struct vnode *vp = ap->a_vp; 5208 struct bufobj *bo; 5209 5210 bo = &vp->v_bufobj; 5211 BO_LOCK(bo); 5212 mtx_lock(&sync_mtx); 5213 if (vp->v_mount->mnt_syncer == vp) 5214 vp->v_mount->mnt_syncer = NULL; 5215 if (bo->bo_flag & BO_ONWORKLST) { 5216 LIST_REMOVE(bo, bo_synclist); 5217 syncer_worklist_len--; 5218 sync_vnode_count--; 5219 bo->bo_flag &= ~BO_ONWORKLST; 5220 } 5221 mtx_unlock(&sync_mtx); 5222 BO_UNLOCK(bo); 5223 5224 return (0); 5225 } 5226 5227 int 5228 vn_need_pageq_flush(struct vnode *vp) 5229 { 5230 struct vm_object *obj; 5231 int need; 5232 5233 MPASS(mtx_owned(VI_MTX(vp))); 5234 need = 0; 5235 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5236 vm_object_mightbedirty(obj)) 5237 need = 1; 5238 return (need); 5239 } 5240 5241 /* 5242 * Check if vnode represents a disk device 5243 */ 5244 int 5245 vn_isdisk(struct vnode *vp, int *errp) 5246 { 5247 int error; 5248 5249 if (vp->v_type != VCHR) { 5250 error = ENOTBLK; 5251 goto out; 5252 } 5253 error = 0; 5254 dev_lock(); 5255 if (vp->v_rdev == NULL) 5256 error = ENXIO; 5257 else if (vp->v_rdev->si_devsw == NULL) 5258 error = ENXIO; 5259 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5260 error = ENOTBLK; 5261 dev_unlock(); 5262 out: 5263 if (errp != NULL) 5264 *errp = error; 5265 return (error == 0); 5266 } 5267 5268 /* 5269 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see 5270 * the comment above cache_fplookup for details. 5271 * 5272 * We never deny as priv_check_cred calls are not yet supported, see vaccess. 5273 */ 5274 int 5275 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred) 5276 { 5277 5278 VFS_SMR_ASSERT_ENTERED(); 5279 5280 /* Check the owner. */ 5281 if (cred->cr_uid == file_uid) { 5282 if (file_mode & S_IXUSR) 5283 return (0); 5284 return (EAGAIN); 5285 } 5286 5287 /* Otherwise, check the groups (first match) */ 5288 if (groupmember(file_gid, cred)) { 5289 if (file_mode & S_IXGRP) 5290 return (0); 5291 return (EAGAIN); 5292 } 5293 5294 /* Otherwise, check everyone else. */ 5295 if (file_mode & S_IXOTH) 5296 return (0); 5297 return (EAGAIN); 5298 } 5299 5300 /* 5301 * Common filesystem object access control check routine. Accepts a 5302 * vnode's type, "mode", uid and gid, requested access mode, credentials, 5303 * and optional call-by-reference privused argument allowing vaccess() 5304 * to indicate to the caller whether privilege was used to satisfy the 5305 * request (obsoleted). Returns 0 on success, or an errno on failure. 5306 */ 5307 int 5308 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5309 accmode_t accmode, struct ucred *cred, int *privused) 5310 { 5311 accmode_t dac_granted; 5312 accmode_t priv_granted; 5313 5314 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5315 ("invalid bit in accmode")); 5316 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5317 ("VAPPEND without VWRITE")); 5318 5319 /* 5320 * Look for a normal, non-privileged way to access the file/directory 5321 * as requested. If it exists, go with that. 5322 */ 5323 5324 if (privused != NULL) 5325 *privused = 0; 5326 5327 dac_granted = 0; 5328 5329 /* Check the owner. 
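 * e.g. a file_mode of 0750 grants its owner
 * VADMIN | VEXEC | VREAD | VWRITE | VAPPEND below.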
*/ 5330 if (cred->cr_uid == file_uid) { 5331 dac_granted |= VADMIN; 5332 if (file_mode & S_IXUSR) 5333 dac_granted |= VEXEC; 5334 if (file_mode & S_IRUSR) 5335 dac_granted |= VREAD; 5336 if (file_mode & S_IWUSR) 5337 dac_granted |= (VWRITE | VAPPEND); 5338 5339 if ((accmode & dac_granted) == accmode) 5340 return (0); 5341 5342 goto privcheck; 5343 } 5344 5345 /* Otherwise, check the groups (first match) */ 5346 if (groupmember(file_gid, cred)) { 5347 if (file_mode & S_IXGRP) 5348 dac_granted |= VEXEC; 5349 if (file_mode & S_IRGRP) 5350 dac_granted |= VREAD; 5351 if (file_mode & S_IWGRP) 5352 dac_granted |= (VWRITE | VAPPEND); 5353 5354 if ((accmode & dac_granted) == accmode) 5355 return (0); 5356 5357 goto privcheck; 5358 } 5359 5360 /* Otherwise, check everyone else. */ 5361 if (file_mode & S_IXOTH) 5362 dac_granted |= VEXEC; 5363 if (file_mode & S_IROTH) 5364 dac_granted |= VREAD; 5365 if (file_mode & S_IWOTH) 5366 dac_granted |= (VWRITE | VAPPEND); 5367 if ((accmode & dac_granted) == accmode) 5368 return (0); 5369 5370 privcheck: 5371 /* 5372 * Build a privilege mask to determine if the set of privileges 5373 * satisfies the requirements when combined with the granted mask 5374 * from above. For each privilege, if the privilege is required, 5375 * bitwise or the request type onto the priv_granted mask. 5376 */ 5377 priv_granted = 0; 5378 5379 if (type == VDIR) { 5380 /* 5381 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5382 * requests, instead of PRIV_VFS_EXEC. 5383 */ 5384 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5385 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5386 priv_granted |= VEXEC; 5387 } else { 5388 /* 5389 * Ensure that at least one execute bit is on. Otherwise, 5390 * a privileged user will always succeed, and we don't want 5391 * this to happen unless the file really is executable. 5392 */ 5393 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5394 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5395 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5396 priv_granted |= VEXEC; 5397 } 5398 5399 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5400 !priv_check_cred(cred, PRIV_VFS_READ)) 5401 priv_granted |= VREAD; 5402 5403 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5404 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5405 priv_granted |= (VWRITE | VAPPEND); 5406 5407 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5408 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5409 priv_granted |= VADMIN; 5410 5411 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5412 /* XXX audit: privilege used */ 5413 if (privused != NULL) 5414 *privused = 1; 5415 return (0); 5416 } 5417 5418 return ((accmode & VADMIN) ? EPERM : EACCES); 5419 } 5420 5421 /* 5422 * Credential check based on process requesting service, and per-attribute 5423 * permissions. 5424 */ 5425 int 5426 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5427 struct thread *td, accmode_t accmode) 5428 { 5429 5430 /* 5431 * Kernel-invoked always succeeds. 5432 */ 5433 if (cred == NOCRED) 5434 return (0); 5435 5436 /* 5437 * Do not allow privileged processes in jail to directly manipulate 5438 * system attributes. 
5439 */ 5440 switch (attrnamespace) { 5441 case EXTATTR_NAMESPACE_SYSTEM: 5442 /* Potentially should be: return (EPERM); */ 5443 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5444 case EXTATTR_NAMESPACE_USER: 5445 return (VOP_ACCESS(vp, accmode, cred, td)); 5446 default: 5447 return (EPERM); 5448 } 5449 } 5450 5451 #ifdef DEBUG_VFS_LOCKS 5452 /* 5453 * This only exists to suppress warnings from unlocked specfs accesses. It is 5454 * no longer ok to have an unlocked VFS. 5455 */ 5456 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5457 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5458 5459 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5460 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5461 "Drop into debugger on lock violation"); 5462 5463 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5464 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5465 0, "Check for interlock across VOPs"); 5466 5467 int vfs_badlock_print = 1; /* Print lock violations. */ 5468 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5469 0, "Print lock violations"); 5470 5471 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5472 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5473 0, "Print vnode details on lock violations"); 5474 5475 #ifdef KDB 5476 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5477 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5478 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5479 #endif 5480 5481 static void 5482 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5483 { 5484 5485 #ifdef KDB 5486 if (vfs_badlock_backtrace) 5487 kdb_backtrace(); 5488 #endif 5489 if (vfs_badlock_vnode) 5490 vn_printf(vp, "vnode "); 5491 if (vfs_badlock_print) 5492 printf("%s: %p %s\n", str, (void *)vp, msg); 5493 if (vfs_badlock_ddb) 5494 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5495 } 5496 5497 void 5498 assert_vi_locked(struct vnode *vp, const char *str) 5499 { 5500 5501 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5502 vfs_badlock("interlock is not locked but should be", str, vp); 5503 } 5504 5505 void 5506 assert_vi_unlocked(struct vnode *vp, const char *str) 5507 { 5508 5509 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5510 vfs_badlock("interlock is locked but should not be", str, vp); 5511 } 5512 5513 void 5514 assert_vop_locked(struct vnode *vp, const char *str) 5515 { 5516 int locked; 5517 5518 if (!IGNORE_LOCK(vp)) { 5519 locked = VOP_ISLOCKED(vp); 5520 if (locked == 0 || locked == LK_EXCLOTHER) 5521 vfs_badlock("is not locked but should be", str, vp); 5522 } 5523 } 5524 5525 void 5526 assert_vop_unlocked(struct vnode *vp, const char *str) 5527 { 5528 5529 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5530 vfs_badlock("is locked but should not be", str, vp); 5531 } 5532 5533 void 5534 assert_vop_elocked(struct vnode *vp, const char *str) 5535 { 5536 5537 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5538 vfs_badlock("is not exclusive locked but should be", str, vp); 5539 } 5540 #endif /* DEBUG_VFS_LOCKS */ 5541 5542 void 5543 vop_rename_fail(struct vop_rename_args *ap) 5544 { 5545 5546 if (ap->a_tvp != NULL) 5547 vput(ap->a_tvp); 5548 if (ap->a_tdvp == ap->a_tvp) 5549 vrele(ap->a_tdvp); 5550 else 5551 vput(ap->a_tdvp); 5552 vrele(ap->a_fdvp); 5553 vrele(ap->a_fvp); 5554 } 5555 5556 void 
5557 vop_rename_pre(void *ap) 5558 { 5559 struct vop_rename_args *a = ap; 5560 5561 #ifdef DEBUG_VFS_LOCKS 5562 if (a->a_tvp) 5563 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5564 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5565 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5566 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5567 5568 /* Check the source (from). */ 5569 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5570 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5571 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5572 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5573 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5574 5575 /* Check the target. */ 5576 if (a->a_tvp) 5577 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5578 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5579 #endif 5580 /* 5581 * It may be tempting to add vn_seqc_write_begin/end calls here and 5582 * in vop_rename_post but that's not going to work out since some 5583 * filesystems relookup vnodes mid-rename. This is probably a bug. 5584 * 5585 * For now filesystems are expected to do the relevant calls after they 5586 * decide what vnodes to operate on. 5587 */ 5588 if (a->a_tdvp != a->a_fdvp) 5589 vhold(a->a_fdvp); 5590 if (a->a_tvp != a->a_fvp) 5591 vhold(a->a_fvp); 5592 vhold(a->a_tdvp); 5593 if (a->a_tvp) 5594 vhold(a->a_tvp); 5595 } 5596 5597 #ifdef DEBUG_VFS_LOCKS 5598 void 5599 vop_fplookup_vexec_debugpre(void *ap __unused) 5600 { 5601 5602 VFS_SMR_ASSERT_ENTERED(); 5603 } 5604 5605 void 5606 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused) 5607 { 5608 5609 VFS_SMR_ASSERT_ENTERED(); 5610 } 5611 5612 void 5613 vop_strategy_debugpre(void *ap) 5614 { 5615 struct vop_strategy_args *a; 5616 struct buf *bp; 5617 5618 a = ap; 5619 bp = a->a_bp; 5620 5621 /* 5622 * Cluster ops lock their component buffers but not the IO container. 
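 * Such B_CLUSTER buffers are therefore exempt from the lock assertion
 * below.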
5623 */ 5624 if ((bp->b_flags & B_CLUSTER) != 0) 5625 return; 5626 5627 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5628 if (vfs_badlock_print) 5629 printf( 5630 "VOP_STRATEGY: bp is not locked but should be\n"); 5631 if (vfs_badlock_ddb) 5632 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5633 } 5634 } 5635 5636 void 5637 vop_lock_debugpre(void *ap) 5638 { 5639 struct vop_lock1_args *a = ap; 5640 5641 if ((a->a_flags & LK_INTERLOCK) == 0) 5642 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5643 else 5644 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5645 } 5646 5647 void 5648 vop_lock_debugpost(void *ap, int rc) 5649 { 5650 struct vop_lock1_args *a = ap; 5651 5652 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5653 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5654 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5655 } 5656 5657 void 5658 vop_unlock_debugpre(void *ap) 5659 { 5660 struct vop_unlock_args *a = ap; 5661 5662 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5663 } 5664 5665 void 5666 vop_need_inactive_debugpre(void *ap) 5667 { 5668 struct vop_need_inactive_args *a = ap; 5669 5670 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5671 } 5672 5673 void 5674 vop_need_inactive_debugpost(void *ap, int rc) 5675 { 5676 struct vop_need_inactive_args *a = ap; 5677 5678 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5679 } 5680 #endif 5681 5682 void 5683 vop_create_pre(void *ap) 5684 { 5685 struct vop_create_args *a; 5686 struct vnode *dvp; 5687 5688 a = ap; 5689 dvp = a->a_dvp; 5690 vn_seqc_write_begin(dvp); 5691 } 5692 5693 void 5694 vop_create_post(void *ap, int rc) 5695 { 5696 struct vop_create_args *a; 5697 struct vnode *dvp; 5698 5699 a = ap; 5700 dvp = a->a_dvp; 5701 vn_seqc_write_end(dvp); 5702 if (!rc) 5703 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5704 } 5705 5706 void 5707 vop_whiteout_pre(void *ap) 5708 { 5709 struct vop_whiteout_args *a; 5710 struct vnode *dvp; 5711 5712 a = ap; 5713 dvp = a->a_dvp; 5714 vn_seqc_write_begin(dvp); 5715 } 5716 5717 void 5718 vop_whiteout_post(void *ap, int rc) 5719 { 5720 struct vop_whiteout_args *a; 5721 struct vnode *dvp; 5722 5723 a = ap; 5724 dvp = a->a_dvp; 5725 vn_seqc_write_end(dvp); 5726 } 5727 5728 void 5729 vop_deleteextattr_pre(void *ap) 5730 { 5731 struct vop_deleteextattr_args *a; 5732 struct vnode *vp; 5733 5734 a = ap; 5735 vp = a->a_vp; 5736 vn_seqc_write_begin(vp); 5737 } 5738 5739 void 5740 vop_deleteextattr_post(void *ap, int rc) 5741 { 5742 struct vop_deleteextattr_args *a; 5743 struct vnode *vp; 5744 5745 a = ap; 5746 vp = a->a_vp; 5747 vn_seqc_write_end(vp); 5748 if (!rc) 5749 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5750 } 5751 5752 void 5753 vop_link_pre(void *ap) 5754 { 5755 struct vop_link_args *a; 5756 struct vnode *vp, *tdvp; 5757 5758 a = ap; 5759 vp = a->a_vp; 5760 tdvp = a->a_tdvp; 5761 vn_seqc_write_begin(vp); 5762 vn_seqc_write_begin(tdvp); 5763 } 5764 5765 void 5766 vop_link_post(void *ap, int rc) 5767 { 5768 struct vop_link_args *a; 5769 struct vnode *vp, *tdvp; 5770 5771 a = ap; 5772 vp = a->a_vp; 5773 tdvp = a->a_tdvp; 5774 vn_seqc_write_end(vp); 5775 vn_seqc_write_end(tdvp); 5776 if (!rc) { 5777 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 5778 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 5779 } 5780 } 5781 5782 void 5783 vop_mkdir_pre(void *ap) 5784 { 5785 struct vop_mkdir_args *a; 5786 struct vnode *dvp; 5787 5788 a = ap; 5789 dvp = a->a_dvp; 5790 vn_seqc_write_begin(dvp); 5791 } 5792 5793 void 5794 vop_mkdir_post(void *ap, int rc) 5795 { 5796 struct vop_mkdir_args *a; 5797 struct vnode *dvp; 5798 5799 a = ap; 5800 dvp = a->a_dvp; 5801 vn_seqc_write_end(dvp); 
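/*
 * A new directory both writes the parent and links it back through "..",
 * hence NOTE_WRITE | NOTE_LINK.
 */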
5802 if (!rc) 5803 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5804 } 5805 5806 void 5807 vop_mknod_pre(void *ap) 5808 { 5809 struct vop_mknod_args *a; 5810 struct vnode *dvp; 5811 5812 a = ap; 5813 dvp = a->a_dvp; 5814 vn_seqc_write_begin(dvp); 5815 } 5816 5817 void 5818 vop_mknod_post(void *ap, int rc) 5819 { 5820 struct vop_mknod_args *a; 5821 struct vnode *dvp; 5822 5823 a = ap; 5824 dvp = a->a_dvp; 5825 vn_seqc_write_end(dvp); 5826 if (!rc) 5827 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5828 } 5829 5830 void 5831 vop_reclaim_post(void *ap, int rc) 5832 { 5833 struct vop_reclaim_args *a; 5834 struct vnode *vp; 5835 5836 a = ap; 5837 vp = a->a_vp; 5838 ASSERT_VOP_IN_SEQC(vp); 5839 if (!rc) 5840 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 5841 } 5842 5843 void 5844 vop_remove_pre(void *ap) 5845 { 5846 struct vop_remove_args *a; 5847 struct vnode *dvp, *vp; 5848 5849 a = ap; 5850 dvp = a->a_dvp; 5851 vp = a->a_vp; 5852 vn_seqc_write_begin(dvp); 5853 vn_seqc_write_begin(vp); 5854 } 5855 5856 void 5857 vop_remove_post(void *ap, int rc) 5858 { 5859 struct vop_remove_args *a; 5860 struct vnode *dvp, *vp; 5861 5862 a = ap; 5863 dvp = a->a_dvp; 5864 vp = a->a_vp; 5865 vn_seqc_write_end(dvp); 5866 vn_seqc_write_end(vp); 5867 if (!rc) { 5868 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5869 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5870 } 5871 } 5872 5873 void 5874 vop_rename_post(void *ap, int rc) 5875 { 5876 struct vop_rename_args *a = ap; 5877 long hint; 5878 5879 if (!rc) { 5880 hint = NOTE_WRITE; 5881 if (a->a_fdvp == a->a_tdvp) { 5882 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5883 hint |= NOTE_LINK; 5884 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5885 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5886 } else { 5887 hint |= NOTE_EXTEND; 5888 if (a->a_fvp->v_type == VDIR) 5889 hint |= NOTE_LINK; 5890 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5891 5892 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5893 a->a_tvp->v_type == VDIR) 5894 hint &= ~NOTE_LINK; 5895 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5896 } 5897 5898 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5899 if (a->a_tvp) 5900 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5901 } 5902 if (a->a_tdvp != a->a_fdvp) 5903 vdrop(a->a_fdvp); 5904 if (a->a_tvp != a->a_fvp) 5905 vdrop(a->a_fvp); 5906 vdrop(a->a_tdvp); 5907 if (a->a_tvp) 5908 vdrop(a->a_tvp); 5909 } 5910 5911 void 5912 vop_rmdir_pre(void *ap) 5913 { 5914 struct vop_rmdir_args *a; 5915 struct vnode *dvp, *vp; 5916 5917 a = ap; 5918 dvp = a->a_dvp; 5919 vp = a->a_vp; 5920 vn_seqc_write_begin(dvp); 5921 vn_seqc_write_begin(vp); 5922 } 5923 5924 void 5925 vop_rmdir_post(void *ap, int rc) 5926 { 5927 struct vop_rmdir_args *a; 5928 struct vnode *dvp, *vp; 5929 5930 a = ap; 5931 dvp = a->a_dvp; 5932 vp = a->a_vp; 5933 vn_seqc_write_end(dvp); 5934 vn_seqc_write_end(vp); 5935 if (!rc) { 5936 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 5937 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 5938 } 5939 } 5940 5941 void 5942 vop_setattr_pre(void *ap) 5943 { 5944 struct vop_setattr_args *a; 5945 struct vnode *vp; 5946 5947 a = ap; 5948 vp = a->a_vp; 5949 vn_seqc_write_begin(vp); 5950 } 5951 5952 void 5953 vop_setattr_post(void *ap, int rc) 5954 { 5955 struct vop_setattr_args *a; 5956 struct vnode *vp; 5957 5958 a = ap; 5959 vp = a->a_vp; 5960 vn_seqc_write_end(vp); 5961 if (!rc) 5962 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 5963 } 5964 5965 void 5966 vop_setacl_pre(void *ap) 5967 { 5968 struct vop_setacl_args *a; 5969 struct vnode *vp; 5970 5971 a = ap; 5972 vp = a->a_vp; 5973 vn_seqc_write_begin(vp); 5974 } 5975 5976 void 5977 vop_setacl_post(void *ap, int 
rc __unused) 5978 { 5979 struct vop_setacl_args *a; 5980 struct vnode *vp; 5981 5982 a = ap; 5983 vp = a->a_vp; 5984 vn_seqc_write_end(vp); 5985 } 5986 5987 void 5988 vop_setextattr_pre(void *ap) 5989 { 5990 struct vop_setextattr_args *a; 5991 struct vnode *vp; 5992 5993 a = ap; 5994 vp = a->a_vp; 5995 vn_seqc_write_begin(vp); 5996 } 5997 5998 void 5999 vop_setextattr_post(void *ap, int rc) 6000 { 6001 struct vop_setextattr_args *a; 6002 struct vnode *vp; 6003 6004 a = ap; 6005 vp = a->a_vp; 6006 vn_seqc_write_end(vp); 6007 if (!rc) 6008 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6009 } 6010 6011 void 6012 vop_symlink_pre(void *ap) 6013 { 6014 struct vop_symlink_args *a; 6015 struct vnode *dvp; 6016 6017 a = ap; 6018 dvp = a->a_dvp; 6019 vn_seqc_write_begin(dvp); 6020 } 6021 6022 void 6023 vop_symlink_post(void *ap, int rc) 6024 { 6025 struct vop_symlink_args *a; 6026 struct vnode *dvp; 6027 6028 a = ap; 6029 dvp = a->a_dvp; 6030 vn_seqc_write_end(dvp); 6031 if (!rc) 6032 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6033 } 6034 6035 void 6036 vop_open_post(void *ap, int rc) 6037 { 6038 struct vop_open_args *a = ap; 6039 6040 if (!rc) 6041 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6042 } 6043 6044 void 6045 vop_close_post(void *ap, int rc) 6046 { 6047 struct vop_close_args *a = ap; 6048 6049 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6050 !VN_IS_DOOMED(a->a_vp))) { 6051 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6052 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6053 } 6054 } 6055 6056 void 6057 vop_read_post(void *ap, int rc) 6058 { 6059 struct vop_read_args *a = ap; 6060 6061 if (!rc) 6062 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6063 } 6064 6065 void 6066 vop_readdir_post(void *ap, int rc) 6067 { 6068 struct vop_readdir_args *a = ap; 6069 6070 if (!rc) 6071 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6072 } 6073 6074 static struct knlist fs_knlist; 6075 6076 static void 6077 vfs_event_init(void *arg) 6078 { 6079 knlist_init_mtx(&fs_knlist, NULL); 6080 } 6081 /* XXX - correct order? */ 6082 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6083 6084 void 6085 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6086 { 6087 6088 KNOTE_UNLOCKED(&fs_knlist, event); 6089 } 6090 6091 static int filt_fsattach(struct knote *kn); 6092 static void filt_fsdetach(struct knote *kn); 6093 static int filt_fsevent(struct knote *kn, long hint); 6094 6095 struct filterops fs_filtops = { 6096 .f_isfd = 0, 6097 .f_attach = filt_fsattach, 6098 .f_detach = filt_fsdetach, 6099 .f_event = filt_fsevent 6100 }; 6101 6102 static int 6103 filt_fsattach(struct knote *kn) 6104 { 6105 6106 kn->kn_flags |= EV_CLEAR; 6107 knlist_add(&fs_knlist, kn, 0); 6108 return (0); 6109 } 6110 6111 static void 6112 filt_fsdetach(struct knote *kn) 6113 { 6114 6115 knlist_remove(&fs_knlist, kn, 0); 6116 } 6117 6118 static int 6119 filt_fsevent(struct knote *kn, long hint) 6120 { 6121 6122 kn->kn_fflags |= hint; 6123 return (kn->kn_fflags != 0); 6124 } 6125 6126 static int 6127 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6128 { 6129 struct vfsidctl vc; 6130 int error; 6131 struct mount *mp; 6132 6133 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6134 if (error) 6135 return (error); 6136 if (vc.vc_vers != VFS_CTL_VERS1) 6137 return (EINVAL); 6138 mp = vfs_getvfs(&vc.vc_fsid); 6139 if (mp == NULL) 6140 return (ENOENT); 6141 /* ensure that a specific sysctl goes to the right filesystem. 
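 * (a vc_fstypename of "*" is a wildcard that matches any filesystem type)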
static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* Ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");

/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int	filt_vfsread(struct knote *kn, long hint);
static int	filt_vfswrite(struct knote *kn, long hint);
static int	filt_vfsvnode(struct knote *kn, long hint);
static void	filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_locked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
#endif
}

static void
vfs_knl_assert_unlocked(void *arg)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}
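
/*
 * Illustrative userspace sketch (not kernel code): vfs_kqfilter() is
 * what services a registration like the following, attaching the knote
 * to the vnode's pollinfo knlist:
 *
 *	struct kevent kev;
 *	int fd = open("/some/file", O_RDONLY);
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */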
/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}

/*ARGSUSED*/
static int
filt_vfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	struct vattr va;
	int res;

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		VI_LOCK(vp);
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		VI_UNLOCK(vp);
		return (1);
	}

	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
		return (0);

	VI_LOCK(vp);
	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
	VI_UNLOCK(vp);
	return (res);
}

/*ARGSUSED*/
static int
filt_vfswrite(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	VI_LOCK(vp);

	/*
	 * The filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);

	kn->kn_data = 0;
	VI_UNLOCK(vp);
	return (1);
}

static int
filt_vfsvnode(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;
	int res;

	VI_LOCK(vp);
	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
		kn->kn_flags |= EV_EOF;
		VI_UNLOCK(vp);
		return (1);
	}
	res = (kn->kn_fflags != 0);
	VI_UNLOCK(vp);
	return (res);
}

/*
 * Returns 0 if the directory is empty; otherwise returns an error:
 * ENOTEMPTY if a real entry was found, or whatever error VOP_READDIR
 * failed with.
 */
int
vfs_emptydir(struct vnode *vp)
{
	struct uio uio;
	struct iovec iov;
	struct dirent *dirent, *dp, *endp;
	int error, eof;

	error = 0;
	eof = 0;

	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");

	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);
	iov.iov_base = dirent;
	iov.iov_len = sizeof(struct dirent);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(struct dirent);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	while (eof == 0 && error == 0) {
		error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof,
		    NULL, NULL);
		if (error != 0)
			break;
		endp = (void *)((uint8_t *)dirent +
		    sizeof(struct dirent) - uio.uio_resid);
		for (dp = dirent; dp < endp;
		    dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) {
			if (dp->d_type == DT_WHT)
				continue;
			if (dp->d_namlen == 0)
				continue;
			if (dp->d_type != DT_DIR &&
			    dp->d_type != DT_UNKNOWN) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen > 2) {
				error = ENOTEMPTY;
				break;
			}
			/*
			 * Only "." and ".." may remain; check both
			 * characters of a two-character name, not just
			 * the last one.
			 */
			if (dp->d_namlen == 1 &&
			    dp->d_name[0] != '.') {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_namlen == 2 &&
			    (dp->d_name[0] != '.' || dp->d_name[1] != '.')) {
				error = ENOTEMPTY;
				break;
			}
			uio.uio_resid = sizeof(struct dirent);
		}
	}
	free(dirent, M_TEMP);
	return (error);
}
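
/*
 * Usage sketch (hypothetical filesystem code): a filesystem that lacks a
 * cheap link-count check might gate its VOP_RMDIR implementation on the
 * helper above.  "myfs_rmdir" is illustrative only:
 *
 *	static int
 *	myfs_rmdir(struct vop_rmdir_args *ap)
 *	{
 *		int error;
 *
 *		error = vfs_emptydir(ap->a_vp);	// vp is locked here
 *		if (error != 0)
 *			return (error);		// typically ENOTEMPTY
 *		... actually remove the entry ...
 *	}
 */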
int
vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
{
	int error;

	if (dp->d_reclen > ap->a_uio->uio_resid)
		return (ENAMETOOLONG);
	error = uiomove(dp, dp->d_reclen, ap->a_uio);
	if (error) {
		if (ap->a_ncookies != NULL) {
			if (ap->a_cookies != NULL)
				free(ap->a_cookies, M_TEMP);
			ap->a_cookies = NULL;
			*ap->a_ncookies = 0;
		}
		return (error);
	}
	if (ap->a_ncookies == NULL)
		return (0);

	KASSERT(ap->a_cookies,
	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));

	*ap->a_cookies = realloc(*ap->a_cookies,
	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
	(*ap->a_cookies)[*ap->a_ncookies] = off;
	*ap->a_ncookies += 1;
	return (0);
}

/*
 * The purpose of this routine is to remove granularity from accmode_t,
 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
 * VADMIN and VAPPEND.
 *
 * If it returns 0, the caller is supposed to continue with the usual
 * access checks using 'accmode' as modified by this routine.  If it
 * returns a nonzero value, the caller is supposed to return that value
 * as errno.
 *
 * Note that after this routine runs, accmode may be zero.
 */
int
vfs_unixify_accmode(accmode_t *accmode)
{
	/*
	 * There is no way to specify an explicit "deny" rule using
	 * file mode or POSIX.1e ACLs.
	 */
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}

	/*
	 * None of these can be translated into usual access bits.
	 * Also, the common case for NFSv4 ACLs is to not contain
	 * either of these bits.  The caller should check for VWRITE
	 * on the containing directory instead.
	 */
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);

	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}

	/*
	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
	 */
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);

	return (0);
}
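
/*
 * Usage sketch (hypothetical filesystem code): a VOP_ACCESSX
 * implementation would typically funnel fine-grained NFSv4-style bits
 * through the helper above before falling back to plain mode checks:
 *
 *	accmode_t accmode = ap->a_accmode;
 *	int error;
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);		// e.g. EPERM for VDELETE
 *	if (accmode == 0)
 *		return (0);		// nothing left to check
 *	... continue with VEXEC/VREAD/VWRITE/VADMIN/VAPPEND checks ...
 */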
/*
 * Clear out a doomed vnode (if any) and replace it with a new one as long
 * as the fs is not being unmounted.  Return the root vnode to the caller.
 */
static int __noinline
vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

restart:
	if (mp->mnt_rootvnode != NULL) {
		MNT_ILOCK(mp);
		vp = mp->mnt_rootvnode;
		if (vp != NULL) {
			if (!VN_IS_DOOMED(vp)) {
				vrefact(vp);
				MNT_IUNLOCK(mp);
				error = vn_lock(vp, flags);
				if (error == 0) {
					*vpp = vp;
					return (0);
				}
				vrele(vp);
				goto restart;
			}
			/*
			 * Clear the old one.
			 */
			mp->mnt_rootvnode = NULL;
		}
		MNT_IUNLOCK(mp);
		if (vp != NULL) {
			vfs_op_barrier_wait(mp);
			vrele(vp);
		}
	}
	error = VFS_CACHEDROOT(mp, flags, vpp);
	if (error != 0)
		return (error);
	if (mp->mnt_vfs_ops == 0) {
		MNT_ILOCK(mp);
		if (mp->mnt_vfs_ops != 0) {
			MNT_IUNLOCK(mp);
			return (0);
		}
		if (mp->mnt_rootvnode == NULL) {
			vrefact(*vpp);
			mp->mnt_rootvnode = *vpp;
		} else {
			if (mp->mnt_rootvnode != *vpp) {
				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
					panic("%s: mismatch between vnode "
					    "returned by VFS_CACHEDROOT and "
					    "the one cached (%p != %p)",
					    __func__, *vpp, mp->mnt_rootvnode);
				}
			}
		}
		MNT_IUNLOCK(mp);
	}
	return (0);
}

int
vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	if (!vfs_op_thread_enter(mp))
		return (vfs_cache_root_fallback(mp, flags, vpp));
	vp = atomic_load_ptr(&mp->mnt_rootvnode);
	if (vp == NULL || VN_IS_DOOMED(vp)) {
		vfs_op_thread_exit(mp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	vrefact(vp);
	vfs_op_thread_exit(mp);
	error = vn_lock(vp, flags);
	if (error != 0) {
		vrele(vp);
		return (vfs_cache_root_fallback(mp, flags, vpp));
	}
	*vpp = vp;
	return (0);
}

struct vnode *
vfs_cache_root_clear(struct mount *mp)
{
	struct vnode *vp;

	/*
	 * mnt_vfs_ops > 0 guarantees that nobody can pick up the cached
	 * vnode from mnt_rootvnode in the meantime.
	 */
	MPASS(mp->mnt_vfs_ops > 0);
	vp = mp->mnt_rootvnode;
	if (vp != NULL)
		vn_seqc_write_begin(vp);
	mp->mnt_rootvnode = NULL;
	return (vp);
}

void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}
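
/*
 * Usage sketch (illustrative): a caller that needs the root of a mounted
 * filesystem, e.g. during path lookup, obtains a locked, referenced
 * vnode and releases it with vput():
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vfs_cache_root(mp, LK_SHARED, &vp);
 *	if (error != 0)
 *		return (error);
 *	... use vp ...
 *	vput(vp);
 */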
/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/*
		 * Allow a racy peek at VIRF_DOOMED to save a lock
		 * acquisition.
		 */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/*
		 * Allow a racy peek at VIRF_DOOMED to save a lock
		 * acquisition.
		 */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
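
/*
 * Usage sketch (illustrative): filesystems normally consume the helpers
 * above through the MNT_VNODE_FOREACH_ALL() macro.  Each iteration
 * yields a non-doomed vnode with its interlock held; the body must drop
 * that interlock, and early exit must go through the _ABORT variant so
 * the marker is cleaned up:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (error != 0) {
 *			VI_UNLOCK(vp);
 *			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
 *			break;
 *		}
 *		... process vp, then VI_UNLOCK(vp) ...
 *	}
 */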
/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note we may be racing against vdrop which transitioned the hold
	 * count to 0 and now waits for the ->mnt_listmtx lock.  This is
	 * fine; if we are the only user after we get the interlock we will
	 * just vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}

static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the
		 * list as a result.  Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}
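
/*
 * Usage sketch (illustrative): the lazy iterator is consumed through
 * MNT_VNODE_FOREACH_LAZY() with a filter callback, so uninteresting
 * vnodes are skipped without taking their interlocks.  The callback
 * below is hypothetical and merely selects vnodes with dirty buffers:
 *
 *	static bool
 *	myfs_sync_filter(struct vnode *vp, void *arg __unused)
 *	{
 *		return (vp->v_bufobj.bo_dirty.bv_cnt != 0);
 *	}
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, myfs_sync_filter, NULL) {
 *		... vp is returned with its interlock held ...
 *		VI_UNLOCK(vp);
 *	}
 */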
int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread));
}

void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}
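
/*
 * Usage sketch (illustrative): vn_seqc_write_begin/end bracket vnode
 * state changes so that lockless readers can detect a modification in
 * flight and retry or bail out.  A reader following the seqc(9) pattern
 * looks roughly like this:
 *
 *	seqc_t seq;
 *
 *	for (;;) {
 *		seq = seqc_read(&vp->v_seqc);
 *		... speculatively read vnode state ...
 *		if (seqc_consistent(&vp->v_seqc, seq))
 *			break;		// the snapshot was stable
 *	}
 */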