/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	v_incr_devcount(struct vnode *);
static void	v_decr_devcount(struct vnode *);
static void	vgonel(struct vnode *);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_locked(void *arg);
static void	vfs_knl_assert_unlocked(void *arg);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

/*
 * These fences are intended for cases where some synchronization is
 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
 * and v_usecount) updates.  Access to v_iflags is generally synchronized
 * by the interlock, but we have some internal assertions that check vnode
 * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
 * for now.
 */
#ifdef INVARIANTS
#define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
#define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
#else
#define	VNODE_REFCOUNT_FENCE_ACQ()
#define	VNODE_REFCOUNT_FENCE_REL()
#endif

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
    &freevnodes, 0, "Number of \"free\" vnodes");
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS,
    &reassignbufcalls, 0, "Number of calls to reassignbuf");

static counter_u64_t deferred_inact;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
    "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata for filesystems
 * mounted on block devices is delayed only about half the time that file
 * data is delayed.  Similarly, directory updates are more critical, so
 * are only delayed about a third the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	long freevnodes;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;		/* enough extras after expansion */
static u_long vlowat;		/* minimal extras before expansion */
static u_long vstir;		/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");
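/*
 * Illustrative usage (a sketch, not part of the kernel proper): the target
 * above can be inspected and changed at runtime with sysctl(8), e.g.:
 *
 *	# sysctl kern.maxvnodes
 *	kern.maxvnodes: 404291		(value is hypothetical)
 *	# sysctl kern.maxvnodes=500000
 *
 * Raising it grows the cache target; lowering it makes vnlru_proc() reclaim
 * vnodes until the new target is met.
 */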
static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	NDFREE(&nd, 0);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
static int vnsz2log;
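/*
 * Illustrative usage of the write-only debug knobs above (path is
 * hypothetical):
 *
 *	# sysctl debug.try_reclaim_vnode=/mnt/data/somefile
 *
 * If the vnode is already doomed the sysctl returns EAGAIN, in which case
 * a retry is expected to succeed because namei() will allocate a fresh
 * vnode.
 */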
/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(buf_trie_zone, node);
}
PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	LIST_INIT(&vp->v_cache_src);
	TAILQ_INIT(&vp->v_cache_dst);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ	148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ	92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	int cpu, physvnodes, virtvnodes;
	u_int i;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_SMR);
	vfs_smr = uma_zone_get_smr(vnode_zone);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	deferred_inact = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");
	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
		vnsz2log++;
	vnsz2log--;

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
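/*
 * Worked example of the sizing in vntblinit() above (illustrative only,
 * assuming 4 GiB of RAM with 4 KiB pages, so that
 * pgtok(vm_cnt.v_page_count) == 4194304):
 *
 *	physvnodes = maxproc + 4194304 / 64 +
 *	    3 * min(98304 * 16, 4194304) / 64
 *	           = maxproc + 65536 + 73728
 *
 * i.e. roughly 140k vnodes plus maxproc, unless the kernel heap bound
 * (virtvnodes) or MAXVNODES_MAX is smaller.
 */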
/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	       |
 *	       +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *	VOP_LOOKUP() obtains B while A is held
 *	vfs_busy() obtains a shared lock on F while A and B are held
 *	vput() releases lock on B
 *	vput() releases lock on A
 *	VFS_ROOT() obtains lock on D while shared lock on F is held
 *	vfs_unbusy() releases shared lock on F
 *	vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *	    Attempt to lock A (instead of vp_crossmp) while D is held would
 *	    violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.
 */
int
vfs_busy(struct mount *mp, int flags)
{

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mp, ref, 1);
		vfs_mp_count_add_pcpu(mp, lockref, 1);
		vfs_op_thread_exit(mp);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller that the mount point it tried to busy is no
	 * longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}
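/*
 * Illustrative call pattern for the pair vfs_busy()/vfs_unbusy() (a sketch
 * only, not a definitive API contract):
 *
 *	if (vfs_busy(mp, 0) != 0)
 *		return (ENOENT);	(mp was being unmounted)
 *	... operate on vnodes belonging to mp ...
 *	vfs_unbusy(mp);
 */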
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mp, lockref, 1);
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we
 * have to check what we got, and fall back to the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}
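/*
 * Worked example of the cache indexing in vfs_busyfs() above, with
 * hypothetical values: for an fsid with val[0] == 0x12345678 and
 * val[1] == 0x9abcdef0,
 *
 *	hash = 0x12345678 ^ 0x9abcdef0;		(0x88888888)
 *	hash = (hash >> 16 ^ hash) & 255;	(0x88880000 & 0xff == 0)
 *
 * so this fsid maps to cache slot 0.  A stale or colliding entry is
 * harmless: the fsidcmp() and vfs_busy() checks reject it and the lookup
 * falls back to scanning mountlist.
 */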
/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If file system supports delegated administration, we don't check
	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
	 * by the file system itself.
	 * If this is not the user that did original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");
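/*
 * Illustrative usage (a sketch): the knob above can be changed at runtime,
 * e.g. to request maximum precision:
 *
 *	# sysctl vfs.timestamp_precision=3
 *
 * The default (TSP_USEC) reads a full-precision clock but truncates file
 * timestamps to microseconds.
 */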
/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *		entries if this argument is true
 * @param trigger	Only reclaim vnodes with fewer than this many resident
 *			pages.
 * @param target	How many vnodes to reclaim.
 * @return		The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		if (!VI_TRYLOCK(vp))
			goto next_iter;

		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    VN_IS_DOOMED(vp) || vp->v_type == VNON) {
			VI_UNLOCK(vp);
			goto next_iter;
		}

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			VI_UNLOCK(vp);
			goto next_iter;
		}

		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		if (should_yield())
			kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
    0, "limit on vnode free requests per call to the vnlru_free routine");
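/*
 * Both vlrureclaim() above and vnlru_free_locked() below walk vnode_list
 * using a dedicated marker vnode.  A sketch of the pattern (assuming
 * vnode_list_mtx is held at the top of each iteration):
 *
 *	vp = TAILQ_NEXT(marker, v_vnodelist);		(advance past marker)
 *	TAILQ_REMOVE(&vnode_list, marker, v_vnodelist);	(re-anchor marker)
 *	TAILQ_INSERT_AFTER(&vnode_list, vp, marker, v_vnodelist);
 *	... drop the list lock, work on vp, re-lock, restart at marker ...
 *
 * Because the marker stays on the list while the lock is dropped, the scan
 * can resume where it left off even if neighbouring vnodes are removed.
 */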
/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_locked(int count, struct vfsops *mnt_op)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	int ocount;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	ocount = count;
	mvp = vnode_list_free_marker;
restart:
	vp = mvp;
	while (count > 0) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 * Don't recycle if we can't get the interlock without
		 * blocking.
		 */
		if (vp->v_holdcnt > 0 || (mnt_op != NULL &&
		    (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
			continue;
		}
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
		count--;
		mtx_unlock(&vnode_list_mtx);
		VI_UNLOCK(vp);
		vtryrecycle(vp);
		vdrop(vp);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	return (ocount - count);
}

void
vnlru_free(int count, struct vfsops *mnt_op)
{

	mtx_lock(&vnode_list_mtx);
	vnlru_free_locked(count, mnt_op);
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

/*
 * The main freevnodes counter is only updated when threads requeue their vnode
 * batches.  CPUs are conditionally walked to compute a more accurate total.
 *
 * Limit how much slop we are willing to tolerate.  Note: the actual value
 * at any given moment can still exceed slop, but it should not be by a
 * significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 128

static u_long
vnlru_read_freevnodes(void)
{
	struct vdbatch *vd;
	long slop;
	int cpu;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (freevnodes > freevnodes_old)
		slop = freevnodes - freevnodes_old;
	else
		slop = freevnodes_old - freevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (freevnodes >= 0 ? freevnodes : 0);
	freevnodes_old = freevnodes;
	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		freevnodes_old += vd->freevnodes;
	}
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}
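/*
 * Worked example of the watermarks computed by vnlru_recalc() above
 * (illustrative numbers): with desiredvnodes == 100000 and the default
 * wantfreevnodes == desiredvnodes / 4 == 25000,
 *
 *	gapvnodes = imax(100000 - 25000, 100)  = 75000
 *	vhiwat    = 75000 / 11                ~= 6818	(~9% of the gap)
 *	vlowat    = 6818 / 2                   = 3409	(~4.5%)
 */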
static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static bool
vnlru_under_unlocked(u_long rnumvnodes, u_long limit)
{
	long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = atomic_load_long(&freevnodes);
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		wakeup(vnlruproc);
	}
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);
		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or without excessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = 0;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			msleep(vnlruproc, &vnode_list_mtx,
			    PVFS|PDROP, "vlruwt", hz);
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.
		 * Normally it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNASSERT(vp->v_holdcnt, vp,
	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
	 */
	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, vp %p lock is already held",
		    __func__, vp);
		return (EWOULDBLOCK);
	}
	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
		VOP_UNLOCK(vp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, cannot start the write for %p",
		    __func__, vp);
		return (EBUSY);
	}
	/*
	 * If we got this far, we need to acquire the interlock and see if
	 * anyone picked up this vnode from another list.  If not, we will
	 * mark it with DOOMED via vgonel() so that anyone who does find it
	 * will skip over it.
	 */
	VI_LOCK(vp);
	if (vp->v_usecount) {
		VOP_UNLOCK(vp);
		VI_UNLOCK(vp);
		vn_finished_write(vnmp);
		CTR2(KTR_VFS,
		    "%s: impossible to recycle, %p is already referenced",
		    __func__, vp);
		return (EBUSY);
	}
	if (!VN_IS_DOOMED(vp)) {
		counter_u64_add(recycles_free_count, 1);
		vgonel(vp);
	}
	VOP_UNLOCK(vp);
	VI_UNLOCK(vp);
	vn_finished_write(vnmp);
	return (0);
}
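/*
 * Sketch of how a filesystem typically obtains a fresh vnode from the
 * allocator below (names like "myfs" and myfs_vnodeops are hypothetical;
 * see getnewvnode(9) for the authoritative pattern):
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error == 0) {
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		error = insmntque(vp, mp);	(destroys vp on failure)
 *	}
 */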
/*
 * Allocate a new vnode.
 *
 * The operation never returns an error.  Returning an error was disabled
 * in r145385 (dated 2005) with the following comment:
 *
 * XXX Not all VFS_VGET/ffs_vget callers check returns.
 *
 * Given the age of this commit (almost 15 years at the time of writing this
 * comment) restoring the ability to fail requires a significant audit of
 * all codepaths.
 *
 * The routine can try to free a vnode or stall for up to 1 second waiting
 * for vnlru to clear things up, but ultimately always performs an M_WAITOK
 * allocation.
 */
static u_long vn_alloc_cyclecount;

static struct vnode * __noinline
vn_alloc_hard(struct mount *mp)
{
	u_long rnumvnodes, rfreevnodes;

	mtx_lock(&vnode_list_mtx);
	rnumvnodes = atomic_load_long(&numvnodes);
	if (rnumvnodes + 1 < desiredvnodes) {
		vn_alloc_cyclecount = 0;
		goto alloc;
	}
	rfreevnodes = vnlru_read_freevnodes();
	if (vn_alloc_cyclecount++ >= rfreevnodes) {
		vn_alloc_cyclecount = 0;
		vstir = 1;
	}
	/*
	 * Grow the vnode cache if it will not be above its target max
	 * after growing.  Otherwise, if the free list is nonempty, try
	 * to reclaim 1 item from it before growing the cache (possibly
	 * above its target max if the reclamation failed or is delayed).
	 * Otherwise, wait for some space.  In all cases, schedule
	 * vnlru_proc() if we are getting short of space.  The watermarks
	 * should be chosen so that we never wait or even reclaim from
	 * the free list to below its target minimum.
	 */
	if (vnlru_free_locked(1, NULL) > 0)
		goto alloc;
	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
		/*
		 * Wait for space for a new vnode.
		 */
		vnlru_kick();
		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
		    vnlru_read_freevnodes() > 1)
			vnlru_free_locked(1, NULL);
	}
alloc:
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (vnlru_under(rnumvnodes, vlowat))
		vnlru_kick();
	mtx_unlock(&vnode_list_mtx);
	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static struct vnode *
vn_alloc(struct mount *mp)
{
	u_long rnumvnodes;

	if (__predict_false(vn_alloc_cyclecount != 0))
		return (vn_alloc_hard(mp));
	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
		atomic_subtract_long(&numvnodes, 1);
		return (vn_alloc_hard(mp));
	}

	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
}

static void
vn_free(struct vnode *vp)
{

	atomic_subtract_long(&numvnodes, 1);
	uma_zfree_smr(vnode_zone, vp);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
    struct vnode **vpp)
{
	struct vnode *vp;
	struct thread *td;
	struct lock_object *lo;

	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);

	KASSERT(vops->registered,
	    ("%s: not registered vector op %p\n", __func__, vops));

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vp = td->td_vp_reserved;
		td->td_vp_reserved = NULL;
	} else {
		vp = vn_alloc(mp);
	}
	counter_u64_add(vnodes_created, 1);
	/*
	 * Locks are given the generic name "vnode" when created.
	 * Follow the historic practice of using the filesystem
	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
	 *
	 * Locks live in a witness group keyed on their name.  Thus,
	 * when a lock is renamed, it must also move from the witness
	 * group of its old name to the witness group of its new name.
	 *
	 * The change only needs to be made when the vnode moves
	 * from one filesystem type to another.  We ensure that each
	 * filesystem uses a single static name pointer for its tag so
	 * that we can compare pointers rather than doing a strcmp().
	 */
	lo = &vp->v_vnlock->lock_object;
#ifdef WITNESS
	if (lo->lo_name != tag) {
#endif
		lo->lo_name = tag;
#ifdef WITNESS
		WITNESS_DESTROY(lo);
		WITNESS_INIT(lo, tag);
	}
#endif
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
	/*
	 * Finalize various vnode identity bits.
	 */
	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
	vp->v_type = VNON;
	vp->v_op = vops;
	v_init_counters(vp);
	vp->v_bufobj.bo_ops = &buf_ops_bio;
#ifdef DIAGNOSTIC
	if (mp == NULL && vops != &dead_vnodeops)
		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
#endif
#ifdef MAC
	mac_vnode_init(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_vnode_associate_singlelabel(mp, vp);
#endif
	if (mp != NULL) {
		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
			vp->v_vflag |= VV_NOKNOTE;
	}

	/*
	 * For the filesystems which do not use vfs_hash_insert(),
	 * still initialize v_hash to have vfs_hash_index() useful.
	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
	 * its own hashing.
	 */
	vp->v_hash = (uintptr_t)vp >> vnsz2log;

	*vpp = vp;
	return (0);
}

void
getnewvnode_reserve(void)
{
	struct thread *td;

	td = curthread;
	MPASS(td->td_vp_reserved == NULL);
	td->td_vp_reserved = vn_alloc(NULL);
}

void
getnewvnode_drop_reserve(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_vp_reserved != NULL) {
		vn_free(td->td_vp_reserved);
		td->td_vp_reserved = NULL;
	}
}

static void
freevnode(struct vnode *vp)
{
	struct bufobj *bo;

	/*
	 * The vnode has been marked for destruction, so free it.
	 *
	 * The vnode will be returned to the zone where it will
	 * normally remain until it is needed for another vnode.  We
	 * need to cleanup (or verify that the cleanup has already
	 * been done) any residual data left from its current use
	 * so as not to contaminate the freshly allocated vnode.
	 */
	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
	bo = &vp->v_bufobj;
	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
	VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
	    ("clean blk trie not empty"));
	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
	    ("dirty blk trie not empty"));
	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
	    ("Dangling rangelock waiters"));
	VI_UNLOCK(vp);
#ifdef MAC
	mac_vnode_destroy(vp);
#endif
	if (vp->v_pollinfo != NULL) {
		destroy_vpollinfo(vp->v_pollinfo);
		vp->v_pollinfo = NULL;
	}
#ifdef INVARIANTS
	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
	vp->v_op = NULL;
#endif
	vp->v_mountedhere = NULL;
	vp->v_unpcb = NULL;
	vp->v_rdev = NULL;
	vp->v_fifoinfo = NULL;
	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
	vp->v_irflag = 0;
	vp->v_iflag = 0;
	vp->v_vflag = 0;
	bo->bo_flag = 0;
	vn_free(vp);
}

/*
 * Delete from old mount point vnode list, if on one.
 */
static void
delmntque(struct vnode *vp)
{
	struct mount *mp;

	VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);

	mp = vp->v_mount;
	if (mp == NULL)
		return;
	MNT_ILOCK(mp);
	VI_LOCK(vp);
	vp->v_mount = NULL;
	VI_UNLOCK(vp);
	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
	    ("bad mount point vnode list size"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize--;
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}

static void
insmntque_stddtr(struct vnode *vp, void *dtr_arg)
{

	vp->v_data = NULL;
	vp->v_op = &dead_vnodeops;
	vgone(vp);
	vput(vp);
}

/*
 * Insert into list of vnodes for the new mount point, if available.
 */
int
insmntque1(struct vnode *vp, struct mount *mp,
    void (*dtr)(struct vnode *, void *), void *dtr_arg)
{

	KASSERT(vp->v_mount == NULL,
	    ("insmntque: vnode already on per mount vnode list"));
	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");

	/*
	 * We acquire the vnode interlock early to ensure that the
	 * vnode cannot be recycled by another process releasing a
	 * holdcnt on it before we get it on both the vnode list
	 * and the active vnode list.  The mount mutex protects only
	 * manipulation of the vnode list and the vnode freelist
	 * mutex protects only manipulation of the active vnode list.
	 * Hence the need to hold the vnode interlock throughout.
1860 */ 1861 MNT_ILOCK(mp); 1862 VI_LOCK(vp); 1863 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1864 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1865 mp->mnt_nvnodelistsize == 0)) && 1866 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1867 VI_UNLOCK(vp); 1868 MNT_IUNLOCK(mp); 1869 if (dtr != NULL) 1870 dtr(vp, dtr_arg); 1871 return (EBUSY); 1872 } 1873 vp->v_mount = mp; 1874 MNT_REF(mp); 1875 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1876 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1877 ("neg mount point vnode list size")); 1878 mp->mnt_nvnodelistsize++; 1879 VI_UNLOCK(vp); 1880 MNT_IUNLOCK(mp); 1881 return (0); 1882 } 1883 1884 int 1885 insmntque(struct vnode *vp, struct mount *mp) 1886 { 1887 1888 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1889 } 1890 1891 /* 1892 * Flush out and invalidate all buffers associated with a bufobj 1893 * Called with the underlying object locked. 1894 */ 1895 int 1896 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1897 { 1898 int error; 1899 1900 BO_LOCK(bo); 1901 if (flags & V_SAVE) { 1902 error = bufobj_wwait(bo, slpflag, slptimeo); 1903 if (error) { 1904 BO_UNLOCK(bo); 1905 return (error); 1906 } 1907 if (bo->bo_dirty.bv_cnt > 0) { 1908 BO_UNLOCK(bo); 1909 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1910 return (error); 1911 /* 1912 * XXX We could save a lock/unlock if this was only 1913 * enabled under INVARIANTS 1914 */ 1915 BO_LOCK(bo); 1916 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1917 panic("vinvalbuf: dirty bufs"); 1918 } 1919 } 1920 /* 1921 * If you alter this loop please notice that interlock is dropped and 1922 * reacquired in flushbuflist. Special care is needed to ensure that 1923 * no race conditions occur from this. 1924 */ 1925 do { 1926 error = flushbuflist(&bo->bo_clean, 1927 flags, bo, slpflag, slptimeo); 1928 if (error == 0 && !(flags & V_CLEANONLY)) 1929 error = flushbuflist(&bo->bo_dirty, 1930 flags, bo, slpflag, slptimeo); 1931 if (error != 0 && error != EAGAIN) { 1932 BO_UNLOCK(bo); 1933 return (error); 1934 } 1935 } while (error != 0); 1936 1937 /* 1938 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1939 * have write I/O in-progress but if there is a VM object then the 1940 * VM object can also have read-I/O in-progress. 1941 */ 1942 do { 1943 bufobj_wwait(bo, 0, 0); 1944 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1945 BO_UNLOCK(bo); 1946 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1947 BO_LOCK(bo); 1948 } 1949 } while (bo->bo_numoutput > 0); 1950 BO_UNLOCK(bo); 1951 1952 /* 1953 * Destroy the copy in the VM cache, too. 1954 */ 1955 if (bo->bo_object != NULL && 1956 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1957 VM_OBJECT_WLOCK(bo->bo_object); 1958 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1959 OBJPR_CLEANONLY : 0); 1960 VM_OBJECT_WUNLOCK(bo->bo_object); 1961 } 1962 1963 #ifdef INVARIANTS 1964 BO_LOCK(bo); 1965 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1966 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1967 bo->bo_clean.bv_cnt > 0)) 1968 panic("vinvalbuf: flush failed"); 1969 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1970 bo->bo_dirty.bv_cnt > 0) 1971 panic("vinvalbuf: flush dirty failed"); 1972 BO_UNLOCK(bo); 1973 #endif 1974 return (0); 1975 } 1976 1977 /* 1978 * Flush out and invalidate all buffers associated with a vnode. 1979 * Called with the underlying object locked. 
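 *
 * V_SAVE requests that dirty buffers be written out before they are
 * invalidated.  For reference, vgonel() below first tries to preserve
 * the data and only then discards it, roughly:
 *
 *	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
 *		while (vinvalbuf(vp, 0, 0, 0) != 0)
 *			;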
1980 */ 1981 int 1982 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1983 { 1984 1985 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1986 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1987 if (vp->v_object != NULL && vp->v_object->handle != vp) 1988 return (0); 1989 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1990 } 1991 1992 /* 1993 * Flush out buffers on the specified list. 1994 * 1995 */ 1996 static int 1997 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1998 int slptimeo) 1999 { 2000 struct buf *bp, *nbp; 2001 int retval, error; 2002 daddr_t lblkno; 2003 b_xflags_t xflags; 2004 2005 ASSERT_BO_WLOCKED(bo); 2006 2007 retval = 0; 2008 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2009 /* 2010 * If we are flushing both V_NORMAL and V_ALT buffers then 2011 * do not skip any buffers. If we are flushing only V_NORMAL 2012 * buffers then skip buffers marked as BX_ALTDATA. If we are 2013 * flushing only V_ALT buffers then skip buffers not marked 2014 * as BX_ALTDATA. 2015 */ 2016 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2017 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2018 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2019 continue; 2020 } 2021 if (nbp != NULL) { 2022 lblkno = nbp->b_lblkno; 2023 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2024 } 2025 retval = EAGAIN; 2026 error = BUF_TIMELOCK(bp, 2027 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2028 "flushbuf", slpflag, slptimeo); 2029 if (error) { 2030 BO_LOCK(bo); 2031 return (error != ENOLCK ? error : EAGAIN); 2032 } 2033 KASSERT(bp->b_bufobj == bo, 2034 ("bp %p wrong b_bufobj %p should be %p", 2035 bp, bp->b_bufobj, bo)); 2036 /* 2037 * XXX Since there are no node locks for NFS, I 2038 * believe there is a slight chance that a delayed 2039 * write will occur while sleeping just above, so 2040 * check for it. 2041 */ 2042 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2043 (flags & V_SAVE)) { 2044 bremfree(bp); 2045 bp->b_flags |= B_ASYNC; 2046 bwrite(bp); 2047 BO_LOCK(bo); 2048 return (EAGAIN); /* XXX: why not loop ? */ 2049 } 2050 bremfree(bp); 2051 bp->b_flags |= (B_INVAL | B_RELBUF); 2052 bp->b_flags &= ~B_ASYNC; 2053 brelse(bp); 2054 BO_LOCK(bo); 2055 if (nbp == NULL) 2056 break; 2057 nbp = gbincore(bo, lblkno); 2058 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2059 != xflags) 2060 break; /* nbp invalid */ 2061 } 2062 return (retval); 2063 } 2064 2065 int 2066 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2067 { 2068 struct buf *bp; 2069 int error; 2070 daddr_t lblkno; 2071 2072 ASSERT_BO_LOCKED(bo); 2073 2074 for (lblkno = startn;;) { 2075 again: 2076 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2077 if (bp == NULL || bp->b_lblkno >= endn || 2078 bp->b_lblkno < startn) 2079 break; 2080 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2081 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2082 if (error != 0) { 2083 BO_RLOCK(bo); 2084 if (error == ENOLCK) 2085 goto again; 2086 return (error); 2087 } 2088 KASSERT(bp->b_bufobj == bo, 2089 ("bp %p wrong b_bufobj %p should be %p", 2090 bp, bp->b_bufobj, bo)); 2091 lblkno = bp->b_lblkno + 1; 2092 if ((bp->b_flags & B_MANAGED) == 0) 2093 bremfree(bp); 2094 bp->b_flags |= B_RELBUF; 2095 /* 2096 * In the VMIO case, use the B_NOREUSE flag to hint that the 2097 * pages backing each buffer in the range are unlikely to be 2098 * reused. 
Dirty buffers will have the hint applied once 2099 * they've been written. 2100 */ 2101 if ((bp->b_flags & B_VMIO) != 0) 2102 bp->b_flags |= B_NOREUSE; 2103 brelse(bp); 2104 BO_RLOCK(bo); 2105 } 2106 return (0); 2107 } 2108 2109 /* 2110 * Truncate a file's buffer and pages to a specified length. This 2111 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2112 * sync activity. 2113 */ 2114 int 2115 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2116 { 2117 struct buf *bp, *nbp; 2118 struct bufobj *bo; 2119 daddr_t startlbn; 2120 2121 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2122 vp, blksize, (uintmax_t)length); 2123 2124 /* 2125 * Round up to the *next* lbn. 2126 */ 2127 startlbn = howmany(length, blksize); 2128 2129 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2130 2131 bo = &vp->v_bufobj; 2132 restart_unlocked: 2133 BO_LOCK(bo); 2134 2135 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2136 ; 2137 2138 if (length > 0) { 2139 restartsync: 2140 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2141 if (bp->b_lblkno > 0) 2142 continue; 2143 /* 2144 * Since we hold the vnode lock this should only 2145 * fail if we're racing with the buf daemon. 2146 */ 2147 if (BUF_LOCK(bp, 2148 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2149 BO_LOCKPTR(bo)) == ENOLCK) 2150 goto restart_unlocked; 2151 2152 VNASSERT((bp->b_flags & B_DELWRI), vp, 2153 ("buf(%p) on dirty queue without DELWRI", bp)); 2154 2155 bremfree(bp); 2156 bawrite(bp); 2157 BO_LOCK(bo); 2158 goto restartsync; 2159 } 2160 } 2161 2162 bufobj_wwait(bo, 0, 0); 2163 BO_UNLOCK(bo); 2164 vnode_pager_setsize(vp, length); 2165 2166 return (0); 2167 } 2168 2169 /* 2170 * Invalidate the cached pages of a file's buffer within the range of block 2171 * numbers [startlbn, endlbn). 
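 *
 * As an illustrative sketch (hypothetical caller), invalidating the byte
 * range [off, off + len) of a file would translate to:
 *
 *	startlbn = off / blksize;
 *	endlbn = howmany(off + len, blksize);
 *	v_inval_buf_range(vp, startlbn, endlbn, blksize);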
2172 */ 2173 void 2174 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2175 int blksize) 2176 { 2177 struct bufobj *bo; 2178 off_t start, end; 2179 2180 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2181 2182 start = blksize * startlbn; 2183 end = blksize * endlbn; 2184 2185 bo = &vp->v_bufobj; 2186 BO_LOCK(bo); 2187 MPASS(blksize == bo->bo_bsize); 2188 2189 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2190 ; 2191 2192 BO_UNLOCK(bo); 2193 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2194 } 2195 2196 static int 2197 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2198 daddr_t startlbn, daddr_t endlbn) 2199 { 2200 struct buf *bp, *nbp; 2201 bool anyfreed; 2202 2203 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2204 ASSERT_BO_LOCKED(bo); 2205 2206 do { 2207 anyfreed = false; 2208 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2209 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2210 continue; 2211 if (BUF_LOCK(bp, 2212 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2213 BO_LOCKPTR(bo)) == ENOLCK) { 2214 BO_LOCK(bo); 2215 return (EAGAIN); 2216 } 2217 2218 bremfree(bp); 2219 bp->b_flags |= B_INVAL | B_RELBUF; 2220 bp->b_flags &= ~B_ASYNC; 2221 brelse(bp); 2222 anyfreed = true; 2223 2224 BO_LOCK(bo); 2225 if (nbp != NULL && 2226 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2227 nbp->b_vp != vp || 2228 (nbp->b_flags & B_DELWRI) != 0)) 2229 return (EAGAIN); 2230 } 2231 2232 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2233 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2234 continue; 2235 if (BUF_LOCK(bp, 2236 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2237 BO_LOCKPTR(bo)) == ENOLCK) { 2238 BO_LOCK(bo); 2239 return (EAGAIN); 2240 } 2241 bremfree(bp); 2242 bp->b_flags |= B_INVAL | B_RELBUF; 2243 bp->b_flags &= ~B_ASYNC; 2244 brelse(bp); 2245 anyfreed = true; 2246 2247 BO_LOCK(bo); 2248 if (nbp != NULL && 2249 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2250 (nbp->b_vp != vp) || 2251 (nbp->b_flags & B_DELWRI) == 0)) 2252 return (EAGAIN); 2253 } 2254 } while (anyfreed); 2255 return (0); 2256 } 2257 2258 static void 2259 buf_vlist_remove(struct buf *bp) 2260 { 2261 struct bufv *bv; 2262 2263 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2264 ASSERT_BO_WLOCKED(bp->b_bufobj); 2265 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2266 (BX_VNDIRTY|BX_VNCLEAN), 2267 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2268 if (bp->b_xflags & BX_VNDIRTY) 2269 bv = &bp->b_bufobj->bo_dirty; 2270 else 2271 bv = &bp->b_bufobj->bo_clean; 2272 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2273 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2274 bv->bv_cnt--; 2275 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2276 } 2277 2278 /* 2279 * Add the buffer to the sorted clean or dirty block list. 2280 * 2281 * NOTE: xflags is passed as a constant, optimizing this inline function! 
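 *
 * For example, a sequential writer dirtying blocks in ascending lblkno
 * order always lands in the cheap TAILQ_LAST comparison below and appends
 * at the tail without consulting the trie at all.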
2282 */ 2283 static void 2284 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2285 { 2286 struct bufv *bv; 2287 struct buf *n; 2288 int error; 2289 2290 ASSERT_BO_WLOCKED(bo); 2291 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2292 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2293 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2294 ("dead bo %p", bo)); 2295 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2296 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2297 bp->b_xflags |= xflags; 2298 if (xflags & BX_VNDIRTY) 2299 bv = &bo->bo_dirty; 2300 else 2301 bv = &bo->bo_clean; 2302 2303 /* 2304 * Keep the list ordered. Optimize empty list insertion. Assume 2305 * we tend to grow at the tail so lookup_le should usually be cheaper 2306 * than _ge. 2307 */ 2308 if (bv->bv_cnt == 0 || 2309 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2310 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2311 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2312 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2313 else 2314 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2315 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2316 if (error) 2317 panic("buf_vlist_add: Preallocated nodes insufficient."); 2318 bv->bv_cnt++; 2319 } 2320 2321 /* 2322 * Look up a buffer using the buffer tries. 2323 */ 2324 struct buf * 2325 gbincore(struct bufobj *bo, daddr_t lblkno) 2326 { 2327 struct buf *bp; 2328 2329 ASSERT_BO_LOCKED(bo); 2330 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2331 if (bp != NULL) 2332 return (bp); 2333 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2334 } 2335 2336 /* 2337 * Associate a buffer with a vnode. 2338 */ 2339 void 2340 bgetvp(struct vnode *vp, struct buf *bp) 2341 { 2342 struct bufobj *bo; 2343 2344 bo = &vp->v_bufobj; 2345 ASSERT_BO_WLOCKED(bo); 2346 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2347 2348 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2349 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2350 ("bgetvp: bp already attached! %p", bp)); 2351 2352 vhold(vp); 2353 bp->b_vp = vp; 2354 bp->b_bufobj = bo; 2355 /* 2356 * Insert onto list for new vnode. 2357 */ 2358 buf_vlist_add(bp, bo, BX_VNCLEAN); 2359 } 2360 2361 /* 2362 * Disassociate a buffer from a vnode. 2363 */ 2364 void 2365 brelvp(struct buf *bp) 2366 { 2367 struct bufobj *bo; 2368 struct vnode *vp; 2369 2370 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2371 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2372 2373 /* 2374 * Delete from old vnode list, if on one. 2375 */ 2376 vp = bp->b_vp; /* XXX */ 2377 bo = bp->b_bufobj; 2378 BO_LOCK(bo); 2379 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2380 buf_vlist_remove(bp); 2381 else 2382 panic("brelvp: Buffer %p not on queue.", bp); 2383 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2384 bo->bo_flag &= ~BO_ONWORKLST; 2385 mtx_lock(&sync_mtx); 2386 LIST_REMOVE(bo, bo_synclist); 2387 syncer_worklist_len--; 2388 mtx_unlock(&sync_mtx); 2389 } 2390 bp->b_vp = NULL; 2391 bp->b_bufobj = NULL; 2392 BO_UNLOCK(bo); 2393 vdrop(vp); 2394 } 2395 2396 /* 2397 * Add an item to the syncer work queue. 
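 *
 * The worklist is a wheel of syncer_maxdelay buckets swept at roughly one
 * bucket per second.  As a worked example, with syncer_delayno == 10 and
 * delay == 30 the bufobj is hung off slot (10 + 30) & syncer_mask and
 * will be visited about 30 seconds from now.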
2398 */ 2399 static void 2400 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2401 { 2402 int slot; 2403 2404 ASSERT_BO_WLOCKED(bo); 2405 2406 mtx_lock(&sync_mtx); 2407 if (bo->bo_flag & BO_ONWORKLST) 2408 LIST_REMOVE(bo, bo_synclist); 2409 else { 2410 bo->bo_flag |= BO_ONWORKLST; 2411 syncer_worklist_len++; 2412 } 2413 2414 if (delay > syncer_maxdelay - 2) 2415 delay = syncer_maxdelay - 2; 2416 slot = (syncer_delayno + delay) & syncer_mask; 2417 2418 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2419 mtx_unlock(&sync_mtx); 2420 } 2421 2422 static int 2423 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2424 { 2425 int error, len; 2426 2427 mtx_lock(&sync_mtx); 2428 len = syncer_worklist_len - sync_vnode_count; 2429 mtx_unlock(&sync_mtx); 2430 error = SYSCTL_OUT(req, &len, sizeof(len)); 2431 return (error); 2432 } 2433 2434 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2435 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2436 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2437 2438 static struct proc *updateproc; 2439 static void sched_sync(void); 2440 static struct kproc_desc up_kp = { 2441 "syncer", 2442 sched_sync, 2443 &updateproc 2444 }; 2445 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2446 2447 static int 2448 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2449 { 2450 struct vnode *vp; 2451 struct mount *mp; 2452 2453 *bo = LIST_FIRST(slp); 2454 if (*bo == NULL) 2455 return (0); 2456 vp = bo2vnode(*bo); 2457 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2458 return (1); 2459 /* 2460 * We use vhold in case the vnode does not 2461 * successfully sync. vhold prevents the vnode from 2462 * going away when we unlock the sync_mtx so that 2463 * we can acquire the vnode interlock. 2464 */ 2465 vholdl(vp); 2466 mtx_unlock(&sync_mtx); 2467 VI_UNLOCK(vp); 2468 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2469 vdrop(vp); 2470 mtx_lock(&sync_mtx); 2471 return (*bo == LIST_FIRST(slp)); 2472 } 2473 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2474 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2475 VOP_UNLOCK(vp); 2476 vn_finished_write(mp); 2477 BO_LOCK(*bo); 2478 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2479 /* 2480 * Put us back on the worklist. The worklist 2481 * routine will remove us from our current 2482 * position and then add us back in at a later 2483 * position. 2484 */ 2485 vn_syncer_add_to_worklist(*bo, syncdelay); 2486 } 2487 BO_UNLOCK(*bo); 2488 vdrop(vp); 2489 mtx_lock(&sync_mtx); 2490 return (0); 2491 } 2492 2493 static int first_printf = 1; 2494 2495 /* 2496 * System filesystem synchronizer daemon. 
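 *
 * In short: once a second the daemon advances syncer_delayno, walks the
 * bucket it lands on and calls sync_vnode() (ultimately VOP_FSYNC() with
 * MNT_LAZY) on every entry found there.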
2497 */ 2498 static void 2499 sched_sync(void) 2500 { 2501 struct synclist *next, *slp; 2502 struct bufobj *bo; 2503 long starttime; 2504 struct thread *td = curthread; 2505 int last_work_seen; 2506 int net_worklist_len; 2507 int syncer_final_iter; 2508 int error; 2509 2510 last_work_seen = 0; 2511 syncer_final_iter = 0; 2512 syncer_state = SYNCER_RUNNING; 2513 starttime = time_uptime; 2514 td->td_pflags |= TDP_NORUNNINGBUF; 2515 2516 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2517 SHUTDOWN_PRI_LAST); 2518 2519 mtx_lock(&sync_mtx); 2520 for (;;) { 2521 if (syncer_state == SYNCER_FINAL_DELAY && 2522 syncer_final_iter == 0) { 2523 mtx_unlock(&sync_mtx); 2524 kproc_suspend_check(td->td_proc); 2525 mtx_lock(&sync_mtx); 2526 } 2527 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2528 if (syncer_state != SYNCER_RUNNING && 2529 starttime != time_uptime) { 2530 if (first_printf) { 2531 printf("\nSyncing disks, vnodes remaining... "); 2532 first_printf = 0; 2533 } 2534 printf("%d ", net_worklist_len); 2535 } 2536 starttime = time_uptime; 2537 2538 /* 2539 * Push files whose dirty time has expired. Be careful 2540 * of interrupt race on slp queue. 2541 * 2542 * Skip over empty worklist slots when shutting down. 2543 */ 2544 do { 2545 slp = &syncer_workitem_pending[syncer_delayno]; 2546 syncer_delayno += 1; 2547 if (syncer_delayno == syncer_maxdelay) 2548 syncer_delayno = 0; 2549 next = &syncer_workitem_pending[syncer_delayno]; 2550 /* 2551 * If the worklist has wrapped since 2552 * it was emptied of all but syncer vnodes, 2553 * switch to the FINAL_DELAY state and run 2554 * for one more second. 2555 */ 2556 if (syncer_state == SYNCER_SHUTTING_DOWN && 2557 net_worklist_len == 0 && 2558 last_work_seen == syncer_delayno) { 2559 syncer_state = SYNCER_FINAL_DELAY; 2560 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2561 } 2562 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2563 syncer_worklist_len > 0); 2564 2565 /* 2566 * Keep track of the last time there was anything 2567 * on the worklist other than syncer vnodes. 2568 * Return to the SHUTTING_DOWN state if any 2569 * new work appears. 2570 */ 2571 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2572 last_work_seen = syncer_delayno; 2573 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2574 syncer_state = SYNCER_SHUTTING_DOWN; 2575 while (!LIST_EMPTY(slp)) { 2576 error = sync_vnode(slp, &bo, td); 2577 if (error == 1) { 2578 LIST_REMOVE(bo, bo_synclist); 2579 LIST_INSERT_HEAD(next, bo, bo_synclist); 2580 continue; 2581 } 2582 2583 if (first_printf == 0) { 2584 /* 2585 * Drop the sync mutex, because some watchdog 2586 * drivers need to sleep while patting the watchdog. 2587 */ 2588 mtx_unlock(&sync_mtx); 2589 wdog_kern_pat(WD_LASTVAL); 2590 mtx_lock(&sync_mtx); 2591 } 2592 2593 } 2594 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2595 syncer_final_iter--; 2596 /* 2597 * The variable rushjob allows the kernel to speed up the 2598 * processing of the filesystem syncer process. A rushjob 2599 * value of N tells the filesystem syncer to process the next 2600 * N seconds worth of work on its queue ASAP. Currently rushjob 2601 * is used by the soft update code to speed up the filesystem 2602 * syncer process when the incore state is getting so far 2603 * ahead of the disk that the kernel memory pool is being 2604 * threatened with exhaustion.
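 *
 * For example, after speedup_syncer() has pushed rushjob up to 3, the
 * loop below makes three consecutive passes without sleeping, draining
 * three seconds worth of buckets before resuming its one-second pace.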
2605 */ 2606 if (rushjob > 0) { 2607 rushjob -= 1; 2608 continue; 2609 } 2610 /* 2611 * Just sleep for a short period of time between 2612 * iterations when shutting down to allow some I/O 2613 * to happen. 2614 * 2615 * If it has taken us less than a second to process the 2616 * current work, then wait. Otherwise start right over 2617 * again. We can still lose time if any single round 2618 * takes more than two seconds, but it does not really 2619 * matter as we are just trying to generally pace the 2620 * filesystem activity. 2621 */ 2622 if (syncer_state != SYNCER_RUNNING || 2623 time_uptime == starttime) { 2624 thread_lock(td); 2625 sched_prio(td, PPAUSE); 2626 thread_unlock(td); 2627 } 2628 if (syncer_state != SYNCER_RUNNING) 2629 cv_timedwait(&sync_wakeup, &sync_mtx, 2630 hz / SYNCER_SHUTDOWN_SPEEDUP); 2631 else if (time_uptime == starttime) 2632 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2633 } 2634 } 2635 2636 /* 2637 * Request the syncer daemon to speed up its work. 2638 * We never push it to speed up more than half of its 2639 * normal turn time, otherwise it could take over the cpu. 2640 */ 2641 int 2642 speedup_syncer(void) 2643 { 2644 int ret = 0; 2645 2646 mtx_lock(&sync_mtx); 2647 if (rushjob < syncdelay / 2) { 2648 rushjob += 1; 2649 stat_rush_requests += 1; 2650 ret = 1; 2651 } 2652 mtx_unlock(&sync_mtx); 2653 cv_broadcast(&sync_wakeup); 2654 return (ret); 2655 } 2656 2657 /* 2658 * Tell the syncer to speed up its work and run through its work 2659 * list several times, then tell it to shut down. 2660 */ 2661 static void 2662 syncer_shutdown(void *arg, int howto) 2663 { 2664 2665 if (howto & RB_NOSYNC) 2666 return; 2667 mtx_lock(&sync_mtx); 2668 syncer_state = SYNCER_SHUTTING_DOWN; 2669 rushjob = 0; 2670 mtx_unlock(&sync_mtx); 2671 cv_broadcast(&sync_wakeup); 2672 kproc_shutdown(arg, howto); 2673 } 2674 2675 void 2676 syncer_suspend(void) 2677 { 2678 2679 syncer_shutdown(updateproc, 0); 2680 } 2681 2682 void 2683 syncer_resume(void) 2684 { 2685 2686 mtx_lock(&sync_mtx); 2687 first_printf = 1; 2688 syncer_state = SYNCER_RUNNING; 2689 mtx_unlock(&sync_mtx); 2690 cv_broadcast(&sync_wakeup); 2691 kproc_resume(updateproc); 2692 } 2693 2694 /* 2695 * Reassign a buffer from one vnode to another. 2696 * Used to assign file specific control information 2697 * (indirect blocks) to the vnode to which they belong. 2698 */ 2699 void 2700 reassignbuf(struct buf *bp) 2701 { 2702 struct vnode *vp; 2703 struct bufobj *bo; 2704 int delay; 2705 #ifdef INVARIANTS 2706 struct bufv *bv; 2707 #endif 2708 2709 vp = bp->b_vp; 2710 bo = bp->b_bufobj; 2711 ++reassignbufcalls; 2712 2713 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2714 bp, bp->b_vp, bp->b_flags); 2715 /* 2716 * B_PAGING flagged buffers cannot be reassigned because their vp 2717 * is not fully linked in. 2718 */ 2719 if (bp->b_flags & B_PAGING) 2720 panic("cannot reassign paging buffer"); 2721 2722 /* 2723 * Delete from old vnode list, if on one. 2724 */ 2725 BO_LOCK(bo); 2726 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2727 buf_vlist_remove(bp); 2728 else 2729 panic("reassignbuf: Buffer %p not on queue.", bp); 2730 /* 2731 * If dirty, put on list of dirty buffers; otherwise insert onto list 2732 * of clean buffers.
2733 */ 2734 if (bp->b_flags & B_DELWRI) { 2735 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2736 switch (vp->v_type) { 2737 case VDIR: 2738 delay = dirdelay; 2739 break; 2740 case VCHR: 2741 delay = metadelay; 2742 break; 2743 default: 2744 delay = filedelay; 2745 } 2746 vn_syncer_add_to_worklist(bo, delay); 2747 } 2748 buf_vlist_add(bp, bo, BX_VNDIRTY); 2749 } else { 2750 buf_vlist_add(bp, bo, BX_VNCLEAN); 2751 2752 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2753 mtx_lock(&sync_mtx); 2754 LIST_REMOVE(bo, bo_synclist); 2755 syncer_worklist_len--; 2756 mtx_unlock(&sync_mtx); 2757 bo->bo_flag &= ~BO_ONWORKLST; 2758 } 2759 } 2760 #ifdef INVARIANTS 2761 bv = &bo->bo_clean; 2762 bp = TAILQ_FIRST(&bv->bv_hd); 2763 KASSERT(bp == NULL || bp->b_bufobj == bo, 2764 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2765 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2766 KASSERT(bp == NULL || bp->b_bufobj == bo, 2767 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2768 bv = &bo->bo_dirty; 2769 bp = TAILQ_FIRST(&bv->bv_hd); 2770 KASSERT(bp == NULL || bp->b_bufobj == bo, 2771 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2772 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2773 KASSERT(bp == NULL || bp->b_bufobj == bo, 2774 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2775 #endif 2776 BO_UNLOCK(bo); 2777 } 2778 2779 static void 2780 v_init_counters(struct vnode *vp) 2781 { 2782 2783 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2784 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2785 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2786 2787 refcount_init(&vp->v_holdcnt, 1); 2788 refcount_init(&vp->v_usecount, 1); 2789 } 2790 2791 /* 2792 * Increment si_usecount of the associated device, if any. 2793 */ 2794 static void 2795 v_incr_devcount(struct vnode *vp) 2796 { 2797 2798 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2799 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2800 dev_lock(); 2801 vp->v_rdev->si_usecount++; 2802 dev_unlock(); 2803 } 2804 } 2805 2806 /* 2807 * Decrement si_usecount of the associated device, if any. 2808 * 2809 * The caller is required to hold the interlock when transitioning a VCHR use 2810 * count to zero. This prevents a race with devfs_reclaim_vchr() that would 2811 * leak a si_usecount reference. The vnode lock will also prevent this race 2812 * if it is held while dropping the last ref. 2813 * 2814 * The race is: 2815 * 2816 * CPU1 CPU2 2817 * devfs_reclaim_vchr 2818 * make v_usecount == 0 2819 * VI_LOCK 2820 * sees v_usecount == 0, no updates 2821 * vp->v_rdev = NULL; 2822 * ... 2823 * VI_UNLOCK 2824 * VI_LOCK 2825 * v_decr_devcount 2826 * sees v_rdev == NULL, no updates 2827 * 2828 * In this scenario the si_usecount decrement is not performed. 2829 */ 2830 static void 2831 v_decr_devcount(struct vnode *vp) 2832 { 2833 2834 ASSERT_VOP_LOCKED(vp, __func__); 2835 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2836 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2837 dev_lock(); 2838 VNPASS(vp->v_rdev->si_usecount > 0, vp); 2839 vp->v_rdev->si_usecount--; 2840 dev_unlock(); 2841 } 2842 } 2843 2844 /* 2845 * Grab a particular vnode from the free list, increment its 2846 * reference count and lock it. VIRF_DOOMED is set if the vnode 2847 * is being destroyed. Only callers who specify LK_RETRY will 2848 * see doomed vnodes. If inactive processing was delayed in 2849 * vput try to do it here. 2850 * 2851 * usecount is manipulated using atomics without holding any locks.
2852 * 2853 * holdcnt can be manipulated using atomics without holding any locks, 2854 * except when transitioning 1<->0, in which case the interlock is held. 2855 * 2856 * Consumers which don't guarantee liveness of the vnode can use SMR to 2857 * try to get a reference. Note this operation can fail since the vnode 2858 * may be in the process of getting freed by the time they get to it. 2859 */ 2860 enum vgetstate 2861 vget_prep_smr(struct vnode *vp) 2862 { 2863 enum vgetstate vs; 2864 2865 VFS_SMR_ASSERT_ENTERED(); 2866 2867 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2868 vs = VGET_USECOUNT; 2869 } else { 2870 if (vhold_smr(vp)) 2871 vs = VGET_HOLDCNT; 2872 else 2873 vs = VGET_NONE; 2874 } 2875 return (vs); 2876 } 2877 2878 enum vgetstate 2879 vget_prep(struct vnode *vp) 2880 { 2881 enum vgetstate vs; 2882 2883 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2884 vs = VGET_USECOUNT; 2885 } else { 2886 vhold(vp); 2887 vs = VGET_HOLDCNT; 2888 } 2889 return (vs); 2890 } 2891 2892 int 2893 vget(struct vnode *vp, int flags, struct thread *td) 2894 { 2895 enum vgetstate vs; 2896 2897 MPASS(td == curthread); 2898 2899 vs = vget_prep(vp); 2900 return (vget_finish(vp, flags, vs)); 2901 } 2902 2903 static int __noinline 2904 vget_finish_vchr(struct vnode *vp) 2905 { 2906 2907 VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR")); 2908 2909 /* 2910 * See the comment in vget_finish before usecount bump. 2911 */ 2912 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2913 #ifdef INVARIANTS 2914 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2915 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); 2916 #else 2917 refcount_release(&vp->v_holdcnt); 2918 #endif 2919 return (0); 2920 } 2921 2922 VI_LOCK(vp); 2923 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2924 #ifdef INVARIANTS 2925 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2926 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2927 #else 2928 refcount_release(&vp->v_holdcnt); 2929 #endif 2930 VI_UNLOCK(vp); 2931 return (0); 2932 } 2933 v_incr_devcount(vp); 2934 refcount_acquire(&vp->v_usecount); 2935 VI_UNLOCK(vp); 2936 return (0); 2937 } 2938 2939 int 2940 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2941 { 2942 int error, old; 2943 2944 if ((flags & LK_INTERLOCK) != 0) 2945 ASSERT_VI_LOCKED(vp, __func__); 2946 else 2947 ASSERT_VI_UNLOCKED(vp, __func__); 2948 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 2949 VNPASS(vp->v_holdcnt > 0, vp); 2950 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 2951 2952 error = vn_lock(vp, flags); 2953 if (__predict_false(error != 0)) { 2954 if (vs == VGET_USECOUNT) 2955 vrele(vp); 2956 else 2957 vdrop(vp); 2958 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2959 vp); 2960 return (error); 2961 } 2962 2963 if (vs == VGET_USECOUNT) 2964 return (0); 2965 2966 if (__predict_false(vp->v_type == VCHR)) 2967 return (vget_finish_vchr(vp)); 2968 2969 /* 2970 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2971 * the vnode around. Otherwise someone else lent their hold count and 2972 * we have to drop ours.
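 *
 * For reference, vget() above is simply the two steps glued together:
 *
 *	vs = vget_prep(vp);
 *	error = vget_finish(vp, flags, vs);
 *
 * The split form exists so lockless consumers can take the reference
 * while SMR still guarantees the vnode is not freed, e.g. (sketch):
 *
 *	vfs_smr_enter();
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		return (EAGAIN);
 *	error = vget_finish(vp, LK_SHARED, vs);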
2973 */ 2974 old = atomic_fetchadd_int(&vp->v_usecount, 1); 2975 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 2976 if (old != 0) { 2977 #ifdef INVARIANTS 2978 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2979 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2980 #else 2981 refcount_release(&vp->v_holdcnt); 2982 #endif 2983 } 2984 return (0); 2985 } 2986 2987 /* 2988 * Increase the reference (use) and hold count of a vnode. 2989 * This will also remove the vnode from the free list if it is presently free. 2990 */ 2991 static void __noinline 2992 vref_vchr(struct vnode *vp, bool interlock) 2993 { 2994 2995 /* 2996 * See the comment in vget_finish before usecount bump. 2997 */ 2998 if (!interlock) { 2999 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3000 VNODE_REFCOUNT_FENCE_ACQ(); 3001 VNASSERT(vp->v_holdcnt > 0, vp, 3002 ("%s: active vnode not held", __func__)); 3003 return; 3004 } 3005 VI_LOCK(vp); 3006 /* 3007 * By the time we get here the vnode might have been doomed, at 3008 * which point the 0->1 use count transition is no longer 3009 * protected by the interlock. Since it can't bounce back to 3010 * VCHR and requires vref semantics, punt it back. 3011 */ 3012 if (__predict_false(vp->v_type == VBAD)) { 3013 VI_UNLOCK(vp); 3014 vref(vp); 3015 return; 3016 } 3017 } 3018 VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR")); 3019 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3020 VNODE_REFCOUNT_FENCE_ACQ(); 3021 VNASSERT(vp->v_holdcnt > 0, vp, 3022 ("%s: active vnode not held", __func__)); 3023 if (!interlock) 3024 VI_UNLOCK(vp); 3025 return; 3026 } 3027 vhold(vp); 3028 v_incr_devcount(vp); 3029 refcount_acquire(&vp->v_usecount); 3030 if (!interlock) 3031 VI_UNLOCK(vp); 3032 return; 3033 } 3034 3035 void 3036 vref(struct vnode *vp) 3037 { 3038 int old; 3039 3040 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3041 if (__predict_false(vp->v_type == VCHR)) { 3042 vref_vchr(vp, false); 3043 return; 3044 } 3045 3046 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3047 VNODE_REFCOUNT_FENCE_ACQ(); 3048 VNASSERT(vp->v_holdcnt > 0, vp, 3049 ("%s: active vnode not held", __func__)); 3050 return; 3051 } 3052 vhold(vp); 3053 /* 3054 * See the comment in vget_finish.
3055 */ 3056 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3057 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3058 if (old != 0) { 3059 #ifdef INVARIANTS 3060 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3061 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3062 #else 3063 refcount_release(&vp->v_holdcnt); 3064 #endif 3065 } 3066 } 3067 3068 void 3069 vrefl(struct vnode *vp) 3070 { 3071 3072 ASSERT_VI_LOCKED(vp, __func__); 3073 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3074 if (__predict_false(vp->v_type == VCHR)) { 3075 vref_vchr(vp, true); 3076 return; 3077 } 3078 vref(vp); 3079 } 3080 3081 void 3082 vrefact(struct vnode *vp) 3083 { 3084 3085 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3086 #ifdef INVARIANTS 3087 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3088 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3089 #else 3090 refcount_acquire(&vp->v_usecount); 3091 #endif 3092 } 3093 3094 void 3095 vrefactn(struct vnode *vp, u_int n) 3096 { 3097 3098 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3099 #ifdef INVARIANTS 3100 int old = atomic_fetchadd_int(&vp->v_usecount, n); 3101 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3102 #else 3103 atomic_add_int(&vp->v_usecount, n); 3104 #endif 3105 } 3106 3107 /* 3108 * Return reference count of a vnode. 3109 * 3110 * The results of this call are only guaranteed when some mechanism is used to 3111 * stop other processes from gaining references to the vnode. This may be the 3112 * case if the caller holds the only reference. This is also useful when stale 3113 * data is acceptable as race conditions may be accounted for by some other 3114 * means. 3115 */ 3116 int 3117 vrefcnt(struct vnode *vp) 3118 { 3119 3120 return (vp->v_usecount); 3121 } 3122 3123 void 3124 vlazy(struct vnode *vp) 3125 { 3126 struct mount *mp; 3127 3128 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3129 3130 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3131 return; 3132 /* 3133 * We may get here for inactive routines after the vnode got doomed. 3134 */ 3135 if (VN_IS_DOOMED(vp)) 3136 return; 3137 mp = vp->v_mount; 3138 mtx_lock(&mp->mnt_listmtx); 3139 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3140 vp->v_mflag |= VMP_LAZYLIST; 3141 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3142 mp->mnt_lazyvnodelistsize++; 3143 } 3144 mtx_unlock(&mp->mnt_listmtx); 3145 } 3146 3147 /* 3148 * This routine is only meant to be called from vgonel prior to dooming 3149 * the vnode. 
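 *
 * Doomed vnodes skip vdrop_deactivate() on their final vdrop, so the
 * lazy list membership has to be torn down here, before VIRF_DOOMED is
 * set, rather than at hold count drop time.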
3150 */ 3151 static void 3152 vunlazy_gone(struct vnode *vp) 3153 { 3154 struct mount *mp; 3155 3156 ASSERT_VOP_ELOCKED(vp, __func__); 3157 ASSERT_VI_LOCKED(vp, __func__); 3158 VNPASS(!VN_IS_DOOMED(vp), vp); 3159 3160 if (vp->v_mflag & VMP_LAZYLIST) { 3161 mp = vp->v_mount; 3162 mtx_lock(&mp->mnt_listmtx); 3163 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3164 vp->v_mflag &= ~VMP_LAZYLIST; 3165 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3166 mp->mnt_lazyvnodelistsize--; 3167 mtx_unlock(&mp->mnt_listmtx); 3168 } 3169 } 3170 3171 static void 3172 vdefer_inactive(struct vnode *vp) 3173 { 3174 3175 ASSERT_VI_LOCKED(vp, __func__); 3176 VNASSERT(vp->v_holdcnt > 0, vp, 3177 ("%s: vnode without hold count", __func__)); 3178 if (VN_IS_DOOMED(vp)) { 3179 vdropl(vp); 3180 return; 3181 } 3182 if (vp->v_iflag & VI_DEFINACT) { 3183 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3184 vdropl(vp); 3185 return; 3186 } 3187 if (vp->v_usecount > 0) { 3188 vp->v_iflag &= ~VI_OWEINACT; 3189 vdropl(vp); 3190 return; 3191 } 3192 vlazy(vp); 3193 vp->v_iflag |= VI_DEFINACT; 3194 VI_UNLOCK(vp); 3195 counter_u64_add(deferred_inact, 1); 3196 } 3197 3198 static void 3199 vdefer_inactive_unlocked(struct vnode *vp) 3200 { 3201 3202 VI_LOCK(vp); 3203 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3204 vdropl(vp); 3205 return; 3206 } 3207 vdefer_inactive(vp); 3208 } 3209 3210 enum vput_op { VRELE, VPUT, VUNREF }; 3211 3212 /* 3213 * Handle ->v_usecount transitioning to 0. 3214 * 3215 * By releasing the last usecount we take ownership of the hold count which 3216 * provides liveness of the vnode, meaning we have to vdrop. 3217 * 3218 * If the vnode is of type VCHR we may need to decrement si_usecount, see 3219 * v_decr_devcount for details. 3220 * 3221 * For all vnodes we may need to perform inactive processing. It requires an 3222 * exclusive lock on the vnode, while it is legal to call here with only a 3223 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3224 * inactive processing gets deferred to the syncer. 3225 * 3226 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3227 * on the lock being held all the way until VOP_INACTIVE. This in particular 3228 * happens with UFS which adds half-constructed vnodes to the hash, where they 3229 * can be found by other code. 3230 */ 3231 static void 3232 vput_final(struct vnode *vp, enum vput_op func) 3233 { 3234 int error; 3235 bool want_unlock; 3236 3237 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3238 VNPASS(vp->v_holdcnt > 0, vp); 3239 3240 VI_LOCK(vp); 3241 if (__predict_false(vp->v_type == VCHR && func != VRELE)) 3242 v_decr_devcount(vp); 3243 3244 /* 3245 * By the time we got here someone else might have transitioned 3246 * the count back to > 0. 3247 */ 3248 if (vp->v_usecount > 0) 3249 goto out; 3250 3251 /* 3252 * If the vnode is doomed vgone already performed inactive processing 3253 * (if needed). 3254 */ 3255 if (VN_IS_DOOMED(vp)) 3256 goto out; 3257 3258 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3259 goto out; 3260 3261 if (vp->v_iflag & VI_DOINGINACT) 3262 goto out; 3263 3264 /* 3265 * Locking operations here will drop the interlock and possibly the 3266 * vnode lock, opening a window where the vnode can get doomed all the 3267 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3268 * perform inactive. 
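 *
 * Expected lock state on entry differs per operation:
 *
 *	vrele(vp);	unlocked; the lock is taken here if needed
 *	vput(vp);	locked; an upgrade is attempted if held shared
 *	vunref(vp);	locked; remains locked on return
 *
 * Whenever the lock cannot be obtained in the expected manner, the work
 * is punted to vdefer_inactive().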
3269 */ 3270 vp->v_iflag |= VI_OWEINACT; 3271 want_unlock = false; 3272 error = 0; 3273 switch (func) { 3274 case VRELE: 3275 switch (VOP_ISLOCKED(vp)) { 3276 case LK_EXCLUSIVE: 3277 break; 3278 case LK_EXCLOTHER: 3279 case 0: 3280 want_unlock = true; 3281 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3282 VI_LOCK(vp); 3283 break; 3284 default: 3285 /* 3286 * The lock has at least one sharer, but we have no way 3287 * to conclude whether this is us. Play it safe and 3288 * defer processing. 3289 */ 3290 error = EAGAIN; 3291 break; 3292 } 3293 break; 3294 case VPUT: 3295 want_unlock = true; 3296 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3297 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3298 LK_NOWAIT); 3299 VI_LOCK(vp); 3300 } 3301 break; 3302 case VUNREF: 3303 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3304 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3305 VI_LOCK(vp); 3306 } 3307 break; 3308 } 3309 if (error == 0) { 3310 vinactive(vp); 3311 if (want_unlock) 3312 VOP_UNLOCK(vp); 3313 vdropl(vp); 3314 } else { 3315 vdefer_inactive(vp); 3316 } 3317 return; 3318 out: 3319 if (func == VPUT) 3320 VOP_UNLOCK(vp); 3321 vdropl(vp); 3322 } 3323 3324 /* 3325 * Decrement ->v_usecount for a vnode. 3326 * 3327 * Releasing the last use count requires additional processing, see vput_final 3328 * above for details. 3329 * 3330 * Note that releasing use count without the vnode lock requires special casing 3331 * for VCHR, see v_decr_devcount for details. 3332 * 3333 * Comment above each variant denotes lock state on entry and exit. 3334 */ 3335 3336 static void __noinline 3337 vrele_vchr(struct vnode *vp) 3338 { 3339 3340 if (refcount_release_if_not_last(&vp->v_usecount)) 3341 return; 3342 VI_LOCK(vp); 3343 if (!refcount_release(&vp->v_usecount)) { 3344 VI_UNLOCK(vp); 3345 return; 3346 } 3347 v_decr_devcount(vp); 3348 VI_UNLOCK(vp); 3349 vput_final(vp, VRELE); 3350 } 3351 3352 /* 3353 * in: any 3354 * out: same as passed in 3355 */ 3356 void 3357 vrele(struct vnode *vp) 3358 { 3359 3360 ASSERT_VI_UNLOCKED(vp, __func__); 3361 if (__predict_false(vp->v_type == VCHR)) { 3362 vrele_vchr(vp); 3363 return; 3364 } 3365 if (!refcount_release(&vp->v_usecount)) 3366 return; 3367 vput_final(vp, VRELE); 3368 } 3369 3370 /* 3371 * in: locked 3372 * out: unlocked 3373 */ 3374 void 3375 vput(struct vnode *vp) 3376 { 3377 3378 ASSERT_VOP_LOCKED(vp, __func__); 3379 ASSERT_VI_UNLOCKED(vp, __func__); 3380 if (!refcount_release(&vp->v_usecount)) { 3381 VOP_UNLOCK(vp); 3382 return; 3383 } 3384 vput_final(vp, VPUT); 3385 } 3386 3387 /* 3388 * in: locked 3389 * out: locked 3390 */ 3391 void 3392 vunref(struct vnode *vp) 3393 { 3394 3395 ASSERT_VOP_LOCKED(vp, __func__); 3396 ASSERT_VI_UNLOCKED(vp, __func__); 3397 if (!refcount_release(&vp->v_usecount)) 3398 return; 3399 vput_final(vp, VUNREF); 3400 } 3401 3402 void 3403 vhold(struct vnode *vp) 3404 { 3405 struct vdbatch *vd; 3406 int old; 3407 3408 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3409 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3410 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3411 ("%s: wrong hold count %d", __func__, old)); 3412 if (old != 0) 3413 return; 3414 critical_enter(); 3415 vd = DPCPU_PTR(vd); 3416 vd->freevnodes--; 3417 critical_exit(); 3418 } 3419 3420 void 3421 vholdl(struct vnode *vp) 3422 { 3423 3424 ASSERT_VI_LOCKED(vp, __func__); 3425 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3426 vhold(vp); 3427 } 3428 3429 void 3430 vholdnz(struct vnode *vp) 3431 { 3432 3433 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3434 #ifdef INVARIANTS 3435 
int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3436 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3437 ("%s: wrong hold count %d", __func__, old)); 3438 #else 3439 atomic_add_int(&vp->v_holdcnt, 1); 3440 #endif 3441 } 3442 3443 /* 3444 * Grab a hold count unless the vnode is freed. 3445 * 3446 * Only use this routine if vfs smr is the only protection you have against 3447 * freeing the vnode. 3448 * 3449 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3450 * is not set. After the flag is set the vnode becomes immutable to anyone but 3451 * the thread which managed to set the flag. 3452 * 3453 * It may be tempting to replace the loop with: 3454 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3455 * if (count & VHOLD_NO_SMR) { 3456 * backpedal and error out; 3457 * } 3458 * 3459 * However, while this is more performant, it hinders debugging by eliminating 3460 * the previously mentioned invariant. 3461 */ 3462 bool 3463 vhold_smr(struct vnode *vp) 3464 { 3465 int count; 3466 3467 VFS_SMR_ASSERT_ENTERED(); 3468 3469 count = atomic_load_int(&vp->v_holdcnt); 3470 for (;;) { 3471 if (count & VHOLD_NO_SMR) { 3472 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3473 ("non-zero hold count with flags %d\n", count)); 3474 return (false); 3475 } 3476 3477 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3478 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) 3479 return (true); 3480 } 3481 } 3482 3483 static void __noinline 3484 vdbatch_process(struct vdbatch *vd) 3485 { 3486 struct vnode *vp; 3487 int i; 3488 3489 mtx_assert(&vd->lock, MA_OWNED); 3490 MPASS(curthread->td_pinned > 0); 3491 MPASS(vd->index == VDBATCH_SIZE); 3492 3493 mtx_lock(&vnode_list_mtx); 3494 critical_enter(); 3495 freevnodes += vd->freevnodes; 3496 for (i = 0; i < VDBATCH_SIZE; i++) { 3497 vp = vd->tab[i]; 3498 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3499 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3500 MPASS(vp->v_dbatchcpu != NOCPU); 3501 vp->v_dbatchcpu = NOCPU; 3502 } 3503 mtx_unlock(&vnode_list_mtx); 3504 vd->freevnodes = 0; 3505 bzero(vd->tab, sizeof(vd->tab)); 3506 vd->index = 0; 3507 critical_exit(); 3508 } 3509 3510 static void 3511 vdbatch_enqueue(struct vnode *vp) 3512 { 3513 struct vdbatch *vd; 3514 3515 ASSERT_VI_LOCKED(vp, __func__); 3516 VNASSERT(!VN_IS_DOOMED(vp), vp, 3517 ("%s: deferring requeue of a doomed vnode", __func__)); 3518 3519 critical_enter(); 3520 vd = DPCPU_PTR(vd); 3521 vd->freevnodes++; 3522 if (vp->v_dbatchcpu != NOCPU) { 3523 VI_UNLOCK(vp); 3524 critical_exit(); 3525 return; 3526 } 3527 3528 sched_pin(); 3529 critical_exit(); 3530 mtx_lock(&vd->lock); 3531 MPASS(vd->index < VDBATCH_SIZE); 3532 MPASS(vd->tab[vd->index] == NULL); 3533 /* 3534 * A hack: we depend on being pinned so that we know what to put in 3535 * ->v_dbatchcpu. 3536 */ 3537 vp->v_dbatchcpu = curcpu; 3538 vd->tab[vd->index] = vp; 3539 vd->index++; 3540 VI_UNLOCK(vp); 3541 if (vd->index == VDBATCH_SIZE) 3542 vdbatch_process(vd); 3543 mtx_unlock(&vd->lock); 3544 sched_unpin(); 3545 } 3546 3547 /* 3548 * This routine must only be called for vnodes which are about to be 3549 * deallocated. Supporting dequeue for arbitrary vnodes would require 3550 * validating that the locked batch matches.
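 *
 * Note the lookup below may legitimately fail: v_dbatchcpu names the CPU
 * whose table the vnode was enqueued on, but that CPU may have filled its
 * table and drained it via vdbatch_process() in the meantime.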
3551 */ 3552 static void 3553 vdbatch_dequeue(struct vnode *vp) 3554 { 3555 struct vdbatch *vd; 3556 int i; 3557 short cpu; 3558 3559 VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp, 3560 ("%s: called for a used vnode\n", __func__)); 3561 3562 cpu = vp->v_dbatchcpu; 3563 if (cpu == NOCPU) 3564 return; 3565 3566 vd = DPCPU_ID_PTR(cpu, vd); 3567 mtx_lock(&vd->lock); 3568 for (i = 0; i < vd->index; i++) { 3569 if (vd->tab[i] != vp) 3570 continue; 3571 vp->v_dbatchcpu = NOCPU; 3572 vd->index--; 3573 vd->tab[i] = vd->tab[vd->index]; 3574 vd->tab[vd->index] = NULL; 3575 break; 3576 } 3577 mtx_unlock(&vd->lock); 3578 /* 3579 * Either we dequeued the vnode above or the target CPU beat us to it. 3580 */ 3581 MPASS(vp->v_dbatchcpu == NOCPU); 3582 } 3583 3584 /* 3585 * Drop the hold count of the vnode. If this is the last reference to 3586 * the vnode we place it on the free list unless it has been vgone'd 3587 * (marked VIRF_DOOMED) in which case we will free it. 3588 * 3589 * Because the vnode vm object keeps a hold reference on the vnode if 3590 * there is at least one resident non-cached page, the vnode cannot 3591 * leave the active list without the page cleanup done. 3592 */ 3593 static void 3594 vdrop_deactivate(struct vnode *vp) 3595 { 3596 struct mount *mp; 3597 3598 ASSERT_VI_LOCKED(vp, __func__); 3599 /* 3600 * Mark a vnode as free: remove it from its active list 3601 * and put it up for recycling on the freelist. 3602 */ 3603 VNASSERT(!VN_IS_DOOMED(vp), vp, 3604 ("vdrop: returning doomed vnode")); 3605 VNASSERT(vp->v_op != NULL, vp, 3606 ("vdrop: vnode already reclaimed.")); 3607 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3608 ("vnode with VI_OWEINACT set")); 3609 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3610 ("vnode with VI_DEFINACT set")); 3611 if (vp->v_mflag & VMP_LAZYLIST) { 3612 mp = vp->v_mount; 3613 mtx_lock(&mp->mnt_listmtx); 3614 VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST")); 3615 /* 3616 * Don't remove the vnode from the lazy list if another thread 3617 * has increased the hold count. It may have re-enqueued the 3618 * vnode to the lazy list and is now responsible for its 3619 * removal. 3620 */ 3621 if (vp->v_holdcnt == 0) { 3622 vp->v_mflag &= ~VMP_LAZYLIST; 3623 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3624 mp->mnt_lazyvnodelistsize--; 3625 } 3626 mtx_unlock(&mp->mnt_listmtx); 3627 } 3628 vdbatch_enqueue(vp); 3629 } 3630 3631 void 3632 vdrop(struct vnode *vp) 3633 { 3634 3635 ASSERT_VI_UNLOCKED(vp, __func__); 3636 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3637 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3638 return; 3639 VI_LOCK(vp); 3640 vdropl(vp); 3641 } 3642 3643 void 3644 vdropl(struct vnode *vp) 3645 { 3646 3647 ASSERT_VI_LOCKED(vp, __func__); 3648 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3649 if (!refcount_release(&vp->v_holdcnt)) { 3650 VI_UNLOCK(vp); 3651 return; 3652 } 3653 if (!VN_IS_DOOMED(vp)) { 3654 vdrop_deactivate(vp); 3655 return; 3656 } 3657 /* 3658 * We may be racing against vhold_smr. 3659 * 3660 * If they win we can just pretend we never got this far, they will 3661 * vdrop later. 3662 */ 3663 if (!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR)) { 3664 /* 3665 * We lost the aforementioned race. Note that any subsequent 3666 * access is invalid as they might have managed to vdropl on 3667 * their own. 3668 */ 3669 return; 3670 } 3671 freevnode(vp); 3672 } 3673 3674 /* 3675 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3676 * flags. 
DOINGINACT prevents us from recursing in calls to vinactive. 3677 */ 3678 static void 3679 vinactivef(struct vnode *vp) 3680 { 3681 struct vm_object *obj; 3682 3683 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3684 ASSERT_VI_LOCKED(vp, "vinactive"); 3685 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3686 ("vinactive: recursed on VI_DOINGINACT")); 3687 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3688 vp->v_iflag |= VI_DOINGINACT; 3689 vp->v_iflag &= ~VI_OWEINACT; 3690 VI_UNLOCK(vp); 3691 /* 3692 * Before moving off the active list, we must be sure that any 3693 * modified pages are converted into the vnode's dirty 3694 * buffers, since these will no longer be checked once the 3695 * vnode is on the inactive list. 3696 * 3697 * The write-out of the dirty pages is asynchronous. At the 3698 * point that VOP_INACTIVE() is called, there could still be 3699 * pending I/O and dirty pages in the object. 3700 */ 3701 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3702 vm_object_mightbedirty(obj)) { 3703 VM_OBJECT_WLOCK(obj); 3704 vm_object_page_clean(obj, 0, 0, 0); 3705 VM_OBJECT_WUNLOCK(obj); 3706 } 3707 VOP_INACTIVE(vp, curthread); 3708 VI_LOCK(vp); 3709 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3710 ("vinactive: lost VI_DOINGINACT")); 3711 vp->v_iflag &= ~VI_DOINGINACT; 3712 } 3713 3714 void 3715 vinactive(struct vnode *vp) 3716 { 3717 3718 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3719 ASSERT_VI_LOCKED(vp, "vinactive"); 3720 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3721 3722 if ((vp->v_iflag & VI_OWEINACT) == 0) 3723 return; 3724 if (vp->v_iflag & VI_DOINGINACT) 3725 return; 3726 if (vp->v_usecount > 0) { 3727 vp->v_iflag &= ~VI_OWEINACT; 3728 return; 3729 } 3730 vinactivef(vp); 3731 } 3732 3733 /* 3734 * Remove any vnodes in the vnode table belonging to mount point mp. 3735 * 3736 * If FORCECLOSE is not specified, there should not be any active ones; 3737 * return an error if any are found (nb: this is a user error, not a 3738 * system error). If FORCECLOSE is specified, detach any active vnodes 3739 * that are found. 3740 * 3741 * If WRITECLOSE is set, only flush out regular file vnodes open for 3742 * writing. 3743 * 3744 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3745 * 3746 * `rootrefs' specifies the base reference count for the root vnode 3747 * of this filesystem. The root vnode is considered busy if its 3748 * v_usecount exceeds this value. On a successful return, vflush() 3749 * will call vrele() on the root vnode exactly rootrefs times. 3750 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3751 * be zero. 3752 */ 3753 #ifdef DIAGNOSTIC 3754 static int busyprt = 0; /* print out busy vnodes */ 3755 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3756 #endif 3757 3758 int 3759 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3760 { 3761 struct vnode *vp, *mvp, *rootvp = NULL; 3762 struct vattr vattr; 3763 int busy = 0, error; 3764 3765 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3766 rootrefs, flags); 3767 if (rootrefs > 0) { 3768 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3769 ("vflush: bad args")); 3770 /* 3771 * Get the filesystem root vnode. We can vput() it 3772 * immediately, since with rootrefs > 0, it won't go away.
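 *
 * As a sketch, a VFS_UNMOUNT() implementation might do:
 *
 *	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
 *	error = vflush(mp, 0, flags, td);
 *
 * passing a non-zero rootrefs only when it holds extra references on
 * the root vnode for vflush() to release.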
3773 */ 3774 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3775 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3776 __func__, error); 3777 return (error); 3778 } 3779 vput(rootvp); 3780 } 3781 loop: 3782 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3783 vholdl(vp); 3784 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3785 if (error) { 3786 vdrop(vp); 3787 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3788 goto loop; 3789 } 3790 /* 3791 * Skip over any vnodes marked VV_SYSTEM. 3792 */ 3793 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3794 VOP_UNLOCK(vp); 3795 vdrop(vp); 3796 continue; 3797 } 3798 /* 3799 * If WRITECLOSE is set, flush out unlinked but still open 3800 * files (even if open only for reading) and regular file 3801 * vnodes open for writing. 3802 */ 3803 if (flags & WRITECLOSE) { 3804 if (vp->v_object != NULL) { 3805 VM_OBJECT_WLOCK(vp->v_object); 3806 vm_object_page_clean(vp->v_object, 0, 0, 0); 3807 VM_OBJECT_WUNLOCK(vp->v_object); 3808 } 3809 error = VOP_FSYNC(vp, MNT_WAIT, td); 3810 if (error != 0) { 3811 VOP_UNLOCK(vp); 3812 vdrop(vp); 3813 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3814 return (error); 3815 } 3816 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3817 VI_LOCK(vp); 3818 3819 if ((vp->v_type == VNON || 3820 (error == 0 && vattr.va_nlink > 0)) && 3821 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3822 VOP_UNLOCK(vp); 3823 vdropl(vp); 3824 continue; 3825 } 3826 } else 3827 VI_LOCK(vp); 3828 /* 3829 * With v_usecount == 0, all we need to do is clear out the 3830 * vnode data structures and we are done. 3831 * 3832 * If FORCECLOSE is set, forcibly close the vnode. 3833 */ 3834 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3835 vgonel(vp); 3836 } else { 3837 busy++; 3838 #ifdef DIAGNOSTIC 3839 if (busyprt) 3840 vn_printf(vp, "vflush: busy vnode "); 3841 #endif 3842 } 3843 VOP_UNLOCK(vp); 3844 vdropl(vp); 3845 } 3846 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3847 /* 3848 * If just the root vnode is busy, and if its refcount 3849 * is equal to `rootrefs', then go ahead and kill it. 3850 */ 3851 VI_LOCK(rootvp); 3852 KASSERT(busy > 0, ("vflush: not busy")); 3853 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3854 ("vflush: usecount %d < rootrefs %d", 3855 rootvp->v_usecount, rootrefs)); 3856 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3857 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3858 vgone(rootvp); 3859 VOP_UNLOCK(rootvp); 3860 busy = 0; 3861 } else 3862 VI_UNLOCK(rootvp); 3863 } 3864 if (busy) { 3865 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3866 busy); 3867 return (EBUSY); 3868 } 3869 for (; rootrefs > 0; rootrefs--) 3870 vrele(rootvp); 3871 return (0); 3872 } 3873 3874 /* 3875 * Recycle an unused vnode to the front of the free list. 3876 */ 3877 int 3878 vrecycle(struct vnode *vp) 3879 { 3880 int recycled; 3881 3882 VI_LOCK(vp); 3883 recycled = vrecyclel(vp); 3884 VI_UNLOCK(vp); 3885 return (recycled); 3886 } 3887 3888 /* 3889 * vrecycle, with the vp interlock held. 3890 */ 3891 int 3892 vrecyclel(struct vnode *vp) 3893 { 3894 int recycled; 3895 3896 ASSERT_VOP_ELOCKED(vp, __func__); 3897 ASSERT_VI_LOCKED(vp, __func__); 3898 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3899 recycled = 0; 3900 if (vp->v_usecount == 0) { 3901 recycled = 1; 3902 vgonel(vp); 3903 } 3904 return (recycled); 3905 } 3906 3907 /* 3908 * Eliminate all activity associated with a vnode 3909 * in preparation for reuse.
3910 */ 3911 void 3912 vgone(struct vnode *vp) 3913 { 3914 VI_LOCK(vp); 3915 vgonel(vp); 3916 VI_UNLOCK(vp); 3917 } 3918 3919 static void 3920 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3921 struct vnode *lowervp __unused) 3922 { 3923 } 3924 3925 /* 3926 * Notify upper mounts about reclaimed or unlinked vnode. 3927 */ 3928 void 3929 vfs_notify_upper(struct vnode *vp, int event) 3930 { 3931 static struct vfsops vgonel_vfsops = { 3932 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3933 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3934 }; 3935 struct mount *mp, *ump, *mmp; 3936 3937 mp = vp->v_mount; 3938 if (mp == NULL) 3939 return; 3940 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3941 return; 3942 3943 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3944 mmp->mnt_op = &vgonel_vfsops; 3945 mmp->mnt_kern_flag |= MNTK_MARKER; 3946 MNT_ILOCK(mp); 3947 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3948 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3949 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3950 ump = TAILQ_NEXT(ump, mnt_upper_link); 3951 continue; 3952 } 3953 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3954 MNT_IUNLOCK(mp); 3955 switch (event) { 3956 case VFS_NOTIFY_UPPER_RECLAIM: 3957 VFS_RECLAIM_LOWERVP(ump, vp); 3958 break; 3959 case VFS_NOTIFY_UPPER_UNLINK: 3960 VFS_UNLINK_LOWERVP(ump, vp); 3961 break; 3962 default: 3963 KASSERT(0, ("invalid event %d", event)); 3964 break; 3965 } 3966 MNT_ILOCK(mp); 3967 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3968 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3969 } 3970 free(mmp, M_TEMP); 3971 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3972 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3973 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3974 wakeup(&mp->mnt_uppers); 3975 } 3976 MNT_IUNLOCK(mp); 3977 } 3978 3979 /* 3980 * vgone, with the vp interlock held. 3981 */ 3982 static void 3983 vgonel(struct vnode *vp) 3984 { 3985 struct thread *td; 3986 struct mount *mp; 3987 vm_object_t object; 3988 bool active, oweinact; 3989 3990 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3991 ASSERT_VI_LOCKED(vp, "vgonel"); 3992 VNASSERT(vp->v_holdcnt, vp, 3993 ("vgonel: vp %p has no reference.", vp)); 3994 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3995 td = curthread; 3996 3997 /* 3998 * Don't vgonel if we're already doomed. 3999 */ 4000 if (vp->v_irflag & VIRF_DOOMED) 4001 return; 4002 vunlazy_gone(vp); 4003 vp->v_irflag |= VIRF_DOOMED; 4004 4005 /* 4006 * Check to see if the vnode is in use. If so, we have to call 4007 * VOP_CLOSE() and VOP_INACTIVE(). 4008 */ 4009 active = vp->v_usecount > 0; 4010 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4011 /* 4012 * If we need to do inactive VI_OWEINACT will be set. 4013 */ 4014 if (vp->v_iflag & VI_DEFINACT) { 4015 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4016 vp->v_iflag &= ~VI_DEFINACT; 4017 vdropl(vp); 4018 } else { 4019 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4020 VI_UNLOCK(vp); 4021 } 4022 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4023 4024 /* 4025 * If purging an active vnode, it must be closed and 4026 * deactivated before being reclaimed. 4027 */ 4028 if (active) 4029 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4030 if (oweinact || active) { 4031 VI_LOCK(vp); 4032 vinactivef(vp); 4033 VI_UNLOCK(vp); 4034 } 4035 if (vp->v_type == VSOCK) 4036 vfs_unp_reclaim(vp); 4037 4038 /* 4039 * Clean out any buffers associated with the vnode. 4040 * If the flush fails, just toss the buffers. 
4041 */ 4042 mp = NULL; 4043 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4044 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4045 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4046 while (vinvalbuf(vp, 0, 0, 0) != 0) 4047 ; 4048 } 4049 4050 BO_LOCK(&vp->v_bufobj); 4051 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4052 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4053 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4054 vp->v_bufobj.bo_clean.bv_cnt == 0, 4055 ("vp %p bufobj not invalidated", vp)); 4056 4057 /* 4058 * For VMIO bufobj, BO_DEAD is set later, or in 4059 * vm_object_terminate() after the object's page queue is 4060 * flushed. 4061 */ 4062 object = vp->v_bufobj.bo_object; 4063 if (object == NULL) 4064 vp->v_bufobj.bo_flag |= BO_DEAD; 4065 BO_UNLOCK(&vp->v_bufobj); 4066 4067 /* 4068 * Handle the VM part. Tmpfs handles v_object on its own (the 4069 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4070 * should not touch the object borrowed from the lower vnode 4071 * (the handle check). 4072 */ 4073 if (object != NULL && object->type == OBJT_VNODE && 4074 object->handle == vp) 4075 vnode_destroy_vobject(vp); 4076 4077 /* 4078 * Reclaim the vnode. 4079 */ 4080 if (VOP_RECLAIM(vp, td)) 4081 panic("vgone: cannot reclaim"); 4082 if (mp != NULL) 4083 vn_finished_secondary_write(mp); 4084 VNASSERT(vp->v_object == NULL, vp, 4085 ("vop_reclaim left v_object vp=%p", vp)); 4086 /* 4087 * Clear the advisory locks and wake up waiting threads. 4088 */ 4089 (void)VOP_ADVLOCKPURGE(vp); 4090 vp->v_lockf = NULL; 4091 /* 4092 * Delete from old mount point vnode list. 4093 */ 4094 delmntque(vp); 4095 cache_purge(vp); 4096 /* 4097 * Done with purge, reset to the standard lock and invalidate 4098 * the vnode. 4099 */ 4100 VI_LOCK(vp); 4101 vp->v_vnlock = &vp->v_lock; 4102 vp->v_op = &dead_vnodeops; 4103 vp->v_type = VBAD; 4104 } 4105 4106 /* 4107 * Calculate the total number of references to a special device. 4108 */ 4109 int 4110 vcount(struct vnode *vp) 4111 { 4112 int count; 4113 4114 dev_lock(); 4115 count = vp->v_rdev->si_usecount; 4116 dev_unlock(); 4117 return (count); 4118 } 4119 4120 /* 4121 * Print out a description of a vnode. 4122 */ 4123 static const char * const typename[] = 4124 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 4125 "VMARKER"}; 4126 4127 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4128 "new hold count flag not added to vn_printf"); 4129 4130 void 4131 vn_printf(struct vnode *vp, const char *fmt, ...) 
{ 4133 va_list ap; 4134 char buf[256], buf2[16]; 4135 u_long flags; 4136 u_int holdcnt; 4137 4138 va_start(ap, fmt); 4139 vprintf(fmt, ap); 4140 va_end(ap); 4141 printf("%p: ", (void *)vp); 4142 printf("type %s\n", typename[vp->v_type]); 4143 holdcnt = atomic_load_int(&vp->v_holdcnt); 4144 printf(" usecount %d, writecount %d, refcount %d", 4145 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS); 4146 switch (vp->v_type) { 4147 case VDIR: 4148 printf(" mountedhere %p\n", vp->v_mountedhere); 4149 break; 4150 case VCHR: 4151 printf(" rdev %p\n", vp->v_rdev); 4152 break; 4153 case VSOCK: 4154 printf(" socket %p\n", vp->v_unpcb); 4155 break; 4156 case VFIFO: 4157 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4158 break; 4159 default: 4160 printf("\n"); 4161 break; 4162 } 4163 buf[0] = '\0'; 4164 buf[1] = '\0'; 4165 if (holdcnt & VHOLD_NO_SMR) 4166 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4167 printf(" hold count flags (%s)\n", buf + 1); 4168 4169 buf[0] = '\0'; 4170 buf[1] = '\0'; 4171 if (vp->v_irflag & VIRF_DOOMED) 4172 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4173 flags = vp->v_irflag & ~(VIRF_DOOMED); 4174 if (flags != 0) { 4175 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4176 strlcat(buf, buf2, sizeof(buf)); 4177 } 4178 if (vp->v_vflag & VV_ROOT) 4179 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4180 if (vp->v_vflag & VV_ISTTY) 4181 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4182 if (vp->v_vflag & VV_NOSYNC) 4183 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4184 if (vp->v_vflag & VV_ETERNALDEV) 4185 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4186 if (vp->v_vflag & VV_CACHEDLABEL) 4187 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4188 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4189 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4190 if (vp->v_vflag & VV_COPYONWRITE) 4191 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4192 if (vp->v_vflag & VV_SYSTEM) 4193 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4194 if (vp->v_vflag & VV_PROCDEP) 4195 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4196 if (vp->v_vflag & VV_NOKNOTE) 4197 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 4198 if (vp->v_vflag & VV_DELETED) 4199 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4200 if (vp->v_vflag & VV_MD) 4201 strlcat(buf, "|VV_MD", sizeof(buf)); 4202 if (vp->v_vflag & VV_FORCEINSMQ) 4203 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4204 if (vp->v_vflag & VV_READLINK) 4205 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4206 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4207 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 4208 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4209 if (flags != 0) { 4210 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4211 strlcat(buf, buf2, sizeof(buf)); 4212 } 4213 if (vp->v_iflag & VI_TEXT_REF) 4214 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 4215 if (vp->v_iflag & VI_MOUNT) 4216 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4217 if (vp->v_iflag & VI_DOINGINACT) 4218 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4219 if (vp->v_iflag & VI_OWEINACT) 4220 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4221 if (vp->v_iflag & VI_DEFINACT) 4222 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4223 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT | 4224 VI_OWEINACT | VI_DEFINACT); 4225 if (flags != 0) { 4226 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4227 strlcat(buf, buf2, sizeof(buf)); 4228 } 4229 if (vp->v_mflag & VMP_LAZYLIST) 4230 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4231 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4232 if (flags != 0) { 4233
snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4234 strlcat(buf, buf2, sizeof(buf)); 4235 } 4236 printf(" flags (%s)\n", buf + 1); 4237 if (mtx_owned(VI_MTX(vp))) 4238 printf(" VI_LOCKed"); 4239 if (vp->v_object != NULL) 4240 printf(" v_object %p ref %d pages %d " 4241 "cleanbuf %d dirtybuf %d\n", 4242 vp->v_object, vp->v_object->ref_count, 4243 vp->v_object->resident_page_count, 4244 vp->v_bufobj.bo_clean.bv_cnt, 4245 vp->v_bufobj.bo_dirty.bv_cnt); 4246 printf(" "); 4247 lockmgr_printinfo(vp->v_vnlock); 4248 if (vp->v_data != NULL) 4249 VOP_PRINT(vp); 4250 } 4251 4252 #ifdef DDB 4253 /* 4254 * List all of the locked vnodes in the system. 4255 * Called when debugging the kernel. 4256 */ 4257 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 4258 { 4259 struct mount *mp; 4260 struct vnode *vp; 4261 4262 /* 4263 * Note: because this is DDB, we can't obey the locking semantics 4264 * for these structures, which means we could catch an inconsistent 4265 * state and dereference a nasty pointer. Not much to be done 4266 * about that. 4267 */ 4268 db_printf("Locked vnodes\n"); 4269 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4270 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4271 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4272 vn_printf(vp, "vnode "); 4273 } 4274 } 4275 } 4276 4277 /* 4278 * Show details about the given vnode. 4279 */ 4280 DB_SHOW_COMMAND(vnode, db_show_vnode) 4281 { 4282 struct vnode *vp; 4283 4284 if (!have_addr) 4285 return; 4286 vp = (struct vnode *)addr; 4287 vn_printf(vp, "vnode "); 4288 } 4289 4290 /* 4291 * Show details about the given mount point. 4292 */ 4293 DB_SHOW_COMMAND(mount, db_show_mount) 4294 { 4295 struct mount *mp; 4296 struct vfsopt *opt; 4297 struct statfs *sp; 4298 struct vnode *vp; 4299 char buf[512]; 4300 uint64_t mflags; 4301 u_int flags; 4302 4303 if (!have_addr) { 4304 /* No address given, print short info about all mount points. 
*/ 4305 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4306 db_printf("%p %s on %s (%s)\n", mp, 4307 mp->mnt_stat.f_mntfromname, 4308 mp->mnt_stat.f_mntonname, 4309 mp->mnt_stat.f_fstypename); 4310 if (db_pager_quit) 4311 break; 4312 } 4313 db_printf("\nMore info: show mount <addr>\n"); 4314 return; 4315 } 4316 4317 mp = (struct mount *)addr; 4318 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4319 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4320 4321 buf[0] = '\0'; 4322 mflags = mp->mnt_flag; 4323 #define MNT_FLAG(flag) do { \ 4324 if (mflags & (flag)) { \ 4325 if (buf[0] != '\0') \ 4326 strlcat(buf, ", ", sizeof(buf)); \ 4327 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4328 mflags &= ~(flag); \ 4329 } \ 4330 } while (0) 4331 MNT_FLAG(MNT_RDONLY); 4332 MNT_FLAG(MNT_SYNCHRONOUS); 4333 MNT_FLAG(MNT_NOEXEC); 4334 MNT_FLAG(MNT_NOSUID); 4335 MNT_FLAG(MNT_NFS4ACLS); 4336 MNT_FLAG(MNT_UNION); 4337 MNT_FLAG(MNT_ASYNC); 4338 MNT_FLAG(MNT_SUIDDIR); 4339 MNT_FLAG(MNT_SOFTDEP); 4340 MNT_FLAG(MNT_NOSYMFOLLOW); 4341 MNT_FLAG(MNT_GJOURNAL); 4342 MNT_FLAG(MNT_MULTILABEL); 4343 MNT_FLAG(MNT_ACLS); 4344 MNT_FLAG(MNT_NOATIME); 4345 MNT_FLAG(MNT_NOCLUSTERR); 4346 MNT_FLAG(MNT_NOCLUSTERW); 4347 MNT_FLAG(MNT_SUJ); 4348 MNT_FLAG(MNT_EXRDONLY); 4349 MNT_FLAG(MNT_EXPORTED); 4350 MNT_FLAG(MNT_DEFEXPORTED); 4351 MNT_FLAG(MNT_EXPORTANON); 4352 MNT_FLAG(MNT_EXKERB); 4353 MNT_FLAG(MNT_EXPUBLIC); 4354 MNT_FLAG(MNT_LOCAL); 4355 MNT_FLAG(MNT_QUOTA); 4356 MNT_FLAG(MNT_ROOTFS); 4357 MNT_FLAG(MNT_USER); 4358 MNT_FLAG(MNT_IGNORE); 4359 MNT_FLAG(MNT_UPDATE); 4360 MNT_FLAG(MNT_DELEXPORT); 4361 MNT_FLAG(MNT_RELOAD); 4362 MNT_FLAG(MNT_FORCE); 4363 MNT_FLAG(MNT_SNAPSHOT); 4364 MNT_FLAG(MNT_BYFSID); 4365 #undef MNT_FLAG 4366 if (mflags != 0) { 4367 if (buf[0] != '\0') 4368 strlcat(buf, ", ", sizeof(buf)); 4369 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4370 "0x%016jx", mflags); 4371 } 4372 db_printf(" mnt_flag = %s\n", buf); 4373 4374 buf[0] = '\0'; 4375 flags = mp->mnt_kern_flag; 4376 #define MNT_KERN_FLAG(flag) do { \ 4377 if (flags & (flag)) { \ 4378 if (buf[0] != '\0') \ 4379 strlcat(buf, ", ", sizeof(buf)); \ 4380 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4381 flags &= ~(flag); \ 4382 } \ 4383 } while (0) 4384 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4385 MNT_KERN_FLAG(MNTK_ASYNC); 4386 MNT_KERN_FLAG(MNTK_SOFTDEP); 4387 MNT_KERN_FLAG(MNTK_DRAINING); 4388 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4389 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4390 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4391 MNT_KERN_FLAG(MNTK_NO_IOPF); 4392 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4393 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4394 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4395 MNT_KERN_FLAG(MNTK_MARKER); 4396 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4397 MNT_KERN_FLAG(MNTK_NOASYNC); 4398 MNT_KERN_FLAG(MNTK_UNMOUNT); 4399 MNT_KERN_FLAG(MNTK_MWAIT); 4400 MNT_KERN_FLAG(MNTK_SUSPEND); 4401 MNT_KERN_FLAG(MNTK_SUSPEND2); 4402 MNT_KERN_FLAG(MNTK_SUSPENDED); 4403 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4404 MNT_KERN_FLAG(MNTK_NOKNOTE); 4405 #undef MNT_KERN_FLAG 4406 if (flags != 0) { 4407 if (buf[0] != '\0') 4408 strlcat(buf, ", ", sizeof(buf)); 4409 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4410 "0x%08x", flags); 4411 } 4412 db_printf(" mnt_kern_flag = %s\n", buf); 4413 4414 db_printf(" mnt_opt = "); 4415 opt = TAILQ_FIRST(mp->mnt_opt); 4416 if (opt != NULL) { 4417 db_printf("%s", opt->name); 4418 opt = TAILQ_NEXT(opt, link); 4419 while (opt != NULL) { 4420 db_printf(", %s", opt->name); 4421 opt = TAILQ_NEXT(opt, link); 4422 } 4423 } 4424 db_printf("\n"); 4425 4426 
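	/*
	 * Dump the cached statfs contents; fields are cast to uintmax_t,
	 * intmax_t, and u_int so a single format string works on all
	 * platforms.
	 */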
sp = &mp->mnt_stat; 4427 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4428 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4429 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4430 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4431 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4432 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4433 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4434 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4435 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4436 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4437 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4438 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4439 4440 db_printf(" mnt_cred = { uid=%u ruid=%u", 4441 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4442 if (jailed(mp->mnt_cred)) 4443 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4444 db_printf(" }\n"); 4445 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4446 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4447 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4448 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4449 db_printf(" mnt_lazyvnodelistsize = %d\n", 4450 mp->mnt_lazyvnodelistsize); 4451 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4452 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4453 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4454 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4455 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4456 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4457 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4458 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4459 db_printf(" mnt_secondary_accwrites = %d\n", 4460 mp->mnt_secondary_accwrites); 4461 db_printf(" mnt_gjprovider = %s\n", 4462 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4463 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4464 4465 db_printf("\n\nList of active vnodes\n"); 4466 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4467 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4468 vn_printf(vp, "vnode "); 4469 if (db_pager_quit) 4470 break; 4471 } 4472 } 4473 db_printf("\n\nList of inactive vnodes\n"); 4474 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4475 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4476 vn_printf(vp, "vnode "); 4477 if (db_pager_quit) 4478 break; 4479 } 4480 } 4481 } 4482 #endif /* DDB */ 4483 4484 /* 4485 * Fill in a struct xvfsconf based on a struct vfsconf. 4486 */ 4487 static int 4488 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4489 { 4490 struct xvfsconf xvfsp; 4491 4492 bzero(&xvfsp, sizeof(xvfsp)); 4493 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4494 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4495 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4496 xvfsp.vfc_flags = vfsp->vfc_flags; 4497 /* 4498 * These are unused in userland, we keep them 4499 * to not break binary compatibility. 
4500 */ 4501 xvfsp.vfc_vfsops = NULL; 4502 xvfsp.vfc_next = NULL; 4503 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4504 } 4505 4506 #ifdef COMPAT_FREEBSD32 4507 struct xvfsconf32 { 4508 uint32_t vfc_vfsops; 4509 char vfc_name[MFSNAMELEN]; 4510 int32_t vfc_typenum; 4511 int32_t vfc_refcount; 4512 int32_t vfc_flags; 4513 uint32_t vfc_next; 4514 }; 4515 4516 static int 4517 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4518 { 4519 struct xvfsconf32 xvfsp; 4520 4521 bzero(&xvfsp, sizeof(xvfsp)); 4522 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4523 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4524 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4525 xvfsp.vfc_flags = vfsp->vfc_flags; 4526 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4527 } 4528 #endif 4529 4530 /* 4531 * Top level filesystem related information gathering. 4532 */ 4533 static int 4534 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4535 { 4536 struct vfsconf *vfsp; 4537 int error; 4538 4539 error = 0; 4540 vfsconf_slock(); 4541 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4542 #ifdef COMPAT_FREEBSD32 4543 if (req->flags & SCTL_MASK32) 4544 error = vfsconf2x32(req, vfsp); 4545 else 4546 #endif 4547 error = vfsconf2x(req, vfsp); 4548 if (error) 4549 break; 4550 } 4551 vfsconf_sunlock(); 4552 return (error); 4553 } 4554 4555 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4556 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4557 "S,xvfsconf", "List of all configured filesystems"); 4558 4559 #ifndef BURN_BRIDGES 4560 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4561 4562 static int 4563 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4564 { 4565 int *name = (int *)arg1 - 1; /* XXX */ 4566 u_int namelen = arg2 + 1; /* XXX */ 4567 struct vfsconf *vfsp; 4568 4569 log(LOG_WARNING, "userland calling deprecated sysctl, " 4570 "please rebuild world\n"); 4571 4572 #if 1 || defined(COMPAT_PRELITE2) 4573 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
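 * A request with a single name component can only be the pre-Lite2
 * VFS_VFSCONF query, so hand it to the compatibility handler.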
*/ 4574 if (namelen == 1) 4575 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4576 #endif 4577 4578 switch (name[1]) { 4579 case VFS_MAXTYPENUM: 4580 if (namelen != 2) 4581 return (ENOTDIR); 4582 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4583 case VFS_CONF: 4584 if (namelen != 3) 4585 return (ENOTDIR); /* overloaded */ 4586 vfsconf_slock(); 4587 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4588 if (vfsp->vfc_typenum == name[2]) 4589 break; 4590 } 4591 vfsconf_sunlock(); 4592 if (vfsp == NULL) 4593 return (EOPNOTSUPP); 4594 #ifdef COMPAT_FREEBSD32 4595 if (req->flags & SCTL_MASK32) 4596 return (vfsconf2x32(req, vfsp)); 4597 else 4598 #endif 4599 return (vfsconf2x(req, vfsp)); 4600 } 4601 return (EOPNOTSUPP); 4602 } 4603 4604 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4605 CTLFLAG_MPSAFE, vfs_sysctl, 4606 "Generic filesystem"); 4607 4608 #if 1 || defined(COMPAT_PRELITE2) 4609 4610 static int 4611 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4612 { 4613 int error; 4614 struct vfsconf *vfsp; 4615 struct ovfsconf ovfs; 4616 4617 vfsconf_slock(); 4618 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4619 bzero(&ovfs, sizeof(ovfs)); 4620 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4621 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4622 ovfs.vfc_index = vfsp->vfc_typenum; 4623 ovfs.vfc_refcount = vfsp->vfc_refcount; 4624 ovfs.vfc_flags = vfsp->vfc_flags; 4625 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4626 if (error != 0) { 4627 vfsconf_sunlock(); 4628 return (error); 4629 } 4630 } 4631 vfsconf_sunlock(); 4632 return (0); 4633 } 4634 4635 #endif /* 1 || COMPAT_PRELITE2 */ 4636 #endif /* !BURN_BRIDGES */ 4637 4638 #define KINFO_VNODESLOP 10 4639 #ifdef notyet 4640 /* 4641 * Dump vnode list (via sysctl). 4642 */ 4643 /* ARGSUSED */ 4644 static int 4645 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4646 { 4647 struct xvnode *xvn; 4648 struct mount *mp; 4649 struct vnode *vp; 4650 int error, len, n; 4651 4652 /* 4653 * Stale numvnodes access is not fatal here. 4654 */ 4655 req->lock = 0; 4656 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4657 if (!req->oldptr) 4658 /* Make an estimate */ 4659 return (SYSCTL_OUT(req, 0, len)); 4660 4661 error = sysctl_wire_old_buffer(req, 0); 4662 if (error != 0) 4663 return (error); 4664 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4665 n = 0; 4666 mtx_lock(&mountlist_mtx); 4667 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4668 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4669 continue; 4670 MNT_ILOCK(mp); 4671 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4672 if (n == len) 4673 break; 4674 vref(vp); 4675 xvn[n].xv_size = sizeof *xvn; 4676 xvn[n].xv_vnode = vp; 4677 xvn[n].xv_id = 0; /* XXX compat */ 4678 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4679 XV_COPY(usecount); 4680 XV_COPY(writecount); 4681 XV_COPY(holdcnt); 4682 XV_COPY(mount); 4683 XV_COPY(numoutput); 4684 XV_COPY(type); 4685 #undef XV_COPY 4686 xvn[n].xv_flag = vp->v_vflag; 4687 4688 switch (vp->v_type) { 4689 case VREG: 4690 case VDIR: 4691 case VLNK: 4692 break; 4693 case VBLK: 4694 case VCHR: 4695 if (vp->v_rdev == NULL) { 4696 vrele(vp); 4697 continue; 4698 } 4699 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4700 break; 4701 case VSOCK: 4702 xvn[n].xv_socket = vp->v_socket; 4703 break; 4704 case VFIFO: 4705 xvn[n].xv_fifo = vp->v_fifoinfo; 4706 break; 4707 case VNON: 4708 case VBAD: 4709 default: 4710 /* shouldn't happen? 
*/ 4711 vrele(vp); 4712 continue; 4713 } 4714 vrele(vp); 4715 ++n; 4716 } 4717 MNT_IUNLOCK(mp); 4718 mtx_lock(&mountlist_mtx); 4719 vfs_unbusy(mp); 4720 if (n == len) 4721 break; 4722 } 4723 mtx_unlock(&mountlist_mtx); 4724 4725 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4726 free(xvn, M_TEMP); 4727 return (error); 4728 } 4729 4730 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4731 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4732 ""); 4733 #endif 4734 4735 static void 4736 unmount_or_warn(struct mount *mp) 4737 { 4738 int error; 4739 4740 error = dounmount(mp, MNT_FORCE, curthread); 4741 if (error != 0) { 4742 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4743 if (error == EBUSY) 4744 printf("BUSY)\n"); 4745 else 4746 printf("%d)\n", error); 4747 } 4748 } 4749 4750 /* 4751 * Unmount all filesystems. The list is traversed in reverse order 4752 * of mounting to avoid dependencies. 4753 */ 4754 void 4755 vfs_unmountall(void) 4756 { 4757 struct mount *mp, *tmp; 4758 4759 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4760 4761 /* 4762 * Since this only runs when rebooting, it is not interlocked. 4763 */ 4764 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4765 vfs_ref(mp); 4766 4767 /* 4768 * Forcibly unmounting "/dev" before "/" would prevent clean 4769 * unmount of the latter. 4770 */ 4771 if (mp == rootdevmp) 4772 continue; 4773 4774 unmount_or_warn(mp); 4775 } 4776 4777 if (rootdevmp != NULL) 4778 unmount_or_warn(rootdevmp); 4779 } 4780 4781 static void 4782 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4783 { 4784 4785 ASSERT_VI_LOCKED(vp, __func__); 4786 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4787 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4788 vdropl(vp); 4789 return; 4790 } 4791 if (vn_lock(vp, lkflags) == 0) { 4792 VI_LOCK(vp); 4793 vinactive(vp); 4794 VOP_UNLOCK(vp); 4795 vdropl(vp); 4796 return; 4797 } 4798 vdefer_inactive_unlocked(vp); 4799 } 4800 4801 static int 4802 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 4803 { 4804 4805 return (vp->v_iflag & VI_DEFINACT); 4806 } 4807 4808 static void __noinline 4809 vfs_periodic_inactive(struct mount *mp, int flags) 4810 { 4811 struct vnode *vp, *mvp; 4812 int lkflags; 4813 4814 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4815 if (flags != MNT_WAIT) 4816 lkflags |= LK_NOWAIT; 4817 4818 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 4819 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4820 VI_UNLOCK(vp); 4821 continue; 4822 } 4823 vp->v_iflag &= ~VI_DEFINACT; 4824 vfs_deferred_inactive(vp, lkflags); 4825 } 4826 } 4827 4828 static inline bool 4829 vfs_want_msync(struct vnode *vp) 4830 { 4831 struct vm_object *obj; 4832 4833 /* 4834 * This test may be performed without any locks held. 4835 * We rely on vm_object's type stability. 
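 * A stale answer is harmless: callers that decide to sync re-examine
 * the object under the vnode lock before cleaning any pages.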
4836 */ 4837 if (vp->v_vflag & VV_NOSYNC) 4838 return (false); 4839 obj = vp->v_object; 4840 return (obj != NULL && vm_object_mightbedirty(obj)); 4841 } 4842 4843 static int 4844 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 4845 { 4846 4847 if (vp->v_vflag & VV_NOSYNC) 4848 return (false); 4849 if (vp->v_iflag & VI_DEFINACT) 4850 return (true); 4851 return (vfs_want_msync(vp)); 4852 } 4853 4854 static void __noinline 4855 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4856 { 4857 struct vnode *vp, *mvp; 4858 struct vm_object *obj; 4859 struct thread *td; 4860 int lkflags, objflags; 4861 bool seen_defer; 4862 4863 td = curthread; 4864 4865 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4866 if (flags != MNT_WAIT) { 4867 lkflags |= LK_NOWAIT; 4868 objflags = OBJPC_NOSYNC; 4869 } else { 4870 objflags = OBJPC_SYNC; 4871 } 4872 4873 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 4874 seen_defer = false; 4875 if (vp->v_iflag & VI_DEFINACT) { 4876 vp->v_iflag &= ~VI_DEFINACT; 4877 seen_defer = true; 4878 } 4879 if (!vfs_want_msync(vp)) { 4880 if (seen_defer) 4881 vfs_deferred_inactive(vp, lkflags); 4882 else 4883 VI_UNLOCK(vp); 4884 continue; 4885 } 4886 if (vget(vp, lkflags, td) == 0) { 4887 obj = vp->v_object; 4888 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4889 VM_OBJECT_WLOCK(obj); 4890 vm_object_page_clean(obj, 0, 0, objflags); 4891 VM_OBJECT_WUNLOCK(obj); 4892 } 4893 vput(vp); 4894 if (seen_defer) 4895 vdrop(vp); 4896 } else { 4897 if (seen_defer) 4898 vdefer_inactive_unlocked(vp); 4899 } 4900 } 4901 } 4902 4903 void 4904 vfs_periodic(struct mount *mp, int flags) 4905 { 4906 4907 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4908 4909 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4910 vfs_periodic_inactive(mp, flags); 4911 else 4912 vfs_periodic_msync_inactive(mp, flags); 4913 } 4914 4915 static void 4916 destroy_vpollinfo_free(struct vpollinfo *vi) 4917 { 4918 4919 knlist_destroy(&vi->vpi_selinfo.si_note); 4920 mtx_destroy(&vi->vpi_lock); 4921 uma_zfree(vnodepoll_zone, vi); 4922 } 4923 4924 static void 4925 destroy_vpollinfo(struct vpollinfo *vi) 4926 { 4927 4928 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4929 seldrain(&vi->vpi_selinfo); 4930 destroy_vpollinfo_free(vi); 4931 } 4932 4933 /* 4934 * Initialize per-vnode helper structure to hold poll-related state. 4935 */ 4936 void 4937 v_addpollinfo(struct vnode *vp) 4938 { 4939 struct vpollinfo *vi; 4940 4941 if (vp->v_pollinfo != NULL) 4942 return; 4943 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4944 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4945 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4946 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4947 VI_LOCK(vp); 4948 if (vp->v_pollinfo != NULL) { 4949 VI_UNLOCK(vp); 4950 destroy_vpollinfo_free(vi); 4951 return; 4952 } 4953 vp->v_pollinfo = vi; 4954 VI_UNLOCK(vp); 4955 } 4956 4957 /* 4958 * Record a process's interest in events which might happen to 4959 * a vnode. Because poll uses the historic select-style interface 4960 * internally, this routine serves as both the ``check for any 4961 * pending events'' and the ``record my interest in future events'' 4962 * functions. (These are done together, while the lock is held, 4963 * to avoid race conditions.) 
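 *
 * Illustrative sketch (the filesystem name is hypothetical, not taken
 * from this file): a VOP_POLL implementation with no special semantics
 * can simply forward here:
 *
 *	static int
 *	myfs_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}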
*/ 4965 int 4966 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4967 { 4968 4969 v_addpollinfo(vp); 4970 mtx_lock(&vp->v_pollinfo->vpi_lock); 4971 if (vp->v_pollinfo->vpi_revents & events) { 4972 /* 4973 * This leaves events we are not interested 4974 * in available for the other process which 4975 * presumably had requested them 4976 * (otherwise they would never have been 4977 * recorded). 4978 */ 4979 events &= vp->v_pollinfo->vpi_revents; 4980 vp->v_pollinfo->vpi_revents &= ~events; 4981 4982 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4983 return (events); 4984 } 4985 vp->v_pollinfo->vpi_events |= events; 4986 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4987 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4988 return (0); 4989 } 4990 4991 /* 4992 * Routine to create and manage a filesystem syncer vnode. 4993 */ 4994 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4995 static int sync_fsync(struct vop_fsync_args *); 4996 static int sync_inactive(struct vop_inactive_args *); 4997 static int sync_reclaim(struct vop_reclaim_args *); 4998 4999 static struct vop_vector sync_vnodeops = { 5000 .vop_bypass = VOP_EOPNOTSUPP, 5001 .vop_close = sync_close, /* close */ 5002 .vop_fsync = sync_fsync, /* fsync */ 5003 .vop_inactive = sync_inactive, /* inactive */ 5004 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 5005 .vop_reclaim = sync_reclaim, /* reclaim */ 5006 .vop_lock1 = vop_stdlock, /* lock */ 5007 .vop_unlock = vop_stdunlock, /* unlock */ 5008 .vop_islocked = vop_stdislocked, /* islocked */ 5009 }; 5010 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 5011 5012 /* 5013 * Create a new filesystem syncer vnode for the specified mount point. 5014 */ 5015 void 5016 vfs_allocate_syncvnode(struct mount *mp) 5017 { 5018 struct vnode *vp; 5019 struct bufobj *bo; 5020 static long start, incr, next; 5021 int error; 5022 5023 /* Allocate a new vnode */ 5024 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 5025 if (error != 0) 5026 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 5027 vp->v_type = VNON; 5028 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5029 vp->v_vflag |= VV_FORCEINSMQ; 5030 error = insmntque(vp, mp); 5031 if (error != 0) 5032 panic("vfs_allocate_syncvnode: insmntque() failed"); 5033 vp->v_vflag &= ~VV_FORCEINSMQ; 5034 VOP_UNLOCK(vp); 5035 /* 5036 * Place the vnode onto the syncer worklist. We attempt to 5037 * scatter them about on the list so that they will go off 5038 * at evenly distributed times even if all the filesystems 5039 * are mounted at once. 5040 */ 5041 next += incr; 5042 if (next == 0 || next > syncer_maxdelay) { 5043 start /= 2; 5044 incr /= 2; 5045 if (start == 0) { 5046 start = syncer_maxdelay / 2; 5047 incr = syncer_maxdelay; 5048 } 5049 next = start; 5050 } 5051 bo = &vp->v_bufobj; 5052 BO_LOCK(bo); 5053 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 5054 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx.
*/ 5055 mtx_lock(&sync_mtx); 5056 sync_vnode_count++; 5057 if (mp->mnt_syncer == NULL) { 5058 mp->mnt_syncer = vp; 5059 vp = NULL; 5060 } 5061 mtx_unlock(&sync_mtx); 5062 BO_UNLOCK(bo); 5063 if (vp != NULL) { 5064 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5065 vgone(vp); 5066 vput(vp); 5067 } 5068 } 5069 5070 void 5071 vfs_deallocate_syncvnode(struct mount *mp) 5072 { 5073 struct vnode *vp; 5074 5075 mtx_lock(&sync_mtx); 5076 vp = mp->mnt_syncer; 5077 if (vp != NULL) 5078 mp->mnt_syncer = NULL; 5079 mtx_unlock(&sync_mtx); 5080 if (vp != NULL) 5081 vrele(vp); 5082 } 5083 5084 /* 5085 * Do a lazy sync of the filesystem. 5086 */ 5087 static int 5088 sync_fsync(struct vop_fsync_args *ap) 5089 { 5090 struct vnode *syncvp = ap->a_vp; 5091 struct mount *mp = syncvp->v_mount; 5092 int error, save; 5093 struct bufobj *bo; 5094 5095 /* 5096 * We only need to do something if this is a lazy evaluation. 5097 */ 5098 if (ap->a_waitfor != MNT_LAZY) 5099 return (0); 5100 5101 /* 5102 * Move ourselves to the back of the sync list. 5103 */ 5104 bo = &syncvp->v_bufobj; 5105 BO_LOCK(bo); 5106 vn_syncer_add_to_worklist(bo, syncdelay); 5107 BO_UNLOCK(bo); 5108 5109 /* 5110 * Walk the list of vnodes pushing all that are dirty and 5111 * not already on the sync list. 5112 */ 5113 if (vfs_busy(mp, MBF_NOWAIT) != 0) 5114 return (0); 5115 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 5116 vfs_unbusy(mp); 5117 return (0); 5118 } 5119 save = curthread_pflags_set(TDP_SYNCIO); 5120 /* 5121 * The filesystem at hand may be idle with free vnodes stored in the 5122 * batch. Return them instead of letting them stay there indefinitely. 5123 */ 5124 vfs_periodic(mp, MNT_NOWAIT); 5125 error = VFS_SYNC(mp, MNT_LAZY); 5126 curthread_pflags_restore(save); 5127 vn_finished_write(mp); 5128 vfs_unbusy(mp); 5129 return (error); 5130 } 5131 5132 /* 5133 * The syncer vnode is no longer referenced. 5134 */ 5135 static int 5136 sync_inactive(struct vop_inactive_args *ap) 5137 { 5138 5139 vgone(ap->a_vp); 5140 return (0); 5141 } 5142 5143 /* 5144 * The syncer vnode is no longer needed and is being decommissioned. 5145 * 5146 * Modifications to the worklist must be protected by sync_mtx.
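 * sync_reclaim() therefore takes sync_mtx, after BO_LOCK() in the same
 * order used by vfs_allocate_syncvnode(), before unhooking the bufobj
 * from the worklist and clearing mnt_syncer.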
5147 */ 5148 static int 5149 sync_reclaim(struct vop_reclaim_args *ap) 5150 { 5151 struct vnode *vp = ap->a_vp; 5152 struct bufobj *bo; 5153 5154 bo = &vp->v_bufobj; 5155 BO_LOCK(bo); 5156 mtx_lock(&sync_mtx); 5157 if (vp->v_mount->mnt_syncer == vp) 5158 vp->v_mount->mnt_syncer = NULL; 5159 if (bo->bo_flag & BO_ONWORKLST) { 5160 LIST_REMOVE(bo, bo_synclist); 5161 syncer_worklist_len--; 5162 sync_vnode_count--; 5163 bo->bo_flag &= ~BO_ONWORKLST; 5164 } 5165 mtx_unlock(&sync_mtx); 5166 BO_UNLOCK(bo); 5167 5168 return (0); 5169 } 5170 5171 int 5172 vn_need_pageq_flush(struct vnode *vp) 5173 { 5174 struct vm_object *obj; 5175 int need; 5176 5177 MPASS(mtx_owned(VI_MTX(vp))); 5178 need = 0; 5179 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 5180 vm_object_mightbedirty(obj)) 5181 need = 1; 5182 return (need); 5183 } 5184 5185 /* 5186 * Check if vnode represents a disk device 5187 */ 5188 int 5189 vn_isdisk(struct vnode *vp, int *errp) 5190 { 5191 int error; 5192 5193 if (vp->v_type != VCHR) { 5194 error = ENOTBLK; 5195 goto out; 5196 } 5197 error = 0; 5198 dev_lock(); 5199 if (vp->v_rdev == NULL) 5200 error = ENXIO; 5201 else if (vp->v_rdev->si_devsw == NULL) 5202 error = ENXIO; 5203 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 5204 error = ENOTBLK; 5205 dev_unlock(); 5206 out: 5207 if (errp != NULL) 5208 *errp = error; 5209 return (error == 0); 5210 } 5211 5212 /* 5213 * Common filesystem object access control check routine. Accepts a 5214 * vnode's type, "mode", uid and gid, requested access mode, credentials, 5215 * and optional call-by-reference privused argument allowing vaccess() 5216 * to indicate to the caller whether privilege was used to satisfy the 5217 * request (obsoleted). Returns 0 on success, or an errno on failure. 5218 */ 5219 int 5220 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5221 accmode_t accmode, struct ucred *cred, int *privused) 5222 { 5223 accmode_t dac_granted; 5224 accmode_t priv_granted; 5225 5226 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5227 ("invalid bit in accmode")); 5228 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5229 ("VAPPEND without VWRITE")); 5230 5231 /* 5232 * Look for a normal, non-privileged way to access the file/directory 5233 * as requested. If it exists, go with that. 5234 */ 5235 5236 if (privused != NULL) 5237 *privused = 0; 5238 5239 dac_granted = 0; 5240 5241 /* Check the owner. */ 5242 if (cred->cr_uid == file_uid) { 5243 dac_granted |= VADMIN; 5244 if (file_mode & S_IXUSR) 5245 dac_granted |= VEXEC; 5246 if (file_mode & S_IRUSR) 5247 dac_granted |= VREAD; 5248 if (file_mode & S_IWUSR) 5249 dac_granted |= (VWRITE | VAPPEND); 5250 5251 if ((accmode & dac_granted) == accmode) 5252 return (0); 5253 5254 goto privcheck; 5255 } 5256 5257 /* Otherwise, check the groups (first match) */ 5258 if (groupmember(file_gid, cred)) { 5259 if (file_mode & S_IXGRP) 5260 dac_granted |= VEXEC; 5261 if (file_mode & S_IRGRP) 5262 dac_granted |= VREAD; 5263 if (file_mode & S_IWGRP) 5264 dac_granted |= (VWRITE | VAPPEND); 5265 5266 if ((accmode & dac_granted) == accmode) 5267 return (0); 5268 5269 goto privcheck; 5270 } 5271 5272 /* Otherwise, check everyone else. 
*/ 5273 if (file_mode & S_IXOTH) 5274 dac_granted |= VEXEC; 5275 if (file_mode & S_IROTH) 5276 dac_granted |= VREAD; 5277 if (file_mode & S_IWOTH) 5278 dac_granted |= (VWRITE | VAPPEND); 5279 if ((accmode & dac_granted) == accmode) 5280 return (0); 5281 5282 privcheck: 5283 /* 5284 * Build a privilege mask to determine if the set of privileges 5285 * satisfies the requirements when combined with the granted mask 5286 * from above. For each privilege, if the privilege is required, 5287 * bitwise or the request type onto the priv_granted mask. 5288 */ 5289 priv_granted = 0; 5290 5291 if (type == VDIR) { 5292 /* 5293 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5294 * requests, instead of PRIV_VFS_EXEC. 5295 */ 5296 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5297 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5298 priv_granted |= VEXEC; 5299 } else { 5300 /* 5301 * Ensure that at least one execute bit is on. Otherwise, 5302 * a privileged user will always succeed, and we don't want 5303 * this to happen unless the file really is executable. 5304 */ 5305 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5306 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5307 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5308 priv_granted |= VEXEC; 5309 } 5310 5311 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5312 !priv_check_cred(cred, PRIV_VFS_READ)) 5313 priv_granted |= VREAD; 5314 5315 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5316 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5317 priv_granted |= (VWRITE | VAPPEND); 5318 5319 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5320 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5321 priv_granted |= VADMIN; 5322 5323 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5324 /* XXX audit: privilege used */ 5325 if (privused != NULL) 5326 *privused = 1; 5327 return (0); 5328 } 5329 5330 return ((accmode & VADMIN) ? EPERM : EACCES); 5331 } 5332 5333 /* 5334 * Credential check based on process requesting service, and per-attribute 5335 * permissions. 5336 */ 5337 int 5338 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5339 struct thread *td, accmode_t accmode) 5340 { 5341 5342 /* 5343 * Kernel-invoked always succeeds. 5344 */ 5345 if (cred == NOCRED) 5346 return (0); 5347 5348 /* 5349 * Do not allow privileged processes in jail to directly manipulate 5350 * system attributes. 5351 */ 5352 switch (attrnamespace) { 5353 case EXTATTR_NAMESPACE_SYSTEM: 5354 /* Potentially should be: return (EPERM); */ 5355 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5356 case EXTATTR_NAMESPACE_USER: 5357 return (VOP_ACCESS(vp, accmode, cred, td)); 5358 default: 5359 return (EPERM); 5360 } 5361 } 5362 5363 #ifdef DEBUG_VFS_LOCKS 5364 /* 5365 * This only exists to suppress warnings from unlocked specfs accesses. It is 5366 * no longer ok to have an unlocked VFS. 5367 */ 5368 #define IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL || \ 5369 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5370 5371 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5372 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5373 "Drop into debugger on lock violation"); 5374 5375 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5376 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5377 0, "Check for interlock across VOPs"); 5378 5379 int vfs_badlock_print = 1; /* Print lock violations. 
*/ 5380 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5381 0, "Print lock violations"); 5382 5383 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5384 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5385 0, "Print vnode details on lock violations"); 5386 5387 #ifdef KDB 5388 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5389 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5390 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5391 #endif 5392 5393 static void 5394 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5395 { 5396 5397 #ifdef KDB 5398 if (vfs_badlock_backtrace) 5399 kdb_backtrace(); 5400 #endif 5401 if (vfs_badlock_vnode) 5402 vn_printf(vp, "vnode "); 5403 if (vfs_badlock_print) 5404 printf("%s: %p %s\n", str, (void *)vp, msg); 5405 if (vfs_badlock_ddb) 5406 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5407 } 5408 5409 void 5410 assert_vi_locked(struct vnode *vp, const char *str) 5411 { 5412 5413 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5414 vfs_badlock("interlock is not locked but should be", str, vp); 5415 } 5416 5417 void 5418 assert_vi_unlocked(struct vnode *vp, const char *str) 5419 { 5420 5421 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5422 vfs_badlock("interlock is locked but should not be", str, vp); 5423 } 5424 5425 void 5426 assert_vop_locked(struct vnode *vp, const char *str) 5427 { 5428 int locked; 5429 5430 if (!IGNORE_LOCK(vp)) { 5431 locked = VOP_ISLOCKED(vp); 5432 if (locked == 0 || locked == LK_EXCLOTHER) 5433 vfs_badlock("is not locked but should be", str, vp); 5434 } 5435 } 5436 5437 void 5438 assert_vop_unlocked(struct vnode *vp, const char *str) 5439 { 5440 5441 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5442 vfs_badlock("is locked but should not be", str, vp); 5443 } 5444 5445 void 5446 assert_vop_elocked(struct vnode *vp, const char *str) 5447 { 5448 5449 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5450 vfs_badlock("is not exclusive locked but should be", str, vp); 5451 } 5452 #endif /* DEBUG_VFS_LOCKS */ 5453 5454 void 5455 vop_rename_fail(struct vop_rename_args *ap) 5456 { 5457 5458 if (ap->a_tvp != NULL) 5459 vput(ap->a_tvp); 5460 if (ap->a_tdvp == ap->a_tvp) 5461 vrele(ap->a_tdvp); 5462 else 5463 vput(ap->a_tdvp); 5464 vrele(ap->a_fdvp); 5465 vrele(ap->a_fvp); 5466 } 5467 5468 void 5469 vop_rename_pre(void *ap) 5470 { 5471 struct vop_rename_args *a = ap; 5472 5473 #ifdef DEBUG_VFS_LOCKS 5474 if (a->a_tvp) 5475 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5476 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5477 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5478 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5479 5480 /* Check the source (from). */ 5481 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5482 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5483 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5484 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5485 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5486 5487 /* Check the target. 
*/ 5488 if (a->a_tvp) 5489 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5490 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5491 #endif 5492 if (a->a_tdvp != a->a_fdvp) 5493 vhold(a->a_fdvp); 5494 if (a->a_tvp != a->a_fvp) 5495 vhold(a->a_fvp); 5496 vhold(a->a_tdvp); 5497 if (a->a_tvp) 5498 vhold(a->a_tvp); 5499 } 5500 5501 #ifdef DEBUG_VFS_LOCKS 5502 void 5503 vop_strategy_pre(void *ap) 5504 { 5505 struct vop_strategy_args *a; 5506 struct buf *bp; 5507 5508 a = ap; 5509 bp = a->a_bp; 5510 5511 /* 5512 * Cluster ops lock their component buffers but not the IO container. 5513 */ 5514 if ((bp->b_flags & B_CLUSTER) != 0) 5515 return; 5516 5517 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5518 if (vfs_badlock_print) 5519 printf( 5520 "VOP_STRATEGY: bp is not locked but should be\n"); 5521 if (vfs_badlock_ddb) 5522 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5523 } 5524 } 5525 5526 void 5527 vop_lock_pre(void *ap) 5528 { 5529 struct vop_lock1_args *a = ap; 5530 5531 if ((a->a_flags & LK_INTERLOCK) == 0) 5532 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5533 else 5534 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5535 } 5536 5537 void 5538 vop_lock_post(void *ap, int rc) 5539 { 5540 struct vop_lock1_args *a = ap; 5541 5542 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5543 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5544 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5545 } 5546 5547 void 5548 vop_unlock_pre(void *ap) 5549 { 5550 struct vop_unlock_args *a = ap; 5551 5552 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5553 } 5554 5555 void 5556 vop_need_inactive_pre(void *ap) 5557 { 5558 struct vop_need_inactive_args *a = ap; 5559 5560 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5561 } 5562 5563 void 5564 vop_need_inactive_post(void *ap, int rc) 5565 { 5566 struct vop_need_inactive_args *a = ap; 5567 5568 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5569 } 5570 #endif 5571 5572 void 5573 vop_create_post(void *ap, int rc) 5574 { 5575 struct vop_create_args *a = ap; 5576 5577 if (!rc) 5578 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5579 } 5580 5581 void 5582 vop_deleteextattr_post(void *ap, int rc) 5583 { 5584 struct vop_deleteextattr_args *a = ap; 5585 5586 if (!rc) 5587 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5588 } 5589 5590 void 5591 vop_link_post(void *ap, int rc) 5592 { 5593 struct vop_link_args *a = ap; 5594 5595 if (!rc) { 5596 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5597 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5598 } 5599 } 5600 5601 void 5602 vop_mkdir_post(void *ap, int rc) 5603 { 5604 struct vop_mkdir_args *a = ap; 5605 5606 if (!rc) 5607 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5608 } 5609 5610 void 5611 vop_mknod_post(void *ap, int rc) 5612 { 5613 struct vop_mknod_args *a = ap; 5614 5615 if (!rc) 5616 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5617 } 5618 5619 void 5620 vop_reclaim_post(void *ap, int rc) 5621 { 5622 struct vop_reclaim_args *a = ap; 5623 5624 if (!rc) 5625 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5626 } 5627 5628 void 5629 vop_remove_post(void *ap, int rc) 5630 { 5631 struct vop_remove_args *a = ap; 5632 5633 if (!rc) { 5634 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5635 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5636 } 5637 } 5638 5639 void 5640 vop_rename_post(void *ap, int rc) 5641 { 5642 struct vop_rename_args *a = ap; 5643 long hint; 5644 5645 if (!rc) { 5646 hint = NOTE_WRITE; 5647 if (a->a_fdvp == a->a_tdvp) { 5648 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5649 hint |= NOTE_LINK; 5650 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5651 
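			/*
			 * fdvp == tdvp in this branch, so the same
			 * directory is notified for both roles.
			 */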
VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5652 } else { 5653 hint |= NOTE_EXTEND; 5654 if (a->a_fvp->v_type == VDIR) 5655 hint |= NOTE_LINK; 5656 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5657 5658 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5659 a->a_tvp->v_type == VDIR) 5660 hint &= ~NOTE_LINK; 5661 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5662 } 5663 5664 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5665 if (a->a_tvp) 5666 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5667 } 5668 if (a->a_tdvp != a->a_fdvp) 5669 vdrop(a->a_fdvp); 5670 if (a->a_tvp != a->a_fvp) 5671 vdrop(a->a_fvp); 5672 vdrop(a->a_tdvp); 5673 if (a->a_tvp) 5674 vdrop(a->a_tvp); 5675 } 5676 5677 void 5678 vop_rmdir_post(void *ap, int rc) 5679 { 5680 struct vop_rmdir_args *a = ap; 5681 5682 if (!rc) { 5683 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5684 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5685 } 5686 } 5687 5688 void 5689 vop_setattr_post(void *ap, int rc) 5690 { 5691 struct vop_setattr_args *a = ap; 5692 5693 if (!rc) 5694 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5695 } 5696 5697 void 5698 vop_setextattr_post(void *ap, int rc) 5699 { 5700 struct vop_setextattr_args *a = ap; 5701 5702 if (!rc) 5703 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5704 } 5705 5706 void 5707 vop_symlink_post(void *ap, int rc) 5708 { 5709 struct vop_symlink_args *a = ap; 5710 5711 if (!rc) 5712 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5713 } 5714 5715 void 5716 vop_open_post(void *ap, int rc) 5717 { 5718 struct vop_open_args *a = ap; 5719 5720 if (!rc) 5721 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5722 } 5723 5724 void 5725 vop_close_post(void *ap, int rc) 5726 { 5727 struct vop_close_args *a = ap; 5728 5729 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5730 !VN_IS_DOOMED(a->a_vp))) { 5731 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5732 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5733 } 5734 } 5735 5736 void 5737 vop_read_post(void *ap, int rc) 5738 { 5739 struct vop_read_args *a = ap; 5740 5741 if (!rc) 5742 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5743 } 5744 5745 void 5746 vop_readdir_post(void *ap, int rc) 5747 { 5748 struct vop_readdir_args *a = ap; 5749 5750 if (!rc) 5751 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5752 } 5753 5754 static struct knlist fs_knlist; 5755 5756 static void 5757 vfs_event_init(void *arg) 5758 { 5759 knlist_init_mtx(&fs_knlist, NULL); 5760 } 5761 /* XXX - correct order? 
*/ 5762 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5763 5764 void 5765 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5766 { 5767 5768 KNOTE_UNLOCKED(&fs_knlist, event); 5769 } 5770 5771 static int filt_fsattach(struct knote *kn); 5772 static void filt_fsdetach(struct knote *kn); 5773 static int filt_fsevent(struct knote *kn, long hint); 5774 5775 struct filterops fs_filtops = { 5776 .f_isfd = 0, 5777 .f_attach = filt_fsattach, 5778 .f_detach = filt_fsdetach, 5779 .f_event = filt_fsevent 5780 }; 5781 5782 static int 5783 filt_fsattach(struct knote *kn) 5784 { 5785 5786 kn->kn_flags |= EV_CLEAR; 5787 knlist_add(&fs_knlist, kn, 0); 5788 return (0); 5789 } 5790 5791 static void 5792 filt_fsdetach(struct knote *kn) 5793 { 5794 5795 knlist_remove(&fs_knlist, kn, 0); 5796 } 5797 5798 static int 5799 filt_fsevent(struct knote *kn, long hint) 5800 { 5801 5802 kn->kn_fflags |= hint; 5803 return (kn->kn_fflags != 0); 5804 } 5805 5806 static int 5807 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5808 { 5809 struct vfsidctl vc; 5810 int error; 5811 struct mount *mp; 5812 5813 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5814 if (error) 5815 return (error); 5816 if (vc.vc_vers != VFS_CTL_VERS1) 5817 return (EINVAL); 5818 mp = vfs_getvfs(&vc.vc_fsid); 5819 if (mp == NULL) 5820 return (ENOENT); 5821 /* ensure that a specific sysctl goes to the right filesystem. */ 5822 if (strcmp(vc.vc_fstypename, "*") != 0 && 5823 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5824 vfs_rel(mp); 5825 return (EINVAL); 5826 } 5827 VCTLTOREQ(&vc, req); 5828 error = VFS_SYSCTL(mp, vc.vc_op, req); 5829 vfs_rel(mp); 5830 return (error); 5831 } 5832 5833 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5834 NULL, 0, sysctl_vfs_ctl, "", 5835 "Sysctl by fsid"); 5836 5837 /* 5838 * Function to initialize a va_filerev field sensibly. 5839 * XXX: Wouldn't a random number make a lot more sense ?? 
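 * The returned value packs the boot-relative binuptime into 64 bits:
 * the seconds in the upper half and the top half of the fraction in
 * the lower half, so successive calls within one boot yield
 * monotonically non-decreasing values.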
5840 */ 5841 u_quad_t 5842 init_va_filerev(void) 5843 { 5844 struct bintime bt; 5845 5846 getbinuptime(&bt); 5847 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5848 } 5849 5850 static int filt_vfsread(struct knote *kn, long hint); 5851 static int filt_vfswrite(struct knote *kn, long hint); 5852 static int filt_vfsvnode(struct knote *kn, long hint); 5853 static void filt_vfsdetach(struct knote *kn); 5854 static struct filterops vfsread_filtops = { 5855 .f_isfd = 1, 5856 .f_detach = filt_vfsdetach, 5857 .f_event = filt_vfsread 5858 }; 5859 static struct filterops vfswrite_filtops = { 5860 .f_isfd = 1, 5861 .f_detach = filt_vfsdetach, 5862 .f_event = filt_vfswrite 5863 }; 5864 static struct filterops vfsvnode_filtops = { 5865 .f_isfd = 1, 5866 .f_detach = filt_vfsdetach, 5867 .f_event = filt_vfsvnode 5868 }; 5869 5870 static void 5871 vfs_knllock(void *arg) 5872 { 5873 struct vnode *vp = arg; 5874 5875 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5876 } 5877 5878 static void 5879 vfs_knlunlock(void *arg) 5880 { 5881 struct vnode *vp = arg; 5882 5883 VOP_UNLOCK(vp); 5884 } 5885 5886 static void 5887 vfs_knl_assert_locked(void *arg) 5888 { 5889 #ifdef DEBUG_VFS_LOCKS 5890 struct vnode *vp = arg; 5891 5892 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5893 #endif 5894 } 5895 5896 static void 5897 vfs_knl_assert_unlocked(void *arg) 5898 { 5899 #ifdef DEBUG_VFS_LOCKS 5900 struct vnode *vp = arg; 5901 5902 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5903 #endif 5904 } 5905 5906 int 5907 vfs_kqfilter(struct vop_kqfilter_args *ap) 5908 { 5909 struct vnode *vp = ap->a_vp; 5910 struct knote *kn = ap->a_kn; 5911 struct knlist *knl; 5912 5913 switch (kn->kn_filter) { 5914 case EVFILT_READ: 5915 kn->kn_fop = &vfsread_filtops; 5916 break; 5917 case EVFILT_WRITE: 5918 kn->kn_fop = &vfswrite_filtops; 5919 break; 5920 case EVFILT_VNODE: 5921 kn->kn_fop = &vfsvnode_filtops; 5922 break; 5923 default: 5924 return (EINVAL); 5925 } 5926 5927 kn->kn_hook = (caddr_t)vp; 5928 5929 v_addpollinfo(vp); 5930 if (vp->v_pollinfo == NULL) 5931 return (ENOMEM); 5932 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5933 vhold(vp); 5934 knlist_add(knl, kn, 0); 5935 5936 return (0); 5937 } 5938 5939 /* 5940 * Detach knote from vnode 5941 */ 5942 static void 5943 filt_vfsdetach(struct knote *kn) 5944 { 5945 struct vnode *vp = (struct vnode *)kn->kn_hook; 5946 5947 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5948 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5949 vdrop(vp); 5950 } 5951 5952 /*ARGSUSED*/ 5953 static int 5954 filt_vfsread(struct knote *kn, long hint) 5955 { 5956 struct vnode *vp = (struct vnode *)kn->kn_hook; 5957 struct vattr va; 5958 int res; 5959 5960 /* 5961 * filesystem is gone, so set the EOF flag and schedule 5962 * the knote for deletion. 5963 */ 5964 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5965 VI_LOCK(vp); 5966 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5967 VI_UNLOCK(vp); 5968 return (1); 5969 } 5970 5971 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5972 return (0); 5973 5974 VI_LOCK(vp); 5975 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5976 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5977 VI_UNLOCK(vp); 5978 return (res); 5979 } 5980 5981 /*ARGSUSED*/ 5982 static int 5983 filt_vfswrite(struct knote *kn, long hint) 5984 { 5985 struct vnode *vp = (struct vnode *)kn->kn_hook; 5986 5987 VI_LOCK(vp); 5988 5989 /* 5990 * filesystem is gone, so set the EOF flag and schedule 5991 * the knote for deletion. 
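 * A vnode is otherwise always considered writable, hence the
 * unconditional return of 1 below with kn_data left at 0.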
*/ 5993 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5994 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5995 5996 kn->kn_data = 0; 5997 VI_UNLOCK(vp); 5998 return (1); 5999 } 6000 6001 static int 6002 filt_vfsvnode(struct knote *kn, long hint) 6003 { 6004 struct vnode *vp = (struct vnode *)kn->kn_hook; 6005 int res; 6006 6007 VI_LOCK(vp); 6008 if (kn->kn_sfflags & hint) 6009 kn->kn_fflags |= hint; 6010 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6011 kn->kn_flags |= EV_EOF; 6012 VI_UNLOCK(vp); 6013 return (1); 6014 } 6015 res = (kn->kn_fflags != 0); 6016 VI_UNLOCK(vp); 6017 return (res); 6018 } 6019 6020 /* 6021 * Returns whether the directory is empty or not. 6022 * If it is empty, the return value is 0; otherwise 6023 * the return value is an error value (which may 6024 * be ENOTEMPTY). 6025 */ 6026 int 6027 vfs_emptydir(struct vnode *vp) 6028 { 6029 struct uio uio; 6030 struct iovec iov; 6031 struct dirent *dirent, *dp, *endp; 6032 int error, eof; 6033 6034 error = 0; 6035 eof = 0; 6036 6037 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 6038 6039 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 6040 iov.iov_base = dirent; 6041 iov.iov_len = sizeof(struct dirent); 6042 6043 uio.uio_iov = &iov; 6044 uio.uio_iovcnt = 1; 6045 uio.uio_offset = 0; 6046 uio.uio_resid = sizeof(struct dirent); 6047 uio.uio_segflg = UIO_SYSSPACE; 6048 uio.uio_rw = UIO_READ; 6049 uio.uio_td = curthread; 6050 6051 while (eof == 0 && error == 0) { 6052 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 6053 NULL, NULL); 6054 if (error != 0) 6055 break; 6056 endp = (void *)((uint8_t *)dirent + 6057 sizeof(struct dirent) - uio.uio_resid); 6058 for (dp = dirent; dp < endp; 6059 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 6060 if (dp->d_type == DT_WHT) 6061 continue; 6062 if (dp->d_namlen == 0) 6063 continue; 6064 if (dp->d_type != DT_DIR && 6065 dp->d_type != DT_UNKNOWN) { 6066 error = ENOTEMPTY; 6067 break; 6068 } 6069 if (dp->d_namlen > 2) { 6070 error = ENOTEMPTY; 6071 break; 6072 } 6073 if (dp->d_namlen == 1 && 6074 dp->d_name[0] != '.') { 6075 error = ENOTEMPTY; 6076 break; 6077 } 6078 if (dp->d_namlen == 2 && 6079 (dp->d_name[0] != '.' || dp->d_name[1] != '.')) { 6080 error = ENOTEMPTY; 6081 break; 6082 } 6083 uio.uio_resid = sizeof(struct dirent); 6084 } 6085 } 6086 free(dirent, M_TEMP); 6087 return (error); 6088 } 6089 6090 int 6091 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6092 { 6093 int error; 6094 6095 if (dp->d_reclen > ap->a_uio->uio_resid) 6096 return (ENAMETOOLONG); 6097 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6098 if (error) { 6099 if (ap->a_ncookies != NULL) { 6100 if (ap->a_cookies != NULL) 6101 free(ap->a_cookies, M_TEMP); 6102 ap->a_cookies = NULL; 6103 *ap->a_ncookies = 0; 6104 } 6105 return (error); 6106 } 6107 if (ap->a_ncookies == NULL) 6108 return (0); 6109 6110 KASSERT(ap->a_cookies, 6111 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6112 6113 *ap->a_cookies = realloc(*ap->a_cookies, 6114 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 6115 (*ap->a_cookies)[*ap->a_ncookies] = off; 6116 *ap->a_ncookies += 1; 6117 return (0); 6118 } 6119 6120 /* 6121 * The purpose of this routine is to remove granularity from accmode_t, 6122 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6123 * VADMIN and VAPPEND. 6124 * 6125 * If it returns 0, the caller is supposed to continue with the usual 6126 * access checks using 'accmode' as modified by this routine.
If it 6127 * returns nonzero value, the caller is supposed to return that value 6128 * as errno. 6129 * 6130 * Note that after this routine runs, accmode may be zero. 6131 */ 6132 int 6133 vfs_unixify_accmode(accmode_t *accmode) 6134 { 6135 /* 6136 * There is no way to specify explicit "deny" rule using 6137 * file mode or POSIX.1e ACLs. 6138 */ 6139 if (*accmode & VEXPLICIT_DENY) { 6140 *accmode = 0; 6141 return (0); 6142 } 6143 6144 /* 6145 * None of these can be translated into usual access bits. 6146 * Also, the common case for NFSv4 ACLs is to not contain 6147 * either of these bits. Caller should check for VWRITE 6148 * on the containing directory instead. 6149 */ 6150 if (*accmode & (VDELETE_CHILD | VDELETE)) 6151 return (EPERM); 6152 6153 if (*accmode & VADMIN_PERMS) { 6154 *accmode &= ~VADMIN_PERMS; 6155 *accmode |= VADMIN; 6156 } 6157 6158 /* 6159 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6160 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 6161 */ 6162 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6163 6164 return (0); 6165 } 6166 6167 /* 6168 * Clear out a doomed vnode (if any) and replace it with a new one as long 6169 * as the fs is not being unmounted. Return the root vnode to the caller. 6170 */ 6171 static int __noinline 6172 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6173 { 6174 struct vnode *vp; 6175 int error; 6176 6177 restart: 6178 if (mp->mnt_rootvnode != NULL) { 6179 MNT_ILOCK(mp); 6180 vp = mp->mnt_rootvnode; 6181 if (vp != NULL) { 6182 if (!VN_IS_DOOMED(vp)) { 6183 vrefact(vp); 6184 MNT_IUNLOCK(mp); 6185 error = vn_lock(vp, flags); 6186 if (error == 0) { 6187 *vpp = vp; 6188 return (0); 6189 } 6190 vrele(vp); 6191 goto restart; 6192 } 6193 /* 6194 * Clear the old one. 6195 */ 6196 mp->mnt_rootvnode = NULL; 6197 } 6198 MNT_IUNLOCK(mp); 6199 if (vp != NULL) { 6200 vfs_op_barrier_wait(mp); 6201 vrele(vp); 6202 } 6203 } 6204 error = VFS_CACHEDROOT(mp, flags, vpp); 6205 if (error != 0) 6206 return (error); 6207 if (mp->mnt_vfs_ops == 0) { 6208 MNT_ILOCK(mp); 6209 if (mp->mnt_vfs_ops != 0) { 6210 MNT_IUNLOCK(mp); 6211 return (0); 6212 } 6213 if (mp->mnt_rootvnode == NULL) { 6214 vrefact(*vpp); 6215 mp->mnt_rootvnode = *vpp; 6216 } else { 6217 if (mp->mnt_rootvnode != *vpp) { 6218 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6219 panic("%s: mismatch between vnode returned " 6220 " by VFS_CACHEDROOT and the one cached " 6221 " (%p != %p)", 6222 __func__, *vpp, mp->mnt_rootvnode); 6223 } 6224 } 6225 } 6226 MNT_IUNLOCK(mp); 6227 } 6228 return (0); 6229 } 6230 6231 int 6232 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6233 { 6234 struct vnode *vp; 6235 int error; 6236 6237 if (!vfs_op_thread_enter(mp)) 6238 return (vfs_cache_root_fallback(mp, flags, vpp)); 6239 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6240 if (vp == NULL || VN_IS_DOOMED(vp)) { 6241 vfs_op_thread_exit(mp); 6242 return (vfs_cache_root_fallback(mp, flags, vpp)); 6243 } 6244 vrefact(vp); 6245 vfs_op_thread_exit(mp); 6246 error = vn_lock(vp, flags); 6247 if (error != 0) { 6248 vrele(vp); 6249 return (vfs_cache_root_fallback(mp, flags, vpp)); 6250 } 6251 *vpp = vp; 6252 return (0); 6253 } 6254 6255 struct vnode * 6256 vfs_cache_root_clear(struct mount *mp) 6257 { 6258 struct vnode *vp; 6259 6260 /* 6261 * ops > 0 guarantees there is nobody who can see this vnode 6262 */ 6263 MPASS(mp->mnt_vfs_ops > 0); 6264 vp = mp->mnt_rootvnode; 6265 mp->mnt_rootvnode = NULL; 6266 return (vp); 6267 } 6268 6269 void 6270 vfs_cache_root_set(struct mount *mp, struct vnode 
void
vfs_cache_root_set(struct mount *mp, struct vnode *vp)
{

	MPASS(mp->mnt_vfs_ops > 0);
	vrefact(vp);
	mp->mnt_rootvnode = vp;
}

/*
 * These are helper functions for filesystems to traverse all
 * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
 *
 * This interface replaces MNT_VNODE_FOREACH.
 */

struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	if (should_yield())
		kern_yield(PRI_USER);
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
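/*
 * The helpers above are normally consumed through the
 * MNT_VNODE_FOREACH_ALL() macro from sys/mount.h.  A typical traversal,
 * sketched here for illustration, looks like:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		... work on vp; the interlock is held on entry ...
 *		VI_UNLOCK(vp);
 *	}
 *
 * The iterator returns each vnode with its interlock held, so the loop
 * body must drop it, and MNT_VNODE_FOREACH_ALL_ABORT() must be used to
 * leave the loop early so that the marker is freed.
 */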
/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h.
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * Relock the mp mount vnode list lock with the vp vnode interlock in the
 * conventional lock order during mnt_vnode_next_lazy iteration.
 *
 * On entry, the mount vnode list lock is held and the vnode interlock is not.
 * The list lock is dropped and reacquired.  On success, both locks are held.
 * On failure, the mount vnode list lock is held but the vnode interlock is
 * not, and the procedure may have yielded.
 */
static bool
mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
    struct vnode *vp)
{

	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
	    ("%s: bad marker", __func__));
	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
	    ("%s: inappropriate vnode", __func__));
	ASSERT_VI_UNLOCKED(vp, __func__);
	mtx_assert(&mp->mnt_listmtx, MA_OWNED);

	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);

	/*
	 * Note that we may be racing against vdrop, which transitioned the
	 * hold count to 0 and now waits for the ->mnt_listmtx lock.  This is
	 * fine; if we are the only user after we get the interlock, we will
	 * just vdrop.
	 */
	vhold(vp);
	mtx_unlock(&mp->mnt_listmtx);
	VI_LOCK(vp);
	if (VN_IS_DOOMED(vp)) {
		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
		goto out_lost;
	}
	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
	/*
	 * There is nothing to do if we are the last user.
	 */
	if (!refcount_release_if_not_last(&vp->v_holdcnt))
		goto out_lost;
	mtx_lock(&mp->mnt_listmtx);
	return (true);
out_lost:
	vdropl(vp);
	maybe_yield();
	mtx_lock(&mp->mnt_listmtx);
	return (false);
}
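/*
 * The iterator below is normally driven through MNT_VNODE_FOREACH_LAZY()
 * from sys/mount.h.  A minimal sketch of the usual pattern follows; the
 * callback name "sync_filter" is hypothetical (the exact callback typedef
 * is mnt_lazy_cb_t in sys/mount.h).  The callback runs with mnt_listmtx
 * held, so it must not sleep, and it returns true only for vnodes the
 * caller wants to visit:
 *
 *	static bool
 *	sync_filter(struct vnode *vp, void *arg __unused)
 *	{
 *
 *		return (vp->v_object != NULL &&
 *		    vm_object_mightbedirty(vp->v_object));
 *	}
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, sync_filter, NULL) {
 *		... each vp is returned with its interlock held ...
 *		VI_UNLOCK(vp);
 *	}
 *
 * MNT_VNODE_FOREACH_LAZY_ABORT() frees the marker when the loop must be
 * left early.
 */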
static struct vnode *
mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
restart:
	vp = TAILQ_NEXT(*mvp, v_lazylist);
	while (vp != NULL) {
		if (vp->v_type == VMARKER) {
			vp = TAILQ_NEXT(vp, v_lazylist);
			continue;
		}
		/*
		 * See if we want to process the vnode.  Note we may encounter
		 * a long string of vnodes we don't care about and hog the
		 * list as a result.  Check for it and requeue the marker.
		 */
		VNPASS(!VN_IS_DOOMED(vp), vp);
		if (!cb(vp, cbarg)) {
			if (!should_yield()) {
				vp = TAILQ_NEXT(vp, v_lazylist);
				continue;
			}
			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
			    v_lazylist);
			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
			    v_lazylist);
			mtx_unlock(&mp->mnt_listmtx);
			kern_yield(PRI_USER);
			mtx_lock(&mp->mnt_listmtx);
			goto restart;
		}
		/*
		 * Try-lock because this is the wrong lock order.
		 */
		if (!VI_TRYLOCK(vp) &&
		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
			goto restart;
		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
		    ("alien vnode on the lazy list %p %p", vp, mp));
		VNPASS(vp->v_mount == mp, vp);
		VNPASS(!VN_IS_DOOMED(vp), vp);
		break;
	}
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);

	/* Check if we are done */
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	ASSERT_VI_LOCKED(vp, "lazy iter");
	return (vp);
}

struct vnode *
__mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{

	if (should_yield())
		kern_yield(PRI_USER);
	mtx_lock(&mp->mnt_listmtx);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

struct vnode *
__mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
    void *cbarg)
{
	struct vnode *vp;

	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
		return (NULL);

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);
	MNT_IUNLOCK(mp);

	mtx_lock(&mp->mnt_listmtx);
	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
	if (vp == NULL) {
		mtx_unlock(&mp->mnt_listmtx);
		mnt_vnode_markerfree_lazy(mvp, mp);
		return (NULL);
	}
	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
}

void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread));
}
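/*
 * An illustrative sketch of the intended call site for the helper above:
 * a filesystem lookup routine checks execute permission on the directory
 * via vn_dir_check_exec() rather than calling VOP_ACCESS() directly, so
 * that a NOEXECCHECK request from the VFS lookup layer is honored and
 * consumed exactly once.  The "vdp" directory vnode name is hypothetical:
 *
 *	error = vn_dir_check_exec(vdp, cnp);
 *	if (error != 0)
 *		return (error);
 */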