1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 1989, 1993 5 * The Regents of the University of California. All rights reserved. 6 * (c) UNIX System Laboratories, Inc. 7 * All or some portions of this file are derived from material licensed 8 * to the University of California by American Telephone and Telegraph 9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 10 * the permission of UNIX System Laboratories, Inc. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95 37 */ 38 39 /* 40 * External virtual filesystem routines 41 */ 42 43 #include <sys/cdefs.h> 44 __FBSDID("$FreeBSD$"); 45 46 #include "opt_ddb.h" 47 #include "opt_watchdog.h" 48 49 #include <sys/param.h> 50 #include <sys/systm.h> 51 #include <sys/bio.h> 52 #include <sys/buf.h> 53 #include <sys/capsicum.h> 54 #include <sys/condvar.h> 55 #include <sys/conf.h> 56 #include <sys/counter.h> 57 #include <sys/dirent.h> 58 #include <sys/event.h> 59 #include <sys/eventhandler.h> 60 #include <sys/extattr.h> 61 #include <sys/file.h> 62 #include <sys/fcntl.h> 63 #include <sys/jail.h> 64 #include <sys/kdb.h> 65 #include <sys/kernel.h> 66 #include <sys/kthread.h> 67 #include <sys/ktr.h> 68 #include <sys/lockf.h> 69 #include <sys/malloc.h> 70 #include <sys/mount.h> 71 #include <sys/namei.h> 72 #include <sys/pctrie.h> 73 #include <sys/priv.h> 74 #include <sys/reboot.h> 75 #include <sys/refcount.h> 76 #include <sys/rwlock.h> 77 #include <sys/sched.h> 78 #include <sys/sleepqueue.h> 79 #include <sys/smp.h> 80 #include <sys/stat.h> 81 #include <sys/sysctl.h> 82 #include <sys/syslog.h> 83 #include <sys/vmmeter.h> 84 #include <sys/vnode.h> 85 #include <sys/watchdog.h> 86 87 #include <machine/stdarg.h> 88 89 #include <security/mac/mac_framework.h> 90 91 #include <vm/vm.h> 92 #include <vm/vm_object.h> 93 #include <vm/vm_extern.h> 94 #include <vm/pmap.h> 95 #include <vm/vm_map.h> 96 #include <vm/vm_page.h> 97 #include <vm/vm_kern.h> 98 #include <vm/uma.h> 99 100 #ifdef DDB 101 #include <ddb/ddb.h> 102 #endif 103 104 static void delmntque(struct vnode *vp); 105 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, 106 int slpflag, int slptimeo); 107 static void syncer_shutdown(void *arg, int howto); 108 static int vtryrecycle(struct vnode *vp); 109 static void v_init_counters(struct vnode *); 110 static void v_incr_devcount(struct vnode *); 111 static void v_decr_devcount(struct vnode *); 112 static void vgonel(struct vnode *); 113 static void vfs_knllock(void *arg); 114 static void vfs_knlunlock(void *arg); 115 static void vfs_knl_assert_locked(void *arg); 116 static void vfs_knl_assert_unlocked(void *arg); 117 static void vnlru_return_batches(struct vfsops *mnt_op); 118 static void destroy_vpollinfo(struct vpollinfo *vi); 119 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 120 daddr_t startlbn, daddr_t endlbn); 121 122 /* 123 * These fences are intended for cases where some synchronization is 124 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt 125 * and v_usecount) updates. Access to v_iflags is generally synchronized 126 * by the interlock, but we have some internal assertions that check vnode 127 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only 128 * for now. 129 */ 130 #ifdef INVARIANTS 131 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq() 132 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel() 133 #else 134 #define VNODE_REFCOUNT_FENCE_ACQ() 135 #define VNODE_REFCOUNT_FENCE_REL() 136 #endif 137 138 /* 139 * Number of vnodes in existence. Increased whenever getnewvnode() 140 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. 
141 */ 142 static u_long __exclusive_cache_line numvnodes; 143 144 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, 145 "Number of vnodes in existence"); 146 147 static counter_u64_t vnodes_created; 148 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created, 149 "Number of vnodes created by getnewvnode"); 150 151 static u_long mnt_free_list_batch = 128; 152 SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW, 153 &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list"); 154 155 /* 156 * Conversion tables for conversion from vnode types to inode formats 157 * and back. 158 */ 159 enum vtype iftovt_tab[16] = { 160 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, 161 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON 162 }; 163 int vttoif_tab[10] = { 164 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, 165 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT 166 }; 167 168 /* 169 * List of vnodes that are ready for recycling. 170 */ 171 static TAILQ_HEAD(freelst, vnode) vnode_free_list; 172 173 /* 174 * "Free" vnode target. Free vnodes are rarely completely free, but are 175 * just ones that are cheap to recycle. Usually they are for files which 176 * have been stat'd but not read; these usually have inode and namecache 177 * data attached to them. This target is the preferred minimum size of a 178 * sub-cache consisting mostly of such files. The system balances the size 179 * of this sub-cache with its complement to try to prevent either from 180 * thrashing while the other is relatively inactive. The targets express 181 * a preference for the best balance. 182 * 183 * "Above" this target there are 2 further targets (watermarks) related 184 * to recyling of free vnodes. In the best-operating case, the cache is 185 * exactly full, the free list has size between vlowat and vhiwat above the 186 * free target, and recycling from it and normal use maintains this state. 187 * Sometimes the free list is below vlowat or even empty, but this state 188 * is even better for immediate use provided the cache is not full. 189 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free 190 * ones) to reach one of these states. The watermarks are currently hard- 191 * coded as 4% and 9% of the available space higher. These and the default 192 * of 25% for wantfreevnodes are too large if the memory size is large. 193 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim 194 * whenever vnlru_proc() becomes active. 195 */ 196 static u_long wantfreevnodes; 197 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, 198 &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes"); 199 static u_long freevnodes; 200 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, 201 &freevnodes, 0, "Number of \"free\" vnodes"); 202 203 static counter_u64_t recycles_count; 204 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 205 "Number of vnodes recycled to meet vnode cache targets"); 206 207 static counter_u64_t recycles_free_count; 208 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count, 209 "Number of free vnodes recycled to meet vnode cache targets"); 210 211 /* 212 * Various variables used for debugging the new implementation of 213 * reassignbuf(). 214 * XXX these are probably of (very) limited utility now. 
215 */ 216 static int reassignbufcalls; 217 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW | CTLFLAG_STATS, 218 &reassignbufcalls, 0, "Number of calls to reassignbuf"); 219 220 static counter_u64_t deferred_inact; 221 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact, 222 "Number of times inactive processing was deferred"); 223 224 /* To keep more than one thread at a time from running vfs_getnewfsid */ 225 static struct mtx mntid_mtx; 226 227 /* 228 * Lock for any access to the following: 229 * vnode_free_list 230 * numvnodes 231 * freevnodes 232 */ 233 static struct mtx __exclusive_cache_line vnode_free_list_mtx; 234 235 /* Publicly exported FS */ 236 struct nfs_public nfs_pub; 237 238 static uma_zone_t buf_trie_zone; 239 240 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */ 241 static uma_zone_t vnode_zone; 242 static uma_zone_t vnodepoll_zone; 243 244 /* 245 * The workitem queue. 246 * 247 * It is useful to delay writes of file data and filesystem metadata 248 * for tens of seconds so that quickly created and deleted files need 249 * not waste disk bandwidth being created and removed. To realize this, 250 * we append vnodes to a "workitem" queue. When running with a soft 251 * updates implementation, most pending metadata dependencies should 252 * not wait for more than a few seconds. Thus, mounted on block devices 253 * are delayed only about a half the time that file data is delayed. 254 * Similarly, directory updates are more critical, so are only delayed 255 * about a third the time that file data is delayed. Thus, there are 256 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of 257 * one each second (driven off the filesystem syncer process). The 258 * syncer_delayno variable indicates the next queue that is to be processed. 
259 * Items that need to be processed soon are placed in this queue: 260 * 261 * syncer_workitem_pending[syncer_delayno] 262 * 263 * A delay of fifteen seconds is done by placing the request fifteen 264 * entries later in the queue: 265 * 266 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask] 267 * 268 */ 269 static int syncer_delayno; 270 static long syncer_mask; 271 LIST_HEAD(synclist, bufobj); 272 static struct synclist *syncer_workitem_pending; 273 /* 274 * The sync_mtx protects: 275 * bo->bo_synclist 276 * sync_vnode_count 277 * syncer_delayno 278 * syncer_state 279 * syncer_workitem_pending 280 * syncer_worklist_len 281 * rushjob 282 */ 283 static struct mtx sync_mtx; 284 static struct cv sync_wakeup; 285 286 #define SYNCER_MAXDELAY 32 287 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */ 288 static int syncdelay = 30; /* max time to delay syncing data */ 289 static int filedelay = 30; /* time to delay syncing files */ 290 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, 291 "Time to delay syncing files (in seconds)"); 292 static int dirdelay = 29; /* time to delay syncing directories */ 293 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, 294 "Time to delay syncing directories (in seconds)"); 295 static int metadelay = 28; /* time to delay syncing metadata */ 296 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, 297 "Time to delay syncing metadata (in seconds)"); 298 static int rushjob; /* number of slots to run ASAP */ 299 static int stat_rush_requests; /* number of times I/O speeded up */ 300 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, 301 "Number of times I/O speeded up (rush requests)"); 302 303 /* 304 * When shutting down the syncer, run it at four times normal speed. 305 */ 306 #define SYNCER_SHUTDOWN_SPEEDUP 4 307 static int sync_vnode_count; 308 static int syncer_worklist_len; 309 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY } 310 syncer_state; 311 312 /* Target for maximum number of vnodes. */ 313 u_long desiredvnodes; 314 static u_long gapvnodes; /* gap between wanted and desired */ 315 static u_long vhiwat; /* enough extras after expansion */ 316 static u_long vlowat; /* minimal extras before expansion */ 317 static u_long vstir; /* nonzero to stir non-free vnodes */ 318 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */ 319 320 static int 321 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS) 322 { 323 u_long old_desiredvnodes; 324 int error; 325 326 old_desiredvnodes = desiredvnodes; 327 if ((error = sysctl_handle_long(oidp, arg1, arg2, req)) != 0) 328 return (error); 329 if (old_desiredvnodes != desiredvnodes) { 330 wantfreevnodes = desiredvnodes / 4; 331 /* XXX locking seems to be incomplete. 
*/ 332 vfs_hash_changesize(desiredvnodes); 333 cache_changesize(desiredvnodes); 334 } 335 return (0); 336 } 337 338 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, 339 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0, 340 sysctl_update_desiredvnodes, "UL", "Target for maximum number of vnodes"); 341 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW, 342 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)"); 343 static int vnlru_nowhere; 344 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, 345 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success"); 346 347 static int 348 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) 349 { 350 struct vnode *vp; 351 struct nameidata nd; 352 char *buf; 353 unsigned long ndflags; 354 int error; 355 356 if (req->newptr == NULL) 357 return (EINVAL); 358 if (req->newlen >= PATH_MAX) 359 return (E2BIG); 360 361 buf = malloc(PATH_MAX, M_TEMP, M_WAITOK); 362 error = SYSCTL_IN(req, buf, req->newlen); 363 if (error != 0) 364 goto out; 365 366 buf[req->newlen] = '\0'; 367 368 ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME; 369 NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread); 370 if ((error = namei(&nd)) != 0) 371 goto out; 372 vp = nd.ni_vp; 373 374 if (VN_IS_DOOMED(vp)) { 375 /* 376 * This vnode is being recycled. Return != 0 to let the caller 377 * know that the sysctl had no effect. Return EAGAIN because a 378 * subsequent call will likely succeed (since namei will create 379 * a new vnode if necessary) 380 */ 381 error = EAGAIN; 382 goto putvnode; 383 } 384 385 counter_u64_add(recycles_count, 1); 386 vgone(vp); 387 putvnode: 388 NDFREE(&nd, 0); 389 out: 390 free(buf, M_TEMP); 391 return (error); 392 } 393 394 static int 395 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS) 396 { 397 struct thread *td = curthread; 398 struct vnode *vp; 399 struct file *fp; 400 int error; 401 int fd; 402 403 if (req->newptr == NULL) 404 return (EBADF); 405 406 error = sysctl_handle_int(oidp, &fd, 0, req); 407 if (error != 0) 408 return (error); 409 error = getvnode(curthread, fd, &cap_fcntl_rights, &fp); 410 if (error != 0) 411 return (error); 412 vp = fp->f_vnode; 413 414 error = vn_lock(vp, LK_EXCLUSIVE); 415 if (error != 0) 416 goto drop; 417 418 counter_u64_add(recycles_count, 1); 419 vgone(vp); 420 VOP_UNLOCK(vp); 421 drop: 422 fdrop(fp, td); 423 return (error); 424 } 425 426 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode, 427 CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 428 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname"); 429 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode, 430 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0, 431 sysctl_ftry_reclaim_vnode, "I", 432 "Try to reclaim a vnode by its file descriptor"); 433 434 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */ 435 static int vnsz2log; 436 437 /* 438 * Support for the bufobj clean & dirty pctrie. 439 */ 440 static void * 441 buf_trie_alloc(struct pctrie *ptree) 442 { 443 444 return uma_zalloc(buf_trie_zone, M_NOWAIT); 445 } 446 447 static void 448 buf_trie_free(struct pctrie *ptree, void *node) 449 { 450 451 uma_zfree(buf_trie_zone, node); 452 } 453 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free); 454 455 /* 456 * Initialize the vnode management data structures. 457 * 458 * Reevaluate the following cap on the number of vnodes after the physical 459 * memory size exceeds 512GB. 
In the limit, as the physical memory size 460 * grows, the ratio of the memory size in KB to vnodes approaches 64:1. 461 */ 462 #ifndef MAXVNODES_MAX 463 #define MAXVNODES_MAX (512UL * 1024 * 1024 / 64) /* 8M */ 464 #endif 465 466 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker"); 467 468 static struct vnode * 469 vn_alloc_marker(struct mount *mp) 470 { 471 struct vnode *vp; 472 473 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); 474 vp->v_type = VMARKER; 475 vp->v_mount = mp; 476 477 return (vp); 478 } 479 480 static void 481 vn_free_marker(struct vnode *vp) 482 { 483 484 MPASS(vp->v_type == VMARKER); 485 free(vp, M_VNODE_MARKER); 486 } 487 488 /* 489 * Initialize a vnode as it first enters the zone. 490 */ 491 static int 492 vnode_init(void *mem, int size, int flags) 493 { 494 struct vnode *vp; 495 496 vp = mem; 497 bzero(vp, size); 498 /* 499 * Setup locks. 500 */ 501 vp->v_vnlock = &vp->v_lock; 502 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); 503 /* 504 * By default, don't allow shared locks unless filesystems opt-in. 505 */ 506 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, 507 LK_NOSHARE | LK_IS_VNODE); 508 /* 509 * Initialize bufobj. 510 */ 511 bufobj_init(&vp->v_bufobj, vp); 512 /* 513 * Initialize namecache. 514 */ 515 LIST_INIT(&vp->v_cache_src); 516 TAILQ_INIT(&vp->v_cache_dst); 517 /* 518 * Initialize rangelocks. 519 */ 520 rangelock_init(&vp->v_rl); 521 return (0); 522 } 523 524 /* 525 * Free a vnode when it is cleared from the zone. 526 */ 527 static void 528 vnode_fini(void *mem, int size) 529 { 530 struct vnode *vp; 531 struct bufobj *bo; 532 533 vp = mem; 534 rangelock_destroy(&vp->v_rl); 535 lockdestroy(vp->v_vnlock); 536 mtx_destroy(&vp->v_interlock); 537 bo = &vp->v_bufobj; 538 rw_destroy(BO_LOCKPTR(bo)); 539 } 540 541 /* 542 * Provide the size of NFS nclnode and NFS fh for calculation of the 543 * vnode memory consumption. The size is specified directly to 544 * eliminate dependency on NFS-private header. 545 * 546 * Other filesystems may use bigger or smaller (like UFS and ZFS) 547 * private inode data, but the NFS-based estimation is ample enough. 548 * Still, we care about differences in the size between 64- and 32-bit 549 * platforms. 550 * 551 * Namecache structure size is heuristically 552 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1. 553 */ 554 #ifdef _LP64 555 #define NFS_NCLNODE_SZ (528 + 64) 556 #define NC_SZ 148 557 #else 558 #define NFS_NCLNODE_SZ (360 + 32) 559 #define NC_SZ 92 560 #endif 561 562 static void 563 vntblinit(void *dummy __unused) 564 { 565 u_int i; 566 int physvnodes, virtvnodes; 567 568 /* 569 * Desiredvnodes is a function of the physical memory size and the 570 * kernel's heap size. Generally speaking, it scales with the 571 * physical memory size. The ratio of desiredvnodes to the physical 572 * memory size is 1:16 until desiredvnodes exceeds 98,304. 573 * Thereafter, the 574 * marginal ratio of desiredvnodes to the physical memory size is 575 * 1:64. However, desiredvnodes is limited by the kernel's heap 576 * size. The memory required by desiredvnodes vnodes and vm objects 577 * must not exceed 1/10th of the kernel's heap size. 
578 */ 579 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 + 580 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64; 581 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) + 582 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); 583 desiredvnodes = min(physvnodes, virtvnodes); 584 if (desiredvnodes > MAXVNODES_MAX) { 585 if (bootverbose) 586 printf("Reducing kern.maxvnodes %lu -> %lu\n", 587 desiredvnodes, MAXVNODES_MAX); 588 desiredvnodes = MAXVNODES_MAX; 589 } 590 wantfreevnodes = desiredvnodes / 4; 591 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF); 592 TAILQ_INIT(&vnode_free_list); 593 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF); 594 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL, 595 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0); 596 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo), 597 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 598 /* 599 * Preallocate enough nodes to support one-per buf so that 600 * we can not fail an insert. reassignbuf() callers can not 601 * tolerate the insertion failure. 602 */ 603 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(), 604 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR, 605 UMA_ZONE_NOFREE | UMA_ZONE_VM); 606 uma_prealloc(buf_trie_zone, nbuf); 607 608 vnodes_created = counter_u64_alloc(M_WAITOK); 609 recycles_count = counter_u64_alloc(M_WAITOK); 610 recycles_free_count = counter_u64_alloc(M_WAITOK); 611 deferred_inact = counter_u64_alloc(M_WAITOK); 612 613 /* 614 * Initialize the filesystem syncer. 615 */ 616 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, 617 &syncer_mask); 618 syncer_maxdelay = syncer_mask + 1; 619 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF); 620 cv_init(&sync_wakeup, "syncer"); 621 for (i = 1; i <= sizeof(struct vnode); i <<= 1) 622 vnsz2log++; 623 vnsz2log--; 624 } 625 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL); 626 627 628 /* 629 * Mark a mount point as busy. Used to synchronize access and to delay 630 * unmounting. Eventually, mountlist_mtx is not released on failure. 631 * 632 * vfs_busy() is a custom lock, it can block the caller. 633 * vfs_busy() only sleeps if the unmount is active on the mount point. 634 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any 635 * vnode belonging to mp. 636 * 637 * Lookup uses vfs_busy() to traverse mount points. 638 * root fs var fs 639 * / vnode lock A / vnode lock (/var) D 640 * /var vnode lock B /log vnode lock(/var/log) E 641 * vfs_busy lock C vfs_busy lock F 642 * 643 * Within each file system, the lock order is C->A->B and F->D->E. 644 * 645 * When traversing across mounts, the system follows that lock order: 646 * 647 * C->A->B 648 * | 649 * +->F->D->E 650 * 651 * The lookup() process for namei("/var") illustrates the process: 652 * VOP_LOOKUP() obtains B while A is held 653 * vfs_busy() obtains a shared lock on F while A and B are held 654 * vput() releases lock on B 655 * vput() releases lock on A 656 * VFS_ROOT() obtains lock on D while shared lock on F is held 657 * vfs_unbusy() releases shared lock on F 658 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A. 659 * Attempt to lock A (instead of vp_crossmp) while D is held would 660 * violate the global order, causing deadlocks. 661 * 662 * dounmount() locks B while F is drained. 
663 */ 664 int 665 vfs_busy(struct mount *mp, int flags) 666 { 667 668 MPASS((flags & ~MBF_MASK) == 0); 669 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags); 670 671 if (vfs_op_thread_enter(mp)) { 672 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 673 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); 674 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); 675 vfs_mp_count_add_pcpu(mp, ref, 1); 676 vfs_mp_count_add_pcpu(mp, lockref, 1); 677 vfs_op_thread_exit(mp); 678 if (flags & MBF_MNTLSTLOCK) 679 mtx_unlock(&mountlist_mtx); 680 return (0); 681 } 682 683 MNT_ILOCK(mp); 684 vfs_assert_mount_counters(mp); 685 MNT_REF(mp); 686 /* 687 * If mount point is currently being unmounted, sleep until the 688 * mount point fate is decided. If thread doing the unmounting fails, 689 * it will clear MNTK_UNMOUNT flag before waking us up, indicating 690 * that this mount point has survived the unmount attempt and vfs_busy 691 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE 692 * flag in addition to MNTK_UNMOUNT, indicating that mount point is 693 * about to be really destroyed. vfs_busy needs to release its 694 * reference on the mount point in this case and return with ENOENT, 695 * telling the caller that mount mount it tried to busy is no longer 696 * valid. 697 */ 698 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { 699 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { 700 MNT_REL(mp); 701 MNT_IUNLOCK(mp); 702 CTR1(KTR_VFS, "%s: failed busying before sleeping", 703 __func__); 704 return (ENOENT); 705 } 706 if (flags & MBF_MNTLSTLOCK) 707 mtx_unlock(&mountlist_mtx); 708 mp->mnt_kern_flag |= MNTK_MWAIT; 709 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0); 710 if (flags & MBF_MNTLSTLOCK) 711 mtx_lock(&mountlist_mtx); 712 MNT_ILOCK(mp); 713 } 714 if (flags & MBF_MNTLSTLOCK) 715 mtx_unlock(&mountlist_mtx); 716 mp->mnt_lockref++; 717 MNT_IUNLOCK(mp); 718 return (0); 719 } 720 721 /* 722 * Free a busy filesystem. 723 */ 724 void 725 vfs_unbusy(struct mount *mp) 726 { 727 int c; 728 729 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 730 731 if (vfs_op_thread_enter(mp)) { 732 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 733 vfs_mp_count_sub_pcpu(mp, lockref, 1); 734 vfs_mp_count_sub_pcpu(mp, ref, 1); 735 vfs_op_thread_exit(mp); 736 return; 737 } 738 739 MNT_ILOCK(mp); 740 vfs_assert_mount_counters(mp); 741 MNT_REL(mp); 742 c = --mp->mnt_lockref; 743 if (mp->mnt_vfs_ops == 0) { 744 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); 745 MNT_IUNLOCK(mp); 746 return; 747 } 748 if (c < 0) 749 vfs_dump_mount_counters(mp); 750 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { 751 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); 752 CTR1(KTR_VFS, "%s: waking up waiters", __func__); 753 mp->mnt_kern_flag &= ~MNTK_DRAINING; 754 wakeup(&mp->mnt_lockref); 755 } 756 MNT_IUNLOCK(mp); 757 } 758 759 /* 760 * Lookup a mount point by filesystem identifier. 761 */ 762 struct mount * 763 vfs_getvfs(fsid_t *fsid) 764 { 765 struct mount *mp; 766 767 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 768 mtx_lock(&mountlist_mtx); 769 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 770 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 771 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 772 vfs_ref(mp); 773 mtx_unlock(&mountlist_mtx); 774 return (mp); 775 } 776 } 777 mtx_unlock(&mountlist_mtx); 778 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 779 return ((struct mount *) 0); 780 } 781 782 /* 783 * Lookup a mount point by filesystem identifier, busying it before 784 * returning. 
785 * 786 * To avoid congestion on mountlist_mtx, implement simple direct-mapped 787 * cache for popular filesystem identifiers. The cache is lockess, using 788 * the fact that struct mount's are never freed. In worst case we may 789 * get pointer to unmounted or even different filesystem, so we have to 790 * check what we got, and go slow way if so. 791 */ 792 struct mount * 793 vfs_busyfs(fsid_t *fsid) 794 { 795 #define FSID_CACHE_SIZE 256 796 typedef struct mount * volatile vmp_t; 797 static vmp_t cache[FSID_CACHE_SIZE]; 798 struct mount *mp; 799 int error; 800 uint32_t hash; 801 802 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid); 803 hash = fsid->val[0] ^ fsid->val[1]; 804 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); 805 mp = cache[hash]; 806 if (mp == NULL || 807 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] || 808 mp->mnt_stat.f_fsid.val[1] != fsid->val[1]) 809 goto slow; 810 if (vfs_busy(mp, 0) != 0) { 811 cache[hash] = NULL; 812 goto slow; 813 } 814 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 815 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) 816 return (mp); 817 else 818 vfs_unbusy(mp); 819 820 slow: 821 mtx_lock(&mountlist_mtx); 822 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 823 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && 824 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { 825 error = vfs_busy(mp, MBF_MNTLSTLOCK); 826 if (error) { 827 cache[hash] = NULL; 828 mtx_unlock(&mountlist_mtx); 829 return (NULL); 830 } 831 cache[hash] = mp; 832 return (mp); 833 } 834 } 835 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid); 836 mtx_unlock(&mountlist_mtx); 837 return ((struct mount *) 0); 838 } 839 840 /* 841 * Check if a user can access privileged mount options. 842 */ 843 int 844 vfs_suser(struct mount *mp, struct thread *td) 845 { 846 int error; 847 848 if (jailed(td->td_ucred)) { 849 /* 850 * If the jail of the calling thread lacks permission for 851 * this type of file system, deny immediately. 852 */ 853 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) 854 return (EPERM); 855 856 /* 857 * If the file system was mounted outside the jail of the 858 * calling thread, deny immediately. 859 */ 860 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) 861 return (EPERM); 862 } 863 864 /* 865 * If file system supports delegated administration, we don't check 866 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified 867 * by the file system itself. 868 * If this is not the user that did original mount, we check for 869 * the PRIV_VFS_MOUNT_OWNER privilege. 870 */ 871 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && 872 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { 873 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0) 874 return (error); 875 } 876 return (0); 877 } 878 879 /* 880 * Get a new unique fsid. Try to make its val[0] unique, since this value 881 * will be used to create fake device numbers for stat(). Also try (but 882 * not so hard) make its val[0] unique mod 2^16, since some emulators only 883 * support 16-bit device numbers. We end up with unique val[0]'s for the 884 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls. 885 * 886 * Keep in mind that several mounts may be running in parallel. Starting 887 * the search one past where the previous search terminated is both a 888 * micro-optimization and a defense against returning the same fsid to 889 * different mounts. 
890 */ 891 void 892 vfs_getnewfsid(struct mount *mp) 893 { 894 static uint16_t mntid_base; 895 struct mount *nmp; 896 fsid_t tfsid; 897 int mtype; 898 899 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 900 mtx_lock(&mntid_mtx); 901 mtype = mp->mnt_vfc->vfc_typenum; 902 tfsid.val[1] = mtype; 903 mtype = (mtype & 0xFF) << 24; 904 for (;;) { 905 tfsid.val[0] = makedev(255, 906 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)); 907 mntid_base++; 908 if ((nmp = vfs_getvfs(&tfsid)) == NULL) 909 break; 910 vfs_rel(nmp); 911 } 912 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; 913 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; 914 mtx_unlock(&mntid_mtx); 915 } 916 917 /* 918 * Knob to control the precision of file timestamps: 919 * 920 * 0 = seconds only; nanoseconds zeroed. 921 * 1 = seconds and nanoseconds, accurate within 1/HZ. 922 * 2 = seconds and nanoseconds, truncated to microseconds. 923 * >=3 = seconds and nanoseconds, maximum precision. 924 */ 925 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC }; 926 927 static int timestamp_precision = TSP_USEC; 928 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW, 929 ×tamp_precision, 0, "File timestamp precision (0: seconds, " 930 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, " 931 "3+: sec + ns (max. precision))"); 932 933 /* 934 * Get a current timestamp. 935 */ 936 void 937 vfs_timestamp(struct timespec *tsp) 938 { 939 struct timeval tv; 940 941 switch (timestamp_precision) { 942 case TSP_SEC: 943 tsp->tv_sec = time_second; 944 tsp->tv_nsec = 0; 945 break; 946 case TSP_HZ: 947 getnanotime(tsp); 948 break; 949 case TSP_USEC: 950 microtime(&tv); 951 TIMEVAL_TO_TIMESPEC(&tv, tsp); 952 break; 953 case TSP_NSEC: 954 default: 955 nanotime(tsp); 956 break; 957 } 958 } 959 960 /* 961 * Set vnode attributes to VNOVAL 962 */ 963 void 964 vattr_null(struct vattr *vap) 965 { 966 967 vap->va_type = VNON; 968 vap->va_size = VNOVAL; 969 vap->va_bytes = VNOVAL; 970 vap->va_mode = VNOVAL; 971 vap->va_nlink = VNOVAL; 972 vap->va_uid = VNOVAL; 973 vap->va_gid = VNOVAL; 974 vap->va_fsid = VNOVAL; 975 vap->va_fileid = VNOVAL; 976 vap->va_blocksize = VNOVAL; 977 vap->va_rdev = VNOVAL; 978 vap->va_atime.tv_sec = VNOVAL; 979 vap->va_atime.tv_nsec = VNOVAL; 980 vap->va_mtime.tv_sec = VNOVAL; 981 vap->va_mtime.tv_nsec = VNOVAL; 982 vap->va_ctime.tv_sec = VNOVAL; 983 vap->va_ctime.tv_nsec = VNOVAL; 984 vap->va_birthtime.tv_sec = VNOVAL; 985 vap->va_birthtime.tv_nsec = VNOVAL; 986 vap->va_flags = VNOVAL; 987 vap->va_gen = VNOVAL; 988 vap->va_vaflags = 0; 989 } 990 991 /* 992 * This routine is called when we have too many vnodes. It attempts 993 * to free <count> vnodes and will potentially free vnodes that still 994 * have VM backing store (VM backing store is typically the cause 995 * of a vnode blowout so we want to do this). Therefore, this operation 996 * is not considered cheap. 997 * 998 * A number of conditions may prevent a vnode from being reclaimed. 999 * the buffer cache may have references on the vnode, a directory 1000 * vnode may still have references due to the namei cache representing 1001 * underlying files, or the vnode may be in active use. It is not 1002 * desirable to reuse such vnodes. These conditions may cause the 1003 * number of vnodes to reach some minimum value regardless of what 1004 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low. 
1005 * 1006 * @param mp Try to reclaim vnodes from this mountpoint 1007 * @param reclaim_nc_src Only reclaim directories with outgoing namecache 1008 * entries if this argument is strue 1009 * @param trigger Only reclaim vnodes with fewer than this many resident 1010 * pages. 1011 * @return The number of vnodes that were reclaimed. 1012 */ 1013 static int 1014 vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) 1015 { 1016 struct vnode *vp; 1017 int count, done, target; 1018 1019 done = 0; 1020 vn_start_write(NULL, &mp, V_WAIT); 1021 MNT_ILOCK(mp); 1022 count = mp->mnt_nvnodelistsize; 1023 target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1); 1024 target = target / 10 + 1; 1025 while (count != 0 && done < target) { 1026 vp = TAILQ_FIRST(&mp->mnt_nvnodelist); 1027 while (vp != NULL && vp->v_type == VMARKER) 1028 vp = TAILQ_NEXT(vp, v_nmntvnodes); 1029 if (vp == NULL) 1030 break; 1031 /* 1032 * XXX LRU is completely broken for non-free vnodes. First 1033 * by calling here in mountpoint order, then by moving 1034 * unselected vnodes to the end here, and most grossly by 1035 * removing the vlruvp() function that was supposed to 1036 * maintain the order. (This function was born broken 1037 * since syncer problems prevented it doing anything.) The 1038 * order is closer to LRC (C = Created). 1039 * 1040 * LRU reclaiming of vnodes seems to have last worked in 1041 * FreeBSD-3 where LRU wasn't mentioned under any spelling. 1042 * Then there was no hold count, and inactive vnodes were 1043 * simply put on the free list in LRU order. The separate 1044 * lists also break LRU. We prefer to reclaim from the 1045 * free list for technical reasons. This tends to thrash 1046 * the free list to keep very unrecently used held vnodes. 1047 * The problem is mitigated by keeping the free list large. 1048 */ 1049 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1050 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1051 --count; 1052 if (!VI_TRYLOCK(vp)) 1053 goto next_iter; 1054 /* 1055 * If it's been deconstructed already, it's still 1056 * referenced, or it exceeds the trigger, skip it. 1057 * Also skip free vnodes. We are trying to make space 1058 * to expand the free list, not reduce it. 1059 */ 1060 if (vp->v_usecount || 1061 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1062 ((vp->v_iflag & VI_FREE) != 0) || 1063 VN_IS_DOOMED(vp) || (vp->v_object != NULL && 1064 vp->v_object->resident_page_count > trigger)) { 1065 VI_UNLOCK(vp); 1066 goto next_iter; 1067 } 1068 MNT_IUNLOCK(mp); 1069 vholdl(vp); 1070 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) { 1071 vdrop(vp); 1072 goto next_iter_mntunlocked; 1073 } 1074 VI_LOCK(vp); 1075 /* 1076 * v_usecount may have been bumped after VOP_LOCK() dropped 1077 * the vnode interlock and before it was locked again. 1078 * 1079 * It is not necessary to recheck VIRF_DOOMED because it can 1080 * only be set by another thread that holds both the vnode 1081 * lock and vnode interlock. If another thread has the 1082 * vnode lock before we get to VOP_LOCK() and obtains the 1083 * vnode interlock after VOP_LOCK() drops the vnode 1084 * interlock, the other thread will be unable to drop the 1085 * vnode lock before our VOP_LOCK() call fails. 
1086 */ 1087 if (vp->v_usecount || 1088 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || 1089 (vp->v_object != NULL && 1090 vp->v_object->resident_page_count > trigger)) { 1091 VOP_UNLOCK(vp); 1092 vdropl(vp); 1093 goto next_iter_mntunlocked; 1094 } 1095 KASSERT(!VN_IS_DOOMED(vp), 1096 ("VIRF_DOOMED unexpectedly detected in vlrureclaim()")); 1097 counter_u64_add(recycles_count, 1); 1098 vgonel(vp); 1099 VOP_UNLOCK(vp); 1100 vdropl(vp); 1101 done++; 1102 next_iter_mntunlocked: 1103 if (!should_yield()) 1104 goto relock_mnt; 1105 goto yield; 1106 next_iter: 1107 if (!should_yield()) 1108 continue; 1109 MNT_IUNLOCK(mp); 1110 yield: 1111 kern_yield(PRI_USER); 1112 relock_mnt: 1113 MNT_ILOCK(mp); 1114 } 1115 MNT_IUNLOCK(mp); 1116 vn_finished_write(mp); 1117 return done; 1118 } 1119 1120 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */ 1121 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 1122 0, 1123 "limit on vnode free requests per call to the vnlru_free routine"); 1124 1125 /* 1126 * Attempt to reduce the free list by the requested amount. 1127 */ 1128 static void 1129 vnlru_free_locked(int count, struct vfsops *mnt_op) 1130 { 1131 struct vnode *vp; 1132 struct mount *mp; 1133 bool tried_batches; 1134 1135 tried_batches = false; 1136 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1137 if (count > max_vnlru_free) 1138 count = max_vnlru_free; 1139 for (; count > 0; count--) { 1140 vp = TAILQ_FIRST(&vnode_free_list); 1141 /* 1142 * The list can be modified while the free_list_mtx 1143 * has been dropped and vp could be NULL here. 1144 */ 1145 if (vp == NULL) { 1146 if (tried_batches) 1147 break; 1148 mtx_unlock(&vnode_free_list_mtx); 1149 vnlru_return_batches(mnt_op); 1150 tried_batches = true; 1151 mtx_lock(&vnode_free_list_mtx); 1152 continue; 1153 } 1154 1155 VNASSERT(vp->v_op != NULL, vp, 1156 ("vnlru_free: vnode already reclaimed.")); 1157 KASSERT((vp->v_iflag & VI_FREE) != 0, 1158 ("Removing vnode not on freelist")); 1159 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1160 ("Mangling active vnode")); 1161 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 1162 1163 /* 1164 * Don't recycle if our vnode is from different type 1165 * of mount point. Note that mp is type-safe, the 1166 * check does not reach unmapped address even if 1167 * vnode is reclaimed. 1168 * Don't recycle if we can't get the interlock without 1169 * blocking. 1170 */ 1171 if ((mnt_op != NULL && (mp = vp->v_mount) != NULL && 1172 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) { 1173 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist); 1174 continue; 1175 } 1176 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0, 1177 vp, ("vp inconsistent on freelist")); 1178 1179 /* 1180 * The clear of VI_FREE prevents activation of the 1181 * vnode. There is no sense in putting the vnode on 1182 * the mount point active list, only to remove it 1183 * later during recycling. Inline the relevant part 1184 * of vholdl(), to avoid triggering assertions or 1185 * activating. 1186 */ 1187 freevnodes--; 1188 vp->v_iflag &= ~VI_FREE; 1189 VNODE_REFCOUNT_FENCE_REL(); 1190 refcount_acquire(&vp->v_holdcnt); 1191 1192 mtx_unlock(&vnode_free_list_mtx); 1193 VI_UNLOCK(vp); 1194 vtryrecycle(vp); 1195 /* 1196 * If the recycled succeeded this vdrop will actually free 1197 * the vnode. If not it will simply place it back on 1198 * the free list. 
1199 */ 1200 vdrop(vp); 1201 mtx_lock(&vnode_free_list_mtx); 1202 } 1203 } 1204 1205 void 1206 vnlru_free(int count, struct vfsops *mnt_op) 1207 { 1208 1209 mtx_lock(&vnode_free_list_mtx); 1210 vnlru_free_locked(count, mnt_op); 1211 mtx_unlock(&vnode_free_list_mtx); 1212 } 1213 1214 1215 /* XXX some names and initialization are bad for limits and watermarks. */ 1216 static int 1217 vspace(void) 1218 { 1219 u_long rnumvnodes, rfreevnodes; 1220 int space; 1221 1222 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); 1223 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ 1224 vlowat = vhiwat / 2; 1225 rnumvnodes = atomic_load_long(&numvnodes); 1226 rfreevnodes = atomic_load_long(&freevnodes); 1227 if (rnumvnodes > desiredvnodes) 1228 return (0); 1229 space = desiredvnodes - rnumvnodes; 1230 if (freevnodes > wantfreevnodes) 1231 space += rfreevnodes - wantfreevnodes; 1232 return (space); 1233 } 1234 1235 static void 1236 vnlru_return_batch_locked(struct mount *mp) 1237 { 1238 struct vnode *vp; 1239 1240 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 1241 1242 if (mp->mnt_tmpfreevnodelistsize == 0) 1243 return; 1244 1245 TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) { 1246 VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp, 1247 ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist")); 1248 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 1249 } 1250 mtx_lock(&vnode_free_list_mtx); 1251 TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist); 1252 freevnodes += mp->mnt_tmpfreevnodelistsize; 1253 mtx_unlock(&vnode_free_list_mtx); 1254 mp->mnt_tmpfreevnodelistsize = 0; 1255 } 1256 1257 static void 1258 vnlru_return_batch(struct mount *mp) 1259 { 1260 1261 mtx_lock(&mp->mnt_listmtx); 1262 vnlru_return_batch_locked(mp); 1263 mtx_unlock(&mp->mnt_listmtx); 1264 } 1265 1266 static void 1267 vnlru_return_batches(struct vfsops *mnt_op) 1268 { 1269 struct mount *mp, *nmp; 1270 bool need_unbusy; 1271 1272 mtx_lock(&mountlist_mtx); 1273 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1274 need_unbusy = false; 1275 if (mnt_op != NULL && mp->mnt_op != mnt_op) 1276 goto next; 1277 if (mp->mnt_tmpfreevnodelistsize == 0) 1278 goto next; 1279 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) { 1280 vnlru_return_batch(mp); 1281 need_unbusy = true; 1282 mtx_lock(&mountlist_mtx); 1283 } 1284 next: 1285 nmp = TAILQ_NEXT(mp, mnt_list); 1286 if (need_unbusy) 1287 vfs_unbusy(mp); 1288 } 1289 mtx_unlock(&mountlist_mtx); 1290 } 1291 1292 /* 1293 * Attempt to recycle vnodes in a context that is always safe to block. 1294 * Calling vlrurecycle() from the bowels of filesystem code has some 1295 * interesting deadlock problems. 1296 */ 1297 static struct proc *vnlruproc; 1298 static int vnlruproc_sig; 1299 1300 static void 1301 vnlru_proc(void) 1302 { 1303 u_long rnumvnodes, rfreevnodes; 1304 struct mount *mp, *nmp; 1305 unsigned long onumvnodes; 1306 int done, force, trigger, usevnodes, vsp; 1307 bool reclaim_nc_src; 1308 1309 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc, 1310 SHUTDOWN_PRI_FIRST); 1311 1312 force = 0; 1313 for (;;) { 1314 kproc_suspend_check(vnlruproc); 1315 mtx_lock(&vnode_free_list_mtx); 1316 rnumvnodes = atomic_load_long(&numvnodes); 1317 /* 1318 * If numvnodes is too large (due to desiredvnodes being 1319 * adjusted using its sysctl, or emergency growth), first 1320 * try to reduce it by discarding from the free list. 
1321 */ 1322 if (rnumvnodes > desiredvnodes) 1323 vnlru_free_locked(rnumvnodes - desiredvnodes, NULL); 1324 /* 1325 * Sleep if the vnode cache is in a good state. This is 1326 * when it is not over-full and has space for about a 4% 1327 * or 9% expansion (by growing its size or inexcessively 1328 * reducing its free list). Otherwise, try to reclaim 1329 * space for a 10% expansion. 1330 */ 1331 if (vstir && force == 0) { 1332 force = 1; 1333 vstir = 0; 1334 } 1335 vsp = vspace(); 1336 if (vsp >= vlowat && force == 0) { 1337 vnlruproc_sig = 0; 1338 wakeup(&vnlruproc_sig); 1339 msleep(vnlruproc, &vnode_free_list_mtx, 1340 PVFS|PDROP, "vlruwt", hz); 1341 continue; 1342 } 1343 mtx_unlock(&vnode_free_list_mtx); 1344 done = 0; 1345 rnumvnodes = atomic_load_long(&numvnodes); 1346 rfreevnodes = atomic_load_long(&freevnodes); 1347 1348 onumvnodes = rnumvnodes; 1349 /* 1350 * Calculate parameters for recycling. These are the same 1351 * throughout the loop to give some semblance of fairness. 1352 * The trigger point is to avoid recycling vnodes with lots 1353 * of resident pages. We aren't trying to free memory; we 1354 * are trying to recycle or at least free vnodes. 1355 */ 1356 if (rnumvnodes <= desiredvnodes) 1357 usevnodes = rnumvnodes - rfreevnodes; 1358 else 1359 usevnodes = rnumvnodes; 1360 if (usevnodes <= 0) 1361 usevnodes = 1; 1362 /* 1363 * The trigger value is is chosen to give a conservatively 1364 * large value to ensure that it alone doesn't prevent 1365 * making progress. The value can easily be so large that 1366 * it is effectively infinite in some congested and 1367 * misconfigured cases, and this is necessary. Normally 1368 * it is about 8 to 100 (pages), which is quite large. 1369 */ 1370 trigger = vm_cnt.v_page_count * 2 / usevnodes; 1371 if (force < 2) 1372 trigger = vsmalltrigger; 1373 reclaim_nc_src = force >= 3; 1374 mtx_lock(&mountlist_mtx); 1375 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { 1376 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) { 1377 nmp = TAILQ_NEXT(mp, mnt_list); 1378 continue; 1379 } 1380 done += vlrureclaim(mp, reclaim_nc_src, trigger); 1381 mtx_lock(&mountlist_mtx); 1382 nmp = TAILQ_NEXT(mp, mnt_list); 1383 vfs_unbusy(mp); 1384 } 1385 mtx_unlock(&mountlist_mtx); 1386 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes) 1387 uma_reclaim(UMA_RECLAIM_DRAIN); 1388 if (done == 0) { 1389 if (force == 0 || force == 1) { 1390 force = 2; 1391 continue; 1392 } 1393 if (force == 2) { 1394 force = 3; 1395 continue; 1396 } 1397 force = 0; 1398 vnlru_nowhere++; 1399 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3); 1400 } else 1401 kern_yield(PRI_USER); 1402 /* 1403 * After becoming active to expand above low water, keep 1404 * active until above high water. 1405 */ 1406 vsp = vspace(); 1407 force = vsp < vhiwat; 1408 } 1409 } 1410 1411 static struct kproc_desc vnlru_kp = { 1412 "vnlru", 1413 vnlru_proc, 1414 &vnlruproc 1415 }; 1416 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, 1417 &vnlru_kp); 1418 1419 /* 1420 * Routines having to do with the management of the vnode table. 1421 */ 1422 1423 /* 1424 * Try to recycle a freed vnode. We abort if anyone picks up a reference 1425 * before we actually vgone(). This function must be called with the vnode 1426 * held to prevent the vnode from being returned to the free list midway 1427 * through vgone(). 
1428 */ 1429 static int 1430 vtryrecycle(struct vnode *vp) 1431 { 1432 struct mount *vnmp; 1433 1434 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 1435 VNASSERT(vp->v_holdcnt, vp, 1436 ("vtryrecycle: Recycling vp %p without a reference.", vp)); 1437 /* 1438 * This vnode may found and locked via some other list, if so we 1439 * can't recycle it yet. 1440 */ 1441 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1442 CTR2(KTR_VFS, 1443 "%s: impossible to recycle, vp %p lock is already held", 1444 __func__, vp); 1445 return (EWOULDBLOCK); 1446 } 1447 /* 1448 * Don't recycle if its filesystem is being suspended. 1449 */ 1450 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1451 VOP_UNLOCK(vp); 1452 CTR2(KTR_VFS, 1453 "%s: impossible to recycle, cannot start the write for %p", 1454 __func__, vp); 1455 return (EBUSY); 1456 } 1457 /* 1458 * If we got this far, we need to acquire the interlock and see if 1459 * anyone picked up this vnode from another list. If not, we will 1460 * mark it with DOOMED via vgonel() so that anyone who does find it 1461 * will skip over it. 1462 */ 1463 VI_LOCK(vp); 1464 if (vp->v_usecount) { 1465 VOP_UNLOCK(vp); 1466 VI_UNLOCK(vp); 1467 vn_finished_write(vnmp); 1468 CTR2(KTR_VFS, 1469 "%s: impossible to recycle, %p is already referenced", 1470 __func__, vp); 1471 return (EBUSY); 1472 } 1473 if (!VN_IS_DOOMED(vp)) { 1474 counter_u64_add(recycles_free_count, 1); 1475 vgonel(vp); 1476 } 1477 VOP_UNLOCK(vp); 1478 VI_UNLOCK(vp); 1479 vn_finished_write(vnmp); 1480 return (0); 1481 } 1482 1483 static void 1484 vcheckspace(void) 1485 { 1486 int vsp; 1487 1488 vsp = vspace(); 1489 if (vsp < vlowat && vnlruproc_sig == 0) { 1490 vnlruproc_sig = 1; 1491 wakeup(vnlruproc); 1492 } 1493 } 1494 1495 /* 1496 * Wait if necessary for space for a new vnode. 1497 */ 1498 static int 1499 getnewvnode_wait(int suspended) 1500 { 1501 1502 mtx_assert(&vnode_free_list_mtx, MA_OWNED); 1503 if (numvnodes >= desiredvnodes) { 1504 if (suspended) { 1505 /* 1506 * The file system is being suspended. We cannot 1507 * risk a deadlock here, so allow allocation of 1508 * another vnode even if this would give too many. 1509 */ 1510 return (0); 1511 } 1512 if (vnlruproc_sig == 0) { 1513 vnlruproc_sig = 1; /* avoid unnecessary wakeups */ 1514 wakeup(vnlruproc); 1515 } 1516 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS, 1517 "vlruwk", hz); 1518 } 1519 /* Post-adjust like the pre-adjust in getnewvnode(). */ 1520 if (numvnodes + 1 > desiredvnodes && freevnodes > 1) 1521 vnlru_free_locked(1, NULL); 1522 return (numvnodes >= desiredvnodes ? ENFILE : 0); 1523 } 1524 1525 /* 1526 * This hack is fragile, and probably not needed any more now that the 1527 * watermark handling works. 1528 */ 1529 void 1530 getnewvnode_reserve(u_int count) 1531 { 1532 u_long rnumvnodes, rfreevnodes; 1533 struct thread *td; 1534 1535 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */ 1536 /* XXX no longer so quick, but this part is not racy. */ 1537 mtx_lock(&vnode_free_list_mtx); 1538 rnumvnodes = atomic_load_long(&numvnodes); 1539 rfreevnodes = atomic_load_long(&freevnodes); 1540 if (rnumvnodes + count > desiredvnodes && rfreevnodes > wantfreevnodes) 1541 vnlru_free_locked(ulmin(rnumvnodes + count - desiredvnodes, 1542 rfreevnodes - wantfreevnodes), NULL); 1543 mtx_unlock(&vnode_free_list_mtx); 1544 1545 td = curthread; 1546 /* First try to be quick and racy. 
*/ 1547 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) { 1548 td->td_vp_reserv += count; 1549 vcheckspace(); /* XXX no longer so quick, but more racy */ 1550 return; 1551 } else 1552 atomic_subtract_long(&numvnodes, count); 1553 1554 mtx_lock(&vnode_free_list_mtx); 1555 while (count > 0) { 1556 if (getnewvnode_wait(0) == 0) { 1557 count--; 1558 td->td_vp_reserv++; 1559 atomic_add_long(&numvnodes, 1); 1560 } 1561 } 1562 vcheckspace(); 1563 mtx_unlock(&vnode_free_list_mtx); 1564 } 1565 1566 /* 1567 * This hack is fragile, especially if desiredvnodes or wantvnodes are 1568 * misconfgured or changed significantly. Reducing desiredvnodes below 1569 * the reserved amount should cause bizarre behaviour like reducing it 1570 * below the number of active vnodes -- the system will try to reduce 1571 * numvnodes to match, but should fail, so the subtraction below should 1572 * not overflow. 1573 */ 1574 void 1575 getnewvnode_drop_reserve(void) 1576 { 1577 struct thread *td; 1578 1579 td = curthread; 1580 atomic_subtract_long(&numvnodes, td->td_vp_reserv); 1581 td->td_vp_reserv = 0; 1582 } 1583 1584 /* 1585 * Return the next vnode from the free list. 1586 */ 1587 int 1588 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1589 struct vnode **vpp) 1590 { 1591 struct vnode *vp; 1592 struct thread *td; 1593 struct lock_object *lo; 1594 static int cyclecount; 1595 int error __unused; 1596 1597 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 1598 1599 KASSERT(vops->registered, 1600 ("%s: not registered vector op %p\n", __func__, vops)); 1601 1602 vp = NULL; 1603 td = curthread; 1604 if (td->td_vp_reserv > 0) { 1605 td->td_vp_reserv -= 1; 1606 goto alloc; 1607 } 1608 mtx_lock(&vnode_free_list_mtx); 1609 if (numvnodes < desiredvnodes) 1610 cyclecount = 0; 1611 else if (cyclecount++ >= freevnodes) { 1612 cyclecount = 0; 1613 vstir = 1; 1614 } 1615 /* 1616 * Grow the vnode cache if it will not be above its target max 1617 * after growing. Otherwise, if the free list is nonempty, try 1618 * to reclaim 1 item from it before growing the cache (possibly 1619 * above its target max if the reclamation failed or is delayed). 1620 * Otherwise, wait for some space. In all cases, schedule 1621 * vnlru_proc() if we are getting short of space. The watermarks 1622 * should be chosen so that we never wait or even reclaim from 1623 * the free list to below its target minimum. 1624 */ 1625 if (numvnodes + 1 <= desiredvnodes) 1626 ; 1627 else if (freevnodes > 0) 1628 vnlru_free_locked(1, NULL); 1629 else { 1630 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag & 1631 MNTK_SUSPEND)); 1632 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */ 1633 if (error != 0) { 1634 mtx_unlock(&vnode_free_list_mtx); 1635 return (error); 1636 } 1637 #endif 1638 } 1639 vcheckspace(); 1640 atomic_add_long(&numvnodes, 1); 1641 mtx_unlock(&vnode_free_list_mtx); 1642 alloc: 1643 counter_u64_add(vnodes_created, 1); 1644 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK); 1645 /* 1646 * Locks are given the generic name "vnode" when created. 1647 * Follow the historic practice of using the filesystem 1648 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 1649 * 1650 * Locks live in a witness group keyed on their name. Thus, 1651 * when a lock is renamed, it must also move from the witness 1652 * group of its old name to the witness group of its new name. 1653 * 1654 * The change only needs to be made when the vnode moves 1655 * from one filesystem type to another. 
We ensure that each 1656 * filesystem use a single static name pointer for its tag so 1657 * that we can compare pointers rather than doing a strcmp(). 1658 */ 1659 lo = &vp->v_vnlock->lock_object; 1660 if (lo->lo_name != tag) { 1661 lo->lo_name = tag; 1662 WITNESS_DESTROY(lo); 1663 WITNESS_INIT(lo, tag); 1664 } 1665 /* 1666 * By default, don't allow shared locks unless filesystems opt-in. 1667 */ 1668 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 1669 /* 1670 * Finalize various vnode identity bits. 1671 */ 1672 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 1673 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 1674 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 1675 vp->v_type = VNON; 1676 vp->v_op = vops; 1677 v_init_counters(vp); 1678 vp->v_bufobj.bo_ops = &buf_ops_bio; 1679 #ifdef DIAGNOSTIC 1680 if (mp == NULL && vops != &dead_vnodeops) 1681 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 1682 #endif 1683 #ifdef MAC 1684 mac_vnode_init(vp); 1685 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 1686 mac_vnode_associate_singlelabel(mp, vp); 1687 #endif 1688 if (mp != NULL) { 1689 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 1690 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0) 1691 vp->v_vflag |= VV_NOKNOTE; 1692 } 1693 1694 /* 1695 * For the filesystems which do not use vfs_hash_insert(), 1696 * still initialize v_hash to have vfs_hash_index() useful. 1697 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 1698 * its own hashing. 1699 */ 1700 vp->v_hash = (uintptr_t)vp >> vnsz2log; 1701 1702 *vpp = vp; 1703 return (0); 1704 } 1705 1706 static void 1707 freevnode(struct vnode *vp) 1708 { 1709 struct bufobj *bo; 1710 1711 /* 1712 * The vnode has been marked for destruction, so free it. 1713 * 1714 * The vnode will be returned to the zone where it will 1715 * normally remain until it is needed for another vnode. We 1716 * need to cleanup (or verify that the cleanup has already 1717 * been done) any residual data left from its current use 1718 * so as not to contaminate the freshly allocated vnode. 
1719 */ 1720 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 1721 atomic_subtract_long(&numvnodes, 1); 1722 bo = &vp->v_bufobj; 1723 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 1724 ("cleaned vnode still on the free list.")); 1725 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 1726 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); 1727 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 1728 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 1729 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 1730 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 1731 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 1732 ("clean blk trie not empty")); 1733 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 1734 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 1735 ("dirty blk trie not empty")); 1736 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); 1737 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); 1738 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); 1739 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 1740 ("Dangling rangelock waiters")); 1741 VI_UNLOCK(vp); 1742 #ifdef MAC 1743 mac_vnode_destroy(vp); 1744 #endif 1745 if (vp->v_pollinfo != NULL) { 1746 destroy_vpollinfo(vp->v_pollinfo); 1747 vp->v_pollinfo = NULL; 1748 } 1749 #ifdef INVARIANTS 1750 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ 1751 vp->v_op = NULL; 1752 #endif 1753 vp->v_mountedhere = NULL; 1754 vp->v_unpcb = NULL; 1755 vp->v_rdev = NULL; 1756 vp->v_fifoinfo = NULL; 1757 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 1758 vp->v_irflag = 0; 1759 vp->v_iflag = 0; 1760 vp->v_vflag = 0; 1761 bo->bo_flag = 0; 1762 uma_zfree(vnode_zone, vp); 1763 } 1764 1765 /* 1766 * Delete from old mount point vnode list, if on one. 1767 */ 1768 static void 1769 delmntque(struct vnode *vp) 1770 { 1771 struct mount *mp; 1772 1773 mp = vp->v_mount; 1774 if (mp == NULL) 1775 return; 1776 MNT_ILOCK(mp); 1777 VI_LOCK(vp); 1778 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize, 1779 ("Active vnode list size %d > Vnode list size %d", 1780 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize)); 1781 if (vp->v_iflag & VI_ACTIVE) { 1782 vp->v_iflag &= ~VI_ACTIVE; 1783 mtx_lock(&mp->mnt_listmtx); 1784 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 1785 mp->mnt_activevnodelistsize--; 1786 mtx_unlock(&mp->mnt_listmtx); 1787 } 1788 vp->v_mount = NULL; 1789 VI_UNLOCK(vp); 1790 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 1791 ("bad mount point vnode list size")); 1792 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1793 mp->mnt_nvnodelistsize--; 1794 MNT_REL(mp); 1795 MNT_IUNLOCK(mp); 1796 } 1797 1798 static void 1799 insmntque_stddtr(struct vnode *vp, void *dtr_arg) 1800 { 1801 1802 vp->v_data = NULL; 1803 vp->v_op = &dead_vnodeops; 1804 vgone(vp); 1805 vput(vp); 1806 } 1807 1808 /* 1809 * Insert into list of vnodes for the new mount point, if available. 
1810 */ 1811 int 1812 insmntque1(struct vnode *vp, struct mount *mp, 1813 void (*dtr)(struct vnode *, void *), void *dtr_arg) 1814 { 1815 1816 KASSERT(vp->v_mount == NULL, 1817 ("insmntque: vnode already on per mount vnode list")); 1818 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 1819 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 1820 1821 /* 1822 * We acquire the vnode interlock early to ensure that the 1823 * vnode cannot be recycled by another process releasing a 1824 * holdcnt on it before we get it on both the vnode list 1825 * and the active vnode list. The mount mutex protects only 1826 * manipulation of the vnode list and the vnode freelist 1827 * mutex protects only manipulation of the active vnode list. 1828 * Hence the need to hold the vnode interlock throughout. 1829 */ 1830 MNT_ILOCK(mp); 1831 VI_LOCK(vp); 1832 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 1833 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 1834 mp->mnt_nvnodelistsize == 0)) && 1835 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 1836 VI_UNLOCK(vp); 1837 MNT_IUNLOCK(mp); 1838 if (dtr != NULL) 1839 dtr(vp, dtr_arg); 1840 return (EBUSY); 1841 } 1842 vp->v_mount = mp; 1843 MNT_REF(mp); 1844 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 1845 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 1846 ("neg mount point vnode list size")); 1847 mp->mnt_nvnodelistsize++; 1848 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 1849 ("Activating already active vnode")); 1850 vp->v_iflag |= VI_ACTIVE; 1851 mtx_lock(&mp->mnt_listmtx); 1852 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 1853 mp->mnt_activevnodelistsize++; 1854 mtx_unlock(&mp->mnt_listmtx); 1855 VI_UNLOCK(vp); 1856 MNT_IUNLOCK(mp); 1857 return (0); 1858 } 1859 1860 int 1861 insmntque(struct vnode *vp, struct mount *mp) 1862 { 1863 1864 return (insmntque1(vp, mp, insmntque_stddtr, NULL)); 1865 } 1866 1867 /* 1868 * Flush out and invalidate all buffers associated with a bufobj 1869 * Called with the underlying object locked. 1870 */ 1871 int 1872 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 1873 { 1874 int error; 1875 1876 BO_LOCK(bo); 1877 if (flags & V_SAVE) { 1878 error = bufobj_wwait(bo, slpflag, slptimeo); 1879 if (error) { 1880 BO_UNLOCK(bo); 1881 return (error); 1882 } 1883 if (bo->bo_dirty.bv_cnt > 0) { 1884 BO_UNLOCK(bo); 1885 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0) 1886 return (error); 1887 /* 1888 * XXX We could save a lock/unlock if this was only 1889 * enabled under INVARIANTS 1890 */ 1891 BO_LOCK(bo); 1892 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) 1893 panic("vinvalbuf: dirty bufs"); 1894 } 1895 } 1896 /* 1897 * If you alter this loop please notice that interlock is dropped and 1898 * reacquired in flushbuflist. Special care is needed to ensure that 1899 * no race conditions occur from this. 1900 */ 1901 do { 1902 error = flushbuflist(&bo->bo_clean, 1903 flags, bo, slpflag, slptimeo); 1904 if (error == 0 && !(flags & V_CLEANONLY)) 1905 error = flushbuflist(&bo->bo_dirty, 1906 flags, bo, slpflag, slptimeo); 1907 if (error != 0 && error != EAGAIN) { 1908 BO_UNLOCK(bo); 1909 return (error); 1910 } 1911 } while (error != 0); 1912 1913 /* 1914 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 1915 * have write I/O in-progress but if there is a VM object then the 1916 * VM object can also have read-I/O in-progress. 
1917 */ 1918 do { 1919 bufobj_wwait(bo, 0, 0); 1920 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 1921 BO_UNLOCK(bo); 1922 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 1923 BO_LOCK(bo); 1924 } 1925 } while (bo->bo_numoutput > 0); 1926 BO_UNLOCK(bo); 1927 1928 /* 1929 * Destroy the copy in the VM cache, too. 1930 */ 1931 if (bo->bo_object != NULL && 1932 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 1933 VM_OBJECT_WLOCK(bo->bo_object); 1934 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 1935 OBJPR_CLEANONLY : 0); 1936 VM_OBJECT_WUNLOCK(bo->bo_object); 1937 } 1938 1939 #ifdef INVARIANTS 1940 BO_LOCK(bo); 1941 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 1942 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 1943 bo->bo_clean.bv_cnt > 0)) 1944 panic("vinvalbuf: flush failed"); 1945 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 1946 bo->bo_dirty.bv_cnt > 0) 1947 panic("vinvalbuf: flush dirty failed"); 1948 BO_UNLOCK(bo); 1949 #endif 1950 return (0); 1951 } 1952 1953 /* 1954 * Flush out and invalidate all buffers associated with a vnode. 1955 * Called with the underlying object locked. 1956 */ 1957 int 1958 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 1959 { 1960 1961 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 1962 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 1963 if (vp->v_object != NULL && vp->v_object->handle != vp) 1964 return (0); 1965 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 1966 } 1967 1968 /* 1969 * Flush out buffers on the specified list. 1970 * 1971 */ 1972 static int 1973 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 1974 int slptimeo) 1975 { 1976 struct buf *bp, *nbp; 1977 int retval, error; 1978 daddr_t lblkno; 1979 b_xflags_t xflags; 1980 1981 ASSERT_BO_WLOCKED(bo); 1982 1983 retval = 0; 1984 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 1985 /* 1986 * If we are flushing both V_NORMAL and V_ALT buffers then 1987 * do not skip any buffers. If we are flushing only V_NORMAL 1988 * buffers then skip buffers marked as BX_ALTDATA. If we are 1989 * flushing only V_ALT buffers then skip buffers not marked 1990 * as BX_ALTDATA. 1991 */ 1992 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 1993 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 1994 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 1995 continue; 1996 } 1997 if (nbp != NULL) { 1998 lblkno = nbp->b_lblkno; 1999 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2000 } 2001 retval = EAGAIN; 2002 error = BUF_TIMELOCK(bp, 2003 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2004 "flushbuf", slpflag, slptimeo); 2005 if (error) { 2006 BO_LOCK(bo); 2007 return (error != ENOLCK ? error : EAGAIN); 2008 } 2009 KASSERT(bp->b_bufobj == bo, 2010 ("bp %p wrong b_bufobj %p should be %p", 2011 bp, bp->b_bufobj, bo)); 2012 /* 2013 * XXX Since there are no node locks for NFS, I 2014 * believe there is a slight chance that a delayed 2015 * write will occur while sleeping just above, so 2016 * check for it. 2017 */ 2018 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2019 (flags & V_SAVE)) { 2020 bremfree(bp); 2021 bp->b_flags |= B_ASYNC; 2022 bwrite(bp); 2023 BO_LOCK(bo); 2024 return (EAGAIN); /* XXX: why not loop ? 
*/ 2025 } 2026 bremfree(bp); 2027 bp->b_flags |= (B_INVAL | B_RELBUF); 2028 bp->b_flags &= ~B_ASYNC; 2029 brelse(bp); 2030 BO_LOCK(bo); 2031 if (nbp == NULL) 2032 break; 2033 nbp = gbincore(bo, lblkno); 2034 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2035 != xflags) 2036 break; /* nbp invalid */ 2037 } 2038 return (retval); 2039 } 2040 2041 int 2042 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2043 { 2044 struct buf *bp; 2045 int error; 2046 daddr_t lblkno; 2047 2048 ASSERT_BO_LOCKED(bo); 2049 2050 for (lblkno = startn;;) { 2051 again: 2052 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2053 if (bp == NULL || bp->b_lblkno >= endn || 2054 bp->b_lblkno < startn) 2055 break; 2056 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2057 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2058 if (error != 0) { 2059 BO_RLOCK(bo); 2060 if (error == ENOLCK) 2061 goto again; 2062 return (error); 2063 } 2064 KASSERT(bp->b_bufobj == bo, 2065 ("bp %p wrong b_bufobj %p should be %p", 2066 bp, bp->b_bufobj, bo)); 2067 lblkno = bp->b_lblkno + 1; 2068 if ((bp->b_flags & B_MANAGED) == 0) 2069 bremfree(bp); 2070 bp->b_flags |= B_RELBUF; 2071 /* 2072 * In the VMIO case, use the B_NOREUSE flag to hint that the 2073 * pages backing each buffer in the range are unlikely to be 2074 * reused. Dirty buffers will have the hint applied once 2075 * they've been written. 2076 */ 2077 if ((bp->b_flags & B_VMIO) != 0) 2078 bp->b_flags |= B_NOREUSE; 2079 brelse(bp); 2080 BO_RLOCK(bo); 2081 } 2082 return (0); 2083 } 2084 2085 /* 2086 * Truncate a file's buffer and pages to a specified length. This 2087 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2088 * sync activity. 2089 */ 2090 int 2091 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2092 { 2093 struct buf *bp, *nbp; 2094 struct bufobj *bo; 2095 daddr_t startlbn; 2096 2097 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2098 vp, blksize, (uintmax_t)length); 2099 2100 /* 2101 * Round up to the *next* lbn. 2102 */ 2103 startlbn = howmany(length, blksize); 2104 2105 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2106 2107 bo = &vp->v_bufobj; 2108 restart_unlocked: 2109 BO_LOCK(bo); 2110 2111 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2112 ; 2113 2114 if (length > 0) { 2115 restartsync: 2116 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2117 if (bp->b_lblkno > 0) 2118 continue; 2119 /* 2120 * Since we hold the vnode lock this should only 2121 * fail if we're racing with the buf daemon. 2122 */ 2123 if (BUF_LOCK(bp, 2124 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2125 BO_LOCKPTR(bo)) == ENOLCK) 2126 goto restart_unlocked; 2127 2128 VNASSERT((bp->b_flags & B_DELWRI), vp, 2129 ("buf(%p) on dirty queue without DELWRI", bp)); 2130 2131 bremfree(bp); 2132 bawrite(bp); 2133 BO_LOCK(bo); 2134 goto restartsync; 2135 } 2136 } 2137 2138 bufobj_wwait(bo, 0, 0); 2139 BO_UNLOCK(bo); 2140 vnode_pager_setsize(vp, length); 2141 2142 return (0); 2143 } 2144 2145 /* 2146 * Invalidate the cached pages of a file's buffer within the range of block 2147 * numbers [startlbn, endlbn). 
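 *
 * Hedged usage sketch (illustrative): a filesystem deallocating the
 * block range [startlbn, endlbn) while holding the vnode lock might do
 *
 *	v_inval_buf_range(vp, startlbn, endlbn, vp->v_bufobj.bo_bsize);
 *
 * Note that the blksize argument must match bo_bsize; the function
 * asserts this below.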
2148 */ 2149 void 2150 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2151 int blksize) 2152 { 2153 struct bufobj *bo; 2154 off_t start, end; 2155 2156 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2157 2158 start = blksize * startlbn; 2159 end = blksize * endlbn; 2160 2161 bo = &vp->v_bufobj; 2162 BO_LOCK(bo); 2163 MPASS(blksize == bo->bo_bsize); 2164 2165 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2166 ; 2167 2168 BO_UNLOCK(bo); 2169 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2170 } 2171 2172 static int 2173 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2174 daddr_t startlbn, daddr_t endlbn) 2175 { 2176 struct buf *bp, *nbp; 2177 bool anyfreed; 2178 2179 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2180 ASSERT_BO_LOCKED(bo); 2181 2182 do { 2183 anyfreed = false; 2184 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2185 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2186 continue; 2187 if (BUF_LOCK(bp, 2188 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2189 BO_LOCKPTR(bo)) == ENOLCK) { 2190 BO_LOCK(bo); 2191 return (EAGAIN); 2192 } 2193 2194 bremfree(bp); 2195 bp->b_flags |= B_INVAL | B_RELBUF; 2196 bp->b_flags &= ~B_ASYNC; 2197 brelse(bp); 2198 anyfreed = true; 2199 2200 BO_LOCK(bo); 2201 if (nbp != NULL && 2202 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2203 nbp->b_vp != vp || 2204 (nbp->b_flags & B_DELWRI) != 0)) 2205 return (EAGAIN); 2206 } 2207 2208 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2209 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2210 continue; 2211 if (BUF_LOCK(bp, 2212 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2213 BO_LOCKPTR(bo)) == ENOLCK) { 2214 BO_LOCK(bo); 2215 return (EAGAIN); 2216 } 2217 bremfree(bp); 2218 bp->b_flags |= B_INVAL | B_RELBUF; 2219 bp->b_flags &= ~B_ASYNC; 2220 brelse(bp); 2221 anyfreed = true; 2222 2223 BO_LOCK(bo); 2224 if (nbp != NULL && 2225 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2226 (nbp->b_vp != vp) || 2227 (nbp->b_flags & B_DELWRI) == 0)) 2228 return (EAGAIN); 2229 } 2230 } while (anyfreed); 2231 return (0); 2232 } 2233 2234 static void 2235 buf_vlist_remove(struct buf *bp) 2236 { 2237 struct bufv *bv; 2238 2239 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2240 ASSERT_BO_WLOCKED(bp->b_bufobj); 2241 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) != 2242 (BX_VNDIRTY|BX_VNCLEAN), 2243 ("buf_vlist_remove: Buf %p is on two lists", bp)); 2244 if (bp->b_xflags & BX_VNDIRTY) 2245 bv = &bp->b_bufobj->bo_dirty; 2246 else 2247 bv = &bp->b_bufobj->bo_clean; 2248 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2249 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2250 bv->bv_cnt--; 2251 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2252 } 2253 2254 /* 2255 * Add the buffer to the sorted clean or dirty block list. 2256 * 2257 * NOTE: xflags is passed as a constant, optimizing this inline function! 2258 */ 2259 static void 2260 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2261 { 2262 struct bufv *bv; 2263 struct buf *n; 2264 int error; 2265 2266 ASSERT_BO_WLOCKED(bo); 2267 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2268 ("dead bo %p", bo)); 2269 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2270 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2271 bp->b_xflags |= xflags; 2272 if (xflags & BX_VNDIRTY) 2273 bv = &bo->bo_dirty; 2274 else 2275 bv = &bo->bo_clean; 2276 2277 /* 2278 * Keep the list ordered. Optimize empty list insertion. 
Assume 2279 * we tend to grow at the tail so lookup_le should usually be cheaper 2280 * than _ge. 2281 */ 2282 if (bv->bv_cnt == 0 || 2283 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2284 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2285 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2286 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2287 else 2288 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2289 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2290 if (error) 2291 panic("buf_vlist_add: Preallocated nodes insufficient."); 2292 bv->bv_cnt++; 2293 } 2294 2295 /* 2296 * Look up a buffer using the buffer tries. 2297 */ 2298 struct buf * 2299 gbincore(struct bufobj *bo, daddr_t lblkno) 2300 { 2301 struct buf *bp; 2302 2303 ASSERT_BO_LOCKED(bo); 2304 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2305 if (bp != NULL) 2306 return (bp); 2307 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno); 2308 } 2309 2310 /* 2311 * Associate a buffer with a vnode. 2312 */ 2313 void 2314 bgetvp(struct vnode *vp, struct buf *bp) 2315 { 2316 struct bufobj *bo; 2317 2318 bo = &vp->v_bufobj; 2319 ASSERT_BO_WLOCKED(bo); 2320 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2321 2322 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2323 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2324 ("bgetvp: bp already attached! %p", bp)); 2325 2326 vhold(vp); 2327 bp->b_vp = vp; 2328 bp->b_bufobj = bo; 2329 /* 2330 * Insert onto list for new vnode. 2331 */ 2332 buf_vlist_add(bp, bo, BX_VNCLEAN); 2333 } 2334 2335 /* 2336 * Disassociate a buffer from a vnode. 2337 */ 2338 void 2339 brelvp(struct buf *bp) 2340 { 2341 struct bufobj *bo; 2342 struct vnode *vp; 2343 2344 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2345 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2346 2347 /* 2348 * Delete from old vnode list, if on one. 2349 */ 2350 vp = bp->b_vp; /* XXX */ 2351 bo = bp->b_bufobj; 2352 BO_LOCK(bo); 2353 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2354 buf_vlist_remove(bp); 2355 else 2356 panic("brelvp: Buffer %p not on queue.", bp); 2357 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2358 bo->bo_flag &= ~BO_ONWORKLST; 2359 mtx_lock(&sync_mtx); 2360 LIST_REMOVE(bo, bo_synclist); 2361 syncer_worklist_len--; 2362 mtx_unlock(&sync_mtx); 2363 } 2364 bp->b_vp = NULL; 2365 bp->b_bufobj = NULL; 2366 BO_UNLOCK(bo); 2367 vdrop(vp); 2368 } 2369 2370 /* 2371 * Add an item to the syncer work queue. 
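 *
 * The delay argument selects a slot on the syncer wheel and is capped
 * at syncer_maxdelay - 2.  Callers in this file pass filedelay,
 * dirdelay or metadelay (reassignbuf()) or syncdelay (sync_vnode()),
 * with the bufobj write-locked, e.g. (illustrative sketch only):
 *
 *	BO_LOCK(bo);
 *	vn_syncer_add_to_worklist(bo, filedelay);
 *	BO_UNLOCK(bo);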
2372 */ 2373 static void 2374 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2375 { 2376 int slot; 2377 2378 ASSERT_BO_WLOCKED(bo); 2379 2380 mtx_lock(&sync_mtx); 2381 if (bo->bo_flag & BO_ONWORKLST) 2382 LIST_REMOVE(bo, bo_synclist); 2383 else { 2384 bo->bo_flag |= BO_ONWORKLST; 2385 syncer_worklist_len++; 2386 } 2387 2388 if (delay > syncer_maxdelay - 2) 2389 delay = syncer_maxdelay - 2; 2390 slot = (syncer_delayno + delay) & syncer_mask; 2391 2392 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2393 mtx_unlock(&sync_mtx); 2394 } 2395 2396 static int 2397 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2398 { 2399 int error, len; 2400 2401 mtx_lock(&sync_mtx); 2402 len = syncer_worklist_len - sync_vnode_count; 2403 mtx_unlock(&sync_mtx); 2404 error = SYSCTL_OUT(req, &len, sizeof(len)); 2405 return (error); 2406 } 2407 2408 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2409 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2410 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2411 2412 static struct proc *updateproc; 2413 static void sched_sync(void); 2414 static struct kproc_desc up_kp = { 2415 "syncer", 2416 sched_sync, 2417 &updateproc 2418 }; 2419 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2420 2421 static int 2422 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2423 { 2424 struct vnode *vp; 2425 struct mount *mp; 2426 2427 *bo = LIST_FIRST(slp); 2428 if (*bo == NULL) 2429 return (0); 2430 vp = bo2vnode(*bo); 2431 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2432 return (1); 2433 /* 2434 * We use vhold in case the vnode does not 2435 * successfully sync. vhold prevents the vnode from 2436 * going away when we unlock the sync_mtx so that 2437 * we can acquire the vnode interlock. 2438 */ 2439 vholdl(vp); 2440 mtx_unlock(&sync_mtx); 2441 VI_UNLOCK(vp); 2442 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2443 vdrop(vp); 2444 mtx_lock(&sync_mtx); 2445 return (*bo == LIST_FIRST(slp)); 2446 } 2447 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2448 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2449 VOP_UNLOCK(vp); 2450 vn_finished_write(mp); 2451 BO_LOCK(*bo); 2452 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2453 /* 2454 * Put us back on the worklist. The worklist 2455 * routine will remove us from our current 2456 * position and then add us back in at a later 2457 * position. 2458 */ 2459 vn_syncer_add_to_worklist(*bo, syncdelay); 2460 } 2461 BO_UNLOCK(*bo); 2462 vdrop(vp); 2463 mtx_lock(&sync_mtx); 2464 return (0); 2465 } 2466 2467 static int first_printf = 1; 2468 2469 /* 2470 * System filesystem synchronizer daemon. 
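 *
 * In rough outline (a summary of the loop below, not a contract): once
 * per second the daemon advances syncer_delayno to the next slot of
 * syncer_workitem_pending[], pushes every bufobj found there through
 * sync_vnode(), and, while shutting down, walks the wheel faster and
 * prints the shrinking worklist length.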
2471 */ 2472 static void 2473 sched_sync(void) 2474 { 2475 struct synclist *next, *slp; 2476 struct bufobj *bo; 2477 long starttime; 2478 struct thread *td = curthread; 2479 int last_work_seen; 2480 int net_worklist_len; 2481 int syncer_final_iter; 2482 int error; 2483 2484 last_work_seen = 0; 2485 syncer_final_iter = 0; 2486 syncer_state = SYNCER_RUNNING; 2487 starttime = time_uptime; 2488 td->td_pflags |= TDP_NORUNNINGBUF; 2489 2490 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2491 SHUTDOWN_PRI_LAST); 2492 2493 mtx_lock(&sync_mtx); 2494 for (;;) { 2495 if (syncer_state == SYNCER_FINAL_DELAY && 2496 syncer_final_iter == 0) { 2497 mtx_unlock(&sync_mtx); 2498 kproc_suspend_check(td->td_proc); 2499 mtx_lock(&sync_mtx); 2500 } 2501 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2502 if (syncer_state != SYNCER_RUNNING && 2503 starttime != time_uptime) { 2504 if (first_printf) { 2505 printf("\nSyncing disks, vnodes remaining... "); 2506 first_printf = 0; 2507 } 2508 printf("%d ", net_worklist_len); 2509 } 2510 starttime = time_uptime; 2511 2512 /* 2513 * Push files whose dirty time has expired. Be careful 2514 * of interrupt race on slp queue. 2515 * 2516 * Skip over empty worklist slots when shutting down. 2517 */ 2518 do { 2519 slp = &syncer_workitem_pending[syncer_delayno]; 2520 syncer_delayno += 1; 2521 if (syncer_delayno == syncer_maxdelay) 2522 syncer_delayno = 0; 2523 next = &syncer_workitem_pending[syncer_delayno]; 2524 /* 2525 * If the worklist has wrapped since the 2526 * it was emptied of all but syncer vnodes, 2527 * switch to the FINAL_DELAY state and run 2528 * for one more second. 2529 */ 2530 if (syncer_state == SYNCER_SHUTTING_DOWN && 2531 net_worklist_len == 0 && 2532 last_work_seen == syncer_delayno) { 2533 syncer_state = SYNCER_FINAL_DELAY; 2534 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2535 } 2536 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2537 syncer_worklist_len > 0); 2538 2539 /* 2540 * Keep track of the last time there was anything 2541 * on the worklist other than syncer vnodes. 2542 * Return to the SHUTTING_DOWN state if any 2543 * new work appears. 2544 */ 2545 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2546 last_work_seen = syncer_delayno; 2547 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2548 syncer_state = SYNCER_SHUTTING_DOWN; 2549 while (!LIST_EMPTY(slp)) { 2550 error = sync_vnode(slp, &bo, td); 2551 if (error == 1) { 2552 LIST_REMOVE(bo, bo_synclist); 2553 LIST_INSERT_HEAD(next, bo, bo_synclist); 2554 continue; 2555 } 2556 2557 if (first_printf == 0) { 2558 /* 2559 * Drop the sync mutex, because some watchdog 2560 * drivers need to sleep while patting 2561 */ 2562 mtx_unlock(&sync_mtx); 2563 wdog_kern_pat(WD_LASTVAL); 2564 mtx_lock(&sync_mtx); 2565 } 2566 2567 } 2568 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2569 syncer_final_iter--; 2570 /* 2571 * The variable rushjob allows the kernel to speed up the 2572 * processing of the filesystem syncer process. A rushjob 2573 * value of N tells the filesystem syncer to process the next 2574 * N seconds worth of work on its queue ASAP. Currently rushjob 2575 * is used by the soft update code to speed up the filesystem 2576 * syncer process when the incore state is getting so far 2577 * ahead of the disk that the kernel memory pool is being 2578 * threatened with exhaustion. 
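 *
 * rushjob is raised via speedup_syncer() below; a caller that wants
 * the syncer to catch up sooner might simply do (illustrative):
 *
 *	(void)speedup_syncer();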
2579 */ 2580 if (rushjob > 0) { 2581 rushjob -= 1; 2582 continue; 2583 } 2584 /* 2585 * Just sleep for a short period of time between 2586 * iterations when shutting down to allow some I/O 2587 * to happen. 2588 * 2589 * If it has taken us less than a second to process the 2590 * current work, then wait. Otherwise start right over 2591 * again. We can still lose time if any single round 2592 * takes more than two seconds, but it does not really 2593 * matter as we are just trying to generally pace the 2594 * filesystem activity. 2595 */ 2596 if (syncer_state != SYNCER_RUNNING || 2597 time_uptime == starttime) { 2598 thread_lock(td); 2599 sched_prio(td, PPAUSE); 2600 thread_unlock(td); 2601 } 2602 if (syncer_state != SYNCER_RUNNING) 2603 cv_timedwait(&sync_wakeup, &sync_mtx, 2604 hz / SYNCER_SHUTDOWN_SPEEDUP); 2605 else if (time_uptime == starttime) 2606 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 2607 } 2608 } 2609 2610 /* 2611 * Request the syncer daemon to speed up its work. 2612 * We never push it to speed up more than half of its 2613 * normal turn time, otherwise it could take over the cpu. 2614 */ 2615 int 2616 speedup_syncer(void) 2617 { 2618 int ret = 0; 2619 2620 mtx_lock(&sync_mtx); 2621 if (rushjob < syncdelay / 2) { 2622 rushjob += 1; 2623 stat_rush_requests += 1; 2624 ret = 1; 2625 } 2626 mtx_unlock(&sync_mtx); 2627 cv_broadcast(&sync_wakeup); 2628 return (ret); 2629 } 2630 2631 /* 2632 * Tell the syncer to speed up its work and run though its work 2633 * list several times, then tell it to shut down. 2634 */ 2635 static void 2636 syncer_shutdown(void *arg, int howto) 2637 { 2638 2639 if (howto & RB_NOSYNC) 2640 return; 2641 mtx_lock(&sync_mtx); 2642 syncer_state = SYNCER_SHUTTING_DOWN; 2643 rushjob = 0; 2644 mtx_unlock(&sync_mtx); 2645 cv_broadcast(&sync_wakeup); 2646 kproc_shutdown(arg, howto); 2647 } 2648 2649 void 2650 syncer_suspend(void) 2651 { 2652 2653 syncer_shutdown(updateproc, 0); 2654 } 2655 2656 void 2657 syncer_resume(void) 2658 { 2659 2660 mtx_lock(&sync_mtx); 2661 first_printf = 1; 2662 syncer_state = SYNCER_RUNNING; 2663 mtx_unlock(&sync_mtx); 2664 cv_broadcast(&sync_wakeup); 2665 kproc_resume(updateproc); 2666 } 2667 2668 /* 2669 * Reassign a buffer from one vnode to another. 2670 * Used to assign file specific control information 2671 * (indirect blocks) to the vnode to which they belong. 2672 */ 2673 void 2674 reassignbuf(struct buf *bp) 2675 { 2676 struct vnode *vp; 2677 struct bufobj *bo; 2678 int delay; 2679 #ifdef INVARIANTS 2680 struct bufv *bv; 2681 #endif 2682 2683 vp = bp->b_vp; 2684 bo = bp->b_bufobj; 2685 ++reassignbufcalls; 2686 2687 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 2688 bp, bp->b_vp, bp->b_flags); 2689 /* 2690 * B_PAGING flagged buffers cannot be reassigned because their vp 2691 * is not fully linked in. 2692 */ 2693 if (bp->b_flags & B_PAGING) 2694 panic("cannot reassign paging buffer"); 2695 2696 /* 2697 * Delete from old vnode list, if on one. 2698 */ 2699 BO_LOCK(bo); 2700 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2701 buf_vlist_remove(bp); 2702 else 2703 panic("reassignbuf: Buffer %p not on queue.", bp); 2704 /* 2705 * If dirty, put on list of dirty buffers; otherwise insert onto list 2706 * of clean buffers. 
2707 */ 2708 if (bp->b_flags & B_DELWRI) { 2709 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 2710 switch (vp->v_type) { 2711 case VDIR: 2712 delay = dirdelay; 2713 break; 2714 case VCHR: 2715 delay = metadelay; 2716 break; 2717 default: 2718 delay = filedelay; 2719 } 2720 vn_syncer_add_to_worklist(bo, delay); 2721 } 2722 buf_vlist_add(bp, bo, BX_VNDIRTY); 2723 } else { 2724 buf_vlist_add(bp, bo, BX_VNCLEAN); 2725 2726 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2727 mtx_lock(&sync_mtx); 2728 LIST_REMOVE(bo, bo_synclist); 2729 syncer_worklist_len--; 2730 mtx_unlock(&sync_mtx); 2731 bo->bo_flag &= ~BO_ONWORKLST; 2732 } 2733 } 2734 #ifdef INVARIANTS 2735 bv = &bo->bo_clean; 2736 bp = TAILQ_FIRST(&bv->bv_hd); 2737 KASSERT(bp == NULL || bp->b_bufobj == bo, 2738 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2739 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2740 KASSERT(bp == NULL || bp->b_bufobj == bo, 2741 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2742 bv = &bo->bo_dirty; 2743 bp = TAILQ_FIRST(&bv->bv_hd); 2744 KASSERT(bp == NULL || bp->b_bufobj == bo, 2745 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2746 bp = TAILQ_LAST(&bv->bv_hd, buflists); 2747 KASSERT(bp == NULL || bp->b_bufobj == bo, 2748 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 2749 #endif 2750 BO_UNLOCK(bo); 2751 } 2752 2753 static void 2754 v_init_counters(struct vnode *vp) 2755 { 2756 2757 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 2758 vp, ("%s called for an initialized vnode", __FUNCTION__)); 2759 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 2760 2761 refcount_init(&vp->v_holdcnt, 1); 2762 refcount_init(&vp->v_usecount, 1); 2763 } 2764 2765 /* 2766 * Increment si_usecount of the associated device, if any. 2767 */ 2768 static void 2769 v_incr_devcount(struct vnode *vp) 2770 { 2771 2772 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2773 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2774 dev_lock(); 2775 vp->v_rdev->si_usecount++; 2776 dev_unlock(); 2777 } 2778 } 2779 2780 /* 2781 * Decrement si_usecount of the associated device, if any. 2782 */ 2783 static void 2784 v_decr_devcount(struct vnode *vp) 2785 { 2786 2787 ASSERT_VI_LOCKED(vp, __FUNCTION__); 2788 if (vp->v_type == VCHR && vp->v_rdev != NULL) { 2789 dev_lock(); 2790 vp->v_rdev->si_usecount--; 2791 dev_unlock(); 2792 } 2793 } 2794 2795 /* 2796 * Grab a particular vnode from the free list, increment its 2797 * reference count and lock it. VIRF_DOOMED is set if the vnode 2798 * is being destroyed. Only callers who specify LK_RETRY will 2799 * see doomed vnodes. If inactive processing was delayed in 2800 * vput try to do it here. 2801 * 2802 * Both holdcnt and usecount can be manipulated using atomics without holding 2803 * any locks except in these cases which require the vnode interlock: 2804 * holdcnt: 1->0 and 0->1 2805 * usecount: 0->1 2806 * 2807 * usecount is permitted to transition 1->0 without the interlock because 2808 * vnode is kept live by holdcnt. 
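 *
 * A minimal lockless-lookup sketch (illustrative; "list_lock" is a
 * hypothetical lock protecting the structure vp was found in):
 *
 *	vs = vget_prep(vp);
 *	mtx_unlock(&list_lock);
 *	error = vget_finish(vp, LK_EXCLUSIVE, vs);
 *	if (error != 0)
 *		return (error);
 *
 * On failure vget_finish() has already dropped the reference taken by
 * vget_prep().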
2809 */ 2810 static enum vgetstate __always_inline 2811 _vget_prep(struct vnode *vp, bool interlock) 2812 { 2813 enum vgetstate vs; 2814 2815 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2816 vs = VGET_USECOUNT; 2817 } else { 2818 if (interlock) 2819 vholdl(vp); 2820 else 2821 vhold(vp); 2822 vs = VGET_HOLDCNT; 2823 } 2824 return (vs); 2825 } 2826 2827 enum vgetstate 2828 vget_prep(struct vnode *vp) 2829 { 2830 2831 return (_vget_prep(vp, false)); 2832 } 2833 2834 int 2835 vget(struct vnode *vp, int flags, struct thread *td) 2836 { 2837 enum vgetstate vs; 2838 2839 MPASS(td == curthread); 2840 2841 vs = _vget_prep(vp, (flags & LK_INTERLOCK) != 0); 2842 return (vget_finish(vp, flags, vs)); 2843 } 2844 2845 int 2846 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 2847 { 2848 int error, oweinact; 2849 2850 VNASSERT((flags & LK_TYPE_MASK) != 0, vp, 2851 ("%s: invalid lock operation", __func__)); 2852 2853 if ((flags & LK_INTERLOCK) != 0) 2854 ASSERT_VI_LOCKED(vp, __func__); 2855 else 2856 ASSERT_VI_UNLOCKED(vp, __func__); 2857 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 2858 if (vs == VGET_USECOUNT) { 2859 VNASSERT(vp->v_usecount > 0, vp, 2860 ("%s: vnode without usecount when VGET_USECOUNT was passed", 2861 __func__)); 2862 } 2863 2864 if ((error = vn_lock(vp, flags)) != 0) { 2865 if (vs == VGET_USECOUNT) 2866 vrele(vp); 2867 else 2868 vdrop(vp); 2869 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 2870 vp); 2871 return (error); 2872 } 2873 2874 if (vs == VGET_USECOUNT) { 2875 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2876 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2877 return (0); 2878 } 2879 2880 /* 2881 * We hold the vnode. If the usecount is 0 it will be utilized to keep 2882 * the vnode around. Otherwise someone else lended their hold count and 2883 * we have to drop ours. 2884 */ 2885 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2886 #ifdef INVARIANTS 2887 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2888 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2889 #else 2890 refcount_release(&vp->v_holdcnt); 2891 #endif 2892 VNODE_REFCOUNT_FENCE_ACQ(); 2893 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2894 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2895 return (0); 2896 } 2897 2898 /* 2899 * We don't guarantee that any particular close will 2900 * trigger inactive processing so just make a best effort 2901 * here at preventing a reference to a removed file. If 2902 * we don't succeed no harm is done. 2903 * 2904 * Upgrade our holdcnt to a usecount. 2905 */ 2906 VI_LOCK(vp); 2907 /* 2908 * See the previous section. By the time we get here we may find 2909 * ourselves in the same spot. 
2910 */ 2911 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2912 #ifdef INVARIANTS 2913 int old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 2914 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 2915 #else 2916 refcount_release(&vp->v_holdcnt); 2917 #endif 2918 VNODE_REFCOUNT_FENCE_ACQ(); 2919 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2920 ("%s: vnode with usecount and VI_OWEINACT set", 2921 __func__)); 2922 VI_UNLOCK(vp); 2923 return (0); 2924 } 2925 if ((vp->v_iflag & VI_OWEINACT) == 0) { 2926 oweinact = 0; 2927 } else { 2928 oweinact = 1; 2929 vp->v_iflag &= ~VI_OWEINACT; 2930 VNODE_REFCOUNT_FENCE_REL(); 2931 } 2932 v_incr_devcount(vp); 2933 refcount_acquire(&vp->v_usecount); 2934 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE && 2935 (flags & LK_NOWAIT) == 0) 2936 vinactive(vp); 2937 VI_UNLOCK(vp); 2938 return (0); 2939 } 2940 2941 /* 2942 * Increase the reference (use) and hold count of a vnode. 2943 * This will also remove the vnode from the free list if it is presently free. 2944 */ 2945 void 2946 vref(struct vnode *vp) 2947 { 2948 2949 ASSERT_VI_UNLOCKED(vp, __func__); 2950 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2951 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2952 VNODE_REFCOUNT_FENCE_ACQ(); 2953 VNASSERT(vp->v_holdcnt > 0, vp, 2954 ("%s: active vnode not held", __func__)); 2955 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2956 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2957 return; 2958 } 2959 VI_LOCK(vp); 2960 vrefl(vp); 2961 VI_UNLOCK(vp); 2962 } 2963 2964 void 2965 vrefl(struct vnode *vp) 2966 { 2967 2968 ASSERT_VI_LOCKED(vp, __func__); 2969 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2970 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 2971 VNODE_REFCOUNT_FENCE_ACQ(); 2972 VNASSERT(vp->v_holdcnt > 0, vp, 2973 ("%s: active vnode not held", __func__)); 2974 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 2975 ("%s: vnode with usecount and VI_OWEINACT set", __func__)); 2976 return; 2977 } 2978 vholdl(vp); 2979 if ((vp->v_iflag & VI_OWEINACT) != 0) { 2980 vp->v_iflag &= ~VI_OWEINACT; 2981 VNODE_REFCOUNT_FENCE_REL(); 2982 } 2983 v_incr_devcount(vp); 2984 refcount_acquire(&vp->v_usecount); 2985 } 2986 2987 void 2988 vrefact(struct vnode *vp) 2989 { 2990 2991 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 2992 #ifdef INVARIANTS 2993 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 2994 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 2995 #else 2996 refcount_acquire(&vp->v_usecount); 2997 #endif 2998 } 2999 3000 /* 3001 * Return reference count of a vnode. 3002 * 3003 * The results of this call are only guaranteed when some mechanism is used to 3004 * stop other processes from gaining references to the vnode. This may be the 3005 * case if the caller holds the only reference. This is also useful when stale 3006 * data is acceptable as race conditions may be accounted for by some other 3007 * means. 
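 *
 * For example (illustrative), a caller holding its own use reference
 * might refuse an exclusive operation when anybody else also has the
 * vnode open:
 *
 *	if (vrefcnt(vp) > 1)
 *		return (EBUSY);
 *
 * keeping in mind that the value can already be stale by the time it
 * is examined unless something prevents new references.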
3008 */ 3009 int 3010 vrefcnt(struct vnode *vp) 3011 { 3012 3013 return (vp->v_usecount); 3014 } 3015 3016 static void 3017 vdefer_inactive(struct vnode *vp) 3018 { 3019 3020 ASSERT_VI_LOCKED(vp, __func__); 3021 VNASSERT(vp->v_iflag & VI_OWEINACT, vp, 3022 ("%s: vnode without VI_OWEINACT", __func__)); 3023 if (VN_IS_DOOMED(vp)) { 3024 vdropl(vp); 3025 return; 3026 } 3027 if (vp->v_iflag & VI_DEFINACT) { 3028 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3029 vdropl(vp); 3030 return; 3031 } 3032 vp->v_iflag |= VI_DEFINACT; 3033 VI_UNLOCK(vp); 3034 counter_u64_add(deferred_inact, 1); 3035 } 3036 3037 static void 3038 vdefer_inactive_cond(struct vnode *vp) 3039 { 3040 3041 VI_LOCK(vp); 3042 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3043 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3044 vdropl(vp); 3045 return; 3046 } 3047 vdefer_inactive(vp); 3048 } 3049 3050 enum vputx_op { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; 3051 3052 /* 3053 * Decrement the use and hold counts for a vnode. 3054 * 3055 * See an explanation near vget() as to why atomic operation is safe. 3056 */ 3057 static void 3058 vputx(struct vnode *vp, enum vputx_op func) 3059 { 3060 int error; 3061 3062 KASSERT(vp != NULL, ("vputx: null vp")); 3063 if (func == VPUTX_VUNREF) 3064 ASSERT_VOP_LOCKED(vp, "vunref"); 3065 ASSERT_VI_UNLOCKED(vp, __func__); 3066 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, 3067 ("%s: wrong ref counts", __func__)); 3068 3069 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3070 3071 /* 3072 * We want to hold the vnode until the inactive finishes to 3073 * prevent vgone() races. We drop the use count here and the 3074 * hold count below when we're done. 3075 * 3076 * If we release the last usecount we take ownership of the hold 3077 * count which provides liveness of the vnode, in which case we 3078 * have to vdrop. 3079 */ 3080 if (!refcount_release(&vp->v_usecount)) 3081 return; 3082 VI_LOCK(vp); 3083 v_decr_devcount(vp); 3084 /* 3085 * By the time we got here someone else might have transitioned 3086 * the count back to > 0. 3087 */ 3088 if (vp->v_usecount > 0) { 3089 vdropl(vp); 3090 return; 3091 } 3092 if (vp->v_iflag & VI_DOINGINACT) { 3093 vdropl(vp); 3094 return; 3095 } 3096 3097 /* 3098 * Check if the fs wants to perform inactive processing. Note we 3099 * may be only holding the interlock, in which case it is possible 3100 * someone else called vgone on the vnode and ->v_data is now NULL. 3101 * Since vgone performs inactive on its own there is nothing to do 3102 * here but to drop our hold count. 3103 */ 3104 if (__predict_false(VN_IS_DOOMED(vp)) || 3105 VOP_NEED_INACTIVE(vp) == 0) { 3106 vdropl(vp); 3107 return; 3108 } 3109 3110 /* 3111 * We must call VOP_INACTIVE with the node locked. Mark 3112 * as VI_DOINGINACT to avoid recursion. 
3113 */ 3114 vp->v_iflag |= VI_OWEINACT; 3115 switch (func) { 3116 case VPUTX_VRELE: 3117 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3118 VI_LOCK(vp); 3119 break; 3120 case VPUTX_VPUT: 3121 error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT); 3122 VI_LOCK(vp); 3123 break; 3124 case VPUTX_VUNREF: 3125 error = 0; 3126 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3127 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3128 VI_LOCK(vp); 3129 } 3130 break; 3131 } 3132 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp, 3133 ("vnode with usecount and VI_OWEINACT set")); 3134 if (error == 0) { 3135 if (vp->v_iflag & VI_OWEINACT) 3136 vinactive(vp); 3137 if (func != VPUTX_VUNREF) 3138 VOP_UNLOCK(vp); 3139 vdropl(vp); 3140 } else if (vp->v_iflag & VI_OWEINACT) { 3141 vdefer_inactive(vp); 3142 } else { 3143 vdropl(vp); 3144 } 3145 } 3146 3147 /* 3148 * Vnode put/release. 3149 * If count drops to zero, call inactive routine and return to freelist. 3150 */ 3151 void 3152 vrele(struct vnode *vp) 3153 { 3154 3155 vputx(vp, VPUTX_VRELE); 3156 } 3157 3158 /* 3159 * Release an already locked vnode. This give the same effects as 3160 * unlock+vrele(), but takes less time and avoids releasing and 3161 * re-aquiring the lock (as vrele() acquires the lock internally.) 3162 * 3163 * It is an invariant that all VOP_* calls operate on a held vnode. 3164 * We may be only having an implicit hold stemming from our usecount, 3165 * which we are about to release. If we unlock the vnode afterwards we 3166 * open a time window where someone else dropped the last usecount and 3167 * proceeded to free the vnode before our unlock finished. For this 3168 * reason we unlock the vnode early. This is a little bit wasteful as 3169 * it may be the vnode is exclusively locked and inactive processing is 3170 * needed, in which case we are adding work. 3171 */ 3172 void 3173 vput(struct vnode *vp) 3174 { 3175 3176 VOP_UNLOCK(vp); 3177 vputx(vp, VPUTX_VPUT); 3178 } 3179 3180 /* 3181 * Release an exclusively locked vnode. Do not unlock the vnode lock. 3182 */ 3183 void 3184 vunref(struct vnode *vp) 3185 { 3186 3187 vputx(vp, VPUTX_VUNREF); 3188 } 3189 3190 /* 3191 * Increase the hold count and activate if this is the first reference. 3192 */ 3193 static void 3194 vhold_activate(struct vnode *vp) 3195 { 3196 struct mount *mp; 3197 3198 ASSERT_VI_LOCKED(vp, __func__); 3199 VNASSERT(vp->v_holdcnt == 0, vp, 3200 ("%s: wrong hold count", __func__)); 3201 VNASSERT(vp->v_op != NULL, vp, 3202 ("%s: vnode already reclaimed.", __func__)); 3203 /* 3204 * Remove a vnode from the free list, mark it as in use, 3205 * and put it on the active list. 
3206 */ 3207 VNASSERT(vp->v_mount != NULL, vp, 3208 ("_vhold: vnode not on per mount vnode list")); 3209 mp = vp->v_mount; 3210 mtx_lock(&mp->mnt_listmtx); 3211 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { 3212 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3213 mp->mnt_tmpfreevnodelistsize--; 3214 vp->v_mflag &= ~VMP_TMPMNTFREELIST; 3215 } else { 3216 mtx_lock(&vnode_free_list_mtx); 3217 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist); 3218 freevnodes--; 3219 mtx_unlock(&vnode_free_list_mtx); 3220 } 3221 KASSERT((vp->v_iflag & VI_ACTIVE) == 0, 3222 ("Activating already active vnode")); 3223 vp->v_iflag &= ~VI_FREE; 3224 vp->v_iflag |= VI_ACTIVE; 3225 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist); 3226 mp->mnt_activevnodelistsize++; 3227 mtx_unlock(&mp->mnt_listmtx); 3228 refcount_acquire(&vp->v_holdcnt); 3229 } 3230 3231 void 3232 vhold(struct vnode *vp) 3233 { 3234 3235 ASSERT_VI_UNLOCKED(vp, __func__); 3236 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3237 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { 3238 VNODE_REFCOUNT_FENCE_ACQ(); 3239 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3240 ("vhold: vnode with holdcnt is free")); 3241 return; 3242 } 3243 VI_LOCK(vp); 3244 vholdl(vp); 3245 VI_UNLOCK(vp); 3246 } 3247 3248 void 3249 vholdl(struct vnode *vp) 3250 { 3251 3252 ASSERT_VI_LOCKED(vp, __func__); 3253 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3254 if ((vp->v_iflag & VI_FREE) == 0) { 3255 refcount_acquire(&vp->v_holdcnt); 3256 return; 3257 } 3258 vhold_activate(vp); 3259 } 3260 3261 void 3262 vholdnz(struct vnode *vp) 3263 { 3264 3265 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3266 #ifdef INVARIANTS 3267 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3268 VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old)); 3269 #else 3270 atomic_add_int(&vp->v_holdcnt, 1); 3271 #endif 3272 } 3273 3274 /* 3275 * Drop the hold count of the vnode. If this is the last reference to 3276 * the vnode we place it on the free list unless it has been vgone'd 3277 * (marked VIRF_DOOMED) in which case we will free it. 3278 * 3279 * Because the vnode vm object keeps a hold reference on the vnode if 3280 * there is at least one resident non-cached page, the vnode cannot 3281 * leave the active list without the page cleanup done. 3282 */ 3283 static void 3284 vdrop_deactivate(struct vnode *vp) 3285 { 3286 struct mount *mp; 3287 3288 ASSERT_VI_LOCKED(vp, __func__); 3289 /* 3290 * Mark a vnode as free: remove it from its active list 3291 * and put it up for recycling on the freelist. 
3292 */ 3293 VNASSERT(!VN_IS_DOOMED(vp), vp, 3294 ("vdrop: returning doomed vnode")); 3295 VNASSERT(vp->v_op != NULL, vp, 3296 ("vdrop: vnode already reclaimed.")); 3297 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, 3298 ("vnode already free")); 3299 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp, 3300 ("vnode with VI_OWEINACT set")); 3301 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, 3302 ("vnode with VI_DEFINACT set")); 3303 VNASSERT(vp->v_holdcnt == 0, vp, 3304 ("vdrop: freeing when we shouldn't")); 3305 mp = vp->v_mount; 3306 mtx_lock(&mp->mnt_listmtx); 3307 if (vp->v_iflag & VI_ACTIVE) { 3308 vp->v_iflag &= ~VI_ACTIVE; 3309 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); 3310 mp->mnt_activevnodelistsize--; 3311 } 3312 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); 3313 mp->mnt_tmpfreevnodelistsize++; 3314 vp->v_iflag |= VI_FREE; 3315 vp->v_mflag |= VMP_TMPMNTFREELIST; 3316 VI_UNLOCK(vp); 3317 if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch) 3318 vnlru_return_batch_locked(mp); 3319 mtx_unlock(&mp->mnt_listmtx); 3320 } 3321 3322 void 3323 vdrop(struct vnode *vp) 3324 { 3325 3326 ASSERT_VI_UNLOCKED(vp, __func__); 3327 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3328 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3329 return; 3330 VI_LOCK(vp); 3331 vdropl(vp); 3332 } 3333 3334 void 3335 vdropl(struct vnode *vp) 3336 { 3337 3338 ASSERT_VI_LOCKED(vp, __func__); 3339 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3340 if (!refcount_release(&vp->v_holdcnt)) { 3341 VI_UNLOCK(vp); 3342 return; 3343 } 3344 if (VN_IS_DOOMED(vp)) { 3345 freevnode(vp); 3346 return; 3347 } 3348 vdrop_deactivate(vp); 3349 } 3350 3351 /* 3352 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3353 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3354 * OWEINACT tracks whether a vnode missed a call to inactive due to a 3355 * failed lock upgrade. 3356 */ 3357 void 3358 vinactive(struct vnode *vp) 3359 { 3360 struct vm_object *obj; 3361 3362 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3363 ASSERT_VI_LOCKED(vp, "vinactive"); 3364 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp, 3365 ("vinactive: recursed on VI_DOINGINACT")); 3366 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3367 vp->v_iflag |= VI_DOINGINACT; 3368 vp->v_iflag &= ~VI_OWEINACT; 3369 VI_UNLOCK(vp); 3370 /* 3371 * Before moving off the active list, we must be sure that any 3372 * modified pages are converted into the vnode's dirty 3373 * buffers, since these will no longer be checked once the 3374 * vnode is on the inactive list. 3375 * 3376 * The write-out of the dirty pages is asynchronous. At the 3377 * point that VOP_INACTIVE() is called, there could still be 3378 * pending I/O and dirty pages in the object. 3379 */ 3380 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3381 vm_object_mightbedirty(obj)) { 3382 VM_OBJECT_WLOCK(obj); 3383 vm_object_page_clean(obj, 0, 0, 0); 3384 VM_OBJECT_WUNLOCK(obj); 3385 } 3386 VOP_INACTIVE(vp, curthread); 3387 VI_LOCK(vp); 3388 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp, 3389 ("vinactive: lost VI_DOINGINACT")); 3390 vp->v_iflag &= ~VI_DOINGINACT; 3391 } 3392 3393 /* 3394 * Remove any vnodes in the vnode table belonging to mount point mp. 3395 * 3396 * If FORCECLOSE is not specified, there should not be any active ones, 3397 * return error if any are found (nb: this is a user error, not a 3398 * system error). If FORCECLOSE is specified, detach any active vnodes 3399 * that are found. 
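 *
 * A minimal forced-flush sketch (illustrative only; a real unmount
 * path also deals with special vnodes and may retry):
 *
 *	error = vflush(mp, 0, FORCECLOSE, curthread);
 *	if (error != 0)
 *		return (error);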
3400 * 3401 * If WRITECLOSE is set, only flush out regular file vnodes open for 3402 * writing. 3403 * 3404 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 3405 * 3406 * `rootrefs' specifies the base reference count for the root vnode 3407 * of this filesystem. The root vnode is considered busy if its 3408 * v_usecount exceeds this value. On a successful return, vflush() 3409 * will call vrele() on the root vnode exactly rootrefs times. 3410 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 3411 * be zero. 3412 */ 3413 #ifdef DIAGNOSTIC 3414 static int busyprt = 0; /* print out busy vnodes */ 3415 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 3416 #endif 3417 3418 int 3419 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 3420 { 3421 struct vnode *vp, *mvp, *rootvp = NULL; 3422 struct vattr vattr; 3423 int busy = 0, error; 3424 3425 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 3426 rootrefs, flags); 3427 if (rootrefs > 0) { 3428 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 3429 ("vflush: bad args")); 3430 /* 3431 * Get the filesystem root vnode. We can vput() it 3432 * immediately, since with rootrefs > 0, it won't go away. 3433 */ 3434 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) { 3435 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d", 3436 __func__, error); 3437 return (error); 3438 } 3439 vput(rootvp); 3440 } 3441 loop: 3442 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { 3443 vholdl(vp); 3444 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE); 3445 if (error) { 3446 vdrop(vp); 3447 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3448 goto loop; 3449 } 3450 /* 3451 * Skip over vnodes marked VV_SYSTEM. 3452 */ 3453 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { 3454 VOP_UNLOCK(vp); 3455 vdrop(vp); 3456 continue; 3457 } 3458 /* 3459 * If WRITECLOSE is set, flush out unlinked but still open 3460 * files (even if open only for reading) and regular file 3461 * vnodes open for writing. 3462 */ 3463 if (flags & WRITECLOSE) { 3464 if (vp->v_object != NULL) { 3465 VM_OBJECT_WLOCK(vp->v_object); 3466 vm_object_page_clean(vp->v_object, 0, 0, 0); 3467 VM_OBJECT_WUNLOCK(vp->v_object); 3468 } 3469 error = VOP_FSYNC(vp, MNT_WAIT, td); 3470 if (error != 0) { 3471 VOP_UNLOCK(vp); 3472 vdrop(vp); 3473 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp); 3474 return (error); 3475 } 3476 error = VOP_GETATTR(vp, &vattr, td->td_ucred); 3477 VI_LOCK(vp); 3478 3479 if ((vp->v_type == VNON || 3480 (error == 0 && vattr.va_nlink > 0)) && 3481 (vp->v_writecount <= 0 || vp->v_type != VREG)) { 3482 VOP_UNLOCK(vp); 3483 vdropl(vp); 3484 continue; 3485 } 3486 } else 3487 VI_LOCK(vp); 3488 /* 3489 * With v_usecount == 0, all we need to do is clear out the 3490 * vnode data structures and we are done. 3491 * 3492 * If FORCECLOSE is set, forcibly close the vnode. 3493 */ 3494 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { 3495 vgonel(vp); 3496 } else { 3497 busy++; 3498 #ifdef DIAGNOSTIC 3499 if (busyprt) 3500 vn_printf(vp, "vflush: busy vnode "); 3501 #endif 3502 } 3503 VOP_UNLOCK(vp); 3504 vdropl(vp); 3505 } 3506 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) { 3507 /* 3508 * If just the root vnode is busy, and if its refcount 3509 * is equal to `rootrefs', then go ahead and kill it.
3510 */ 3511 VI_LOCK(rootvp); 3512 KASSERT(busy > 0, ("vflush: not busy")); 3513 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, 3514 ("vflush: usecount %d < rootrefs %d", 3515 rootvp->v_usecount, rootrefs)); 3516 if (busy == 1 && rootvp->v_usecount == rootrefs) { 3517 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK); 3518 vgone(rootvp); 3519 VOP_UNLOCK(rootvp); 3520 busy = 0; 3521 } else 3522 VI_UNLOCK(rootvp); 3523 } 3524 if (busy) { 3525 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__, 3526 busy); 3527 return (EBUSY); 3528 } 3529 for (; rootrefs > 0; rootrefs--) 3530 vrele(rootvp); 3531 return (0); 3532 } 3533 3534 /* 3535 * Recycle an unused vnode to the front of the free list. 3536 */ 3537 int 3538 vrecycle(struct vnode *vp) 3539 { 3540 int recycled; 3541 3542 VI_LOCK(vp); 3543 recycled = vrecyclel(vp); 3544 VI_UNLOCK(vp); 3545 return (recycled); 3546 } 3547 3548 /* 3549 * vrecycle, with the vp interlock held. 3550 */ 3551 int 3552 vrecyclel(struct vnode *vp) 3553 { 3554 int recycled; 3555 3556 ASSERT_VOP_ELOCKED(vp, __func__); 3557 ASSERT_VI_LOCKED(vp, __func__); 3558 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3559 recycled = 0; 3560 if (vp->v_usecount == 0) { 3561 recycled = 1; 3562 vgonel(vp); 3563 } 3564 return (recycled); 3565 } 3566 3567 /* 3568 * Eliminate all activity associated with a vnode 3569 * in preparation for reuse. 3570 */ 3571 void 3572 vgone(struct vnode *vp) 3573 { 3574 VI_LOCK(vp); 3575 vgonel(vp); 3576 VI_UNLOCK(vp); 3577 } 3578 3579 static void 3580 notify_lowervp_vfs_dummy(struct mount *mp __unused, 3581 struct vnode *lowervp __unused) 3582 { 3583 } 3584 3585 /* 3586 * Notify upper mounts about reclaimed or unlinked vnode. 3587 */ 3588 void 3589 vfs_notify_upper(struct vnode *vp, int event) 3590 { 3591 static struct vfsops vgonel_vfsops = { 3592 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy, 3593 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy, 3594 }; 3595 struct mount *mp, *ump, *mmp; 3596 3597 mp = vp->v_mount; 3598 if (mp == NULL) 3599 return; 3600 if (TAILQ_EMPTY(&mp->mnt_uppers)) 3601 return; 3602 3603 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO); 3604 mmp->mnt_op = &vgonel_vfsops; 3605 mmp->mnt_kern_flag |= MNTK_MARKER; 3606 MNT_ILOCK(mp); 3607 mp->mnt_kern_flag |= MNTK_VGONE_UPPER; 3608 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) { 3609 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) { 3610 ump = TAILQ_NEXT(ump, mnt_upper_link); 3611 continue; 3612 } 3613 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link); 3614 MNT_IUNLOCK(mp); 3615 switch (event) { 3616 case VFS_NOTIFY_UPPER_RECLAIM: 3617 VFS_RECLAIM_LOWERVP(ump, vp); 3618 break; 3619 case VFS_NOTIFY_UPPER_UNLINK: 3620 VFS_UNLINK_LOWERVP(ump, vp); 3621 break; 3622 default: 3623 KASSERT(0, ("invalid event %d", event)); 3624 break; 3625 } 3626 MNT_ILOCK(mp); 3627 ump = TAILQ_NEXT(mmp, mnt_upper_link); 3628 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link); 3629 } 3630 free(mmp, M_TEMP); 3631 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER; 3632 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) { 3633 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER; 3634 wakeup(&mp->mnt_uppers); 3635 } 3636 MNT_IUNLOCK(mp); 3637 } 3638 3639 /* 3640 * vgone, with the vp interlock held. 
3641 */ 3642 static void 3643 vgonel(struct vnode *vp) 3644 { 3645 struct thread *td; 3646 struct mount *mp; 3647 vm_object_t object; 3648 bool active, oweinact; 3649 3650 ASSERT_VOP_ELOCKED(vp, "vgonel"); 3651 ASSERT_VI_LOCKED(vp, "vgonel"); 3652 VNASSERT(vp->v_holdcnt, vp, 3653 ("vgonel: vp %p has no reference.", vp)); 3654 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3655 td = curthread; 3656 3657 /* 3658 * Don't vgonel if we're already doomed. 3659 */ 3660 if (vp->v_irflag & VIRF_DOOMED) 3661 return; 3662 vp->v_irflag |= VIRF_DOOMED; 3663 3664 /* 3665 * Check to see if the vnode is in use. If so, we have to call 3666 * VOP_CLOSE() and VOP_INACTIVE(). 3667 */ 3668 active = vp->v_usecount > 0; 3669 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 3670 /* 3671 * If we need to do inactive VI_OWEINACT will be set. 3672 */ 3673 if (vp->v_iflag & VI_DEFINACT) { 3674 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 3675 vp->v_iflag &= ~VI_DEFINACT; 3676 vdropl(vp); 3677 } else { 3678 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 3679 VI_UNLOCK(vp); 3680 } 3681 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 3682 3683 /* 3684 * If purging an active vnode, it must be closed and 3685 * deactivated before being reclaimed. 3686 */ 3687 if (active) 3688 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 3689 if (oweinact || active) { 3690 VI_LOCK(vp); 3691 if ((vp->v_iflag & VI_DOINGINACT) == 0) 3692 vinactive(vp); 3693 VI_UNLOCK(vp); 3694 } 3695 if (vp->v_type == VSOCK) 3696 vfs_unp_reclaim(vp); 3697 3698 /* 3699 * Clean out any buffers associated with the vnode. 3700 * If the flush fails, just toss the buffers. 3701 */ 3702 mp = NULL; 3703 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 3704 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 3705 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 3706 while (vinvalbuf(vp, 0, 0, 0) != 0) 3707 ; 3708 } 3709 3710 BO_LOCK(&vp->v_bufobj); 3711 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 3712 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 3713 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 3714 vp->v_bufobj.bo_clean.bv_cnt == 0, 3715 ("vp %p bufobj not invalidated", vp)); 3716 3717 /* 3718 * For VMIO bufobj, BO_DEAD is set later, or in 3719 * vm_object_terminate() after the object's page queue is 3720 * flushed. 3721 */ 3722 object = vp->v_bufobj.bo_object; 3723 if (object == NULL) 3724 vp->v_bufobj.bo_flag |= BO_DEAD; 3725 BO_UNLOCK(&vp->v_bufobj); 3726 3727 /* 3728 * Handle the VM part. Tmpfs handles v_object on its own (the 3729 * OBJT_VNODE check). Nullfs or other bypassing filesystems 3730 * should not touch the object borrowed from the lower vnode 3731 * (the handle check). 3732 */ 3733 if (object != NULL && object->type == OBJT_VNODE && 3734 object->handle == vp) 3735 vnode_destroy_vobject(vp); 3736 3737 /* 3738 * Reclaim the vnode. 3739 */ 3740 if (VOP_RECLAIM(vp, td)) 3741 panic("vgone: cannot reclaim"); 3742 if (mp != NULL) 3743 vn_finished_secondary_write(mp); 3744 VNASSERT(vp->v_object == NULL, vp, 3745 ("vop_reclaim left v_object vp=%p", vp)); 3746 /* 3747 * Clear the advisory locks and wake up waiting threads. 3748 */ 3749 (void)VOP_ADVLOCKPURGE(vp); 3750 vp->v_lockf = NULL; 3751 /* 3752 * Delete from old mount point vnode list. 3753 */ 3754 delmntque(vp); 3755 cache_purge(vp); 3756 /* 3757 * Done with purge, reset to the standard lock and invalidate 3758 * the vnode. 
3759 */ 3760 VI_LOCK(vp); 3761 vp->v_vnlock = &vp->v_lock; 3762 vp->v_op = &dead_vnodeops; 3763 vp->v_type = VBAD; 3764 } 3765 3766 /* 3767 * Calculate the total number of references to a special device. 3768 */ 3769 int 3770 vcount(struct vnode *vp) 3771 { 3772 int count; 3773 3774 dev_lock(); 3775 count = vp->v_rdev->si_usecount; 3776 dev_unlock(); 3777 return (count); 3778 } 3779 3780 /* 3781 * Print out a description of a vnode. 3782 */ 3783 static char *typename[] = 3784 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD", 3785 "VMARKER"}; 3786 3787 void 3788 vn_printf(struct vnode *vp, const char *fmt, ...) 3789 { 3790 va_list ap; 3791 char buf[256], buf2[16]; 3792 u_long flags; 3793 3794 va_start(ap, fmt); 3795 vprintf(fmt, ap); 3796 va_end(ap); 3797 printf("%p: ", (void *)vp); 3798 printf("type %s\n", typename[vp->v_type]); 3799 printf(" usecount %d, writecount %d, refcount %d", 3800 vp->v_usecount, vp->v_writecount, vp->v_holdcnt); 3801 switch (vp->v_type) { 3802 case VDIR: 3803 printf(" mountedhere %p\n", vp->v_mountedhere); 3804 break; 3805 case VCHR: 3806 printf(" rdev %p\n", vp->v_rdev); 3807 break; 3808 case VSOCK: 3809 printf(" socket %p\n", vp->v_unpcb); 3810 break; 3811 case VFIFO: 3812 printf(" fifoinfo %p\n", vp->v_fifoinfo); 3813 break; 3814 default: 3815 printf("\n"); 3816 break; 3817 } 3818 buf[0] = '\0'; 3819 buf[1] = '\0'; 3820 if (vp->v_irflag & VIRF_DOOMED) 3821 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 3822 flags = vp->v_irflag & ~(VIRF_DOOMED); 3823 if (flags != 0) { 3824 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 3825 strlcat(buf, buf2, sizeof(buf)); 3826 } 3827 if (vp->v_vflag & VV_ROOT) 3828 strlcat(buf, "|VV_ROOT", sizeof(buf)); 3829 if (vp->v_vflag & VV_ISTTY) 3830 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 3831 if (vp->v_vflag & VV_NOSYNC) 3832 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 3833 if (vp->v_vflag & VV_ETERNALDEV) 3834 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 3835 if (vp->v_vflag & VV_CACHEDLABEL) 3836 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 3837 if (vp->v_vflag & VV_VMSIZEVNLOCK) 3838 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 3839 if (vp->v_vflag & VV_COPYONWRITE) 3840 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 3841 if (vp->v_vflag & VV_SYSTEM) 3842 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 3843 if (vp->v_vflag & VV_PROCDEP) 3844 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 3845 if (vp->v_vflag & VV_NOKNOTE) 3846 strlcat(buf, "|VV_NOKNOTE", sizeof(buf)); 3847 if (vp->v_vflag & VV_DELETED) 3848 strlcat(buf, "|VV_DELETED", sizeof(buf)); 3849 if (vp->v_vflag & VV_MD) 3850 strlcat(buf, "|VV_MD", sizeof(buf)); 3851 if (vp->v_vflag & VV_FORCEINSMQ) 3852 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 3853 if (vp->v_vflag & VV_READLINK) 3854 strlcat(buf, "|VV_READLINK", sizeof(buf)); 3855 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 3856 VV_CACHEDLABEL | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP | 3857 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ); 3858 if (flags != 0) { 3859 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 3860 strlcat(buf, buf2, sizeof(buf)); 3861 } 3862 if (vp->v_iflag & VI_TEXT_REF) 3863 strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); 3864 if (vp->v_iflag & VI_MOUNT) 3865 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 3866 if (vp->v_iflag & VI_FREE) 3867 strlcat(buf, "|VI_FREE", sizeof(buf)); 3868 if (vp->v_iflag & VI_ACTIVE) 3869 strlcat(buf, "|VI_ACTIVE", sizeof(buf)); 3870 if (vp->v_iflag & VI_DOINGINACT) 3871 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 3872 if 
(vp->v_iflag & VI_OWEINACT) 3873 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 3874 if (vp->v_iflag & VI_DEFINACT) 3875 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 3876 flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_FREE | VI_ACTIVE | 3877 VI_DOINGINACT | VI_OWEINACT | VI_DEFINACT); 3878 if (flags != 0) { 3879 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 3880 strlcat(buf, buf2, sizeof(buf)); 3881 } 3882 if (vp->v_mflag & VMP_TMPMNTFREELIST) 3883 strlcat(buf, "|VMP_TMPMNTFREELIST", sizeof(buf)); 3884 flags = vp->v_mflag & ~(VMP_TMPMNTFREELIST); 3885 if (flags != 0) { 3886 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 3887 strlcat(buf, buf2, sizeof(buf)); 3888 } 3889 printf(" flags (%s)\n", buf + 1); 3890 if (mtx_owned(VI_MTX(vp))) 3891 printf(" VI_LOCKed"); 3892 if (vp->v_object != NULL) 3893 printf(" v_object %p ref %d pages %d " 3894 "cleanbuf %d dirtybuf %d\n", 3895 vp->v_object, vp->v_object->ref_count, 3896 vp->v_object->resident_page_count, 3897 vp->v_bufobj.bo_clean.bv_cnt, 3898 vp->v_bufobj.bo_dirty.bv_cnt); 3899 printf(" "); 3900 lockmgr_printinfo(vp->v_vnlock); 3901 if (vp->v_data != NULL) 3902 VOP_PRINT(vp); 3903 } 3904 3905 #ifdef DDB 3906 /* 3907 * List all of the locked vnodes in the system. 3908 * Called when debugging the kernel. 3909 */ 3910 DB_SHOW_COMMAND(lockedvnods, lockedvnodes) 3911 { 3912 struct mount *mp; 3913 struct vnode *vp; 3914 3915 /* 3916 * Note: because this is DDB, we can't obey the locking semantics 3917 * for these structures, which means we could catch an inconsistent 3918 * state and dereference a nasty pointer. Not much to be done 3919 * about that. 3920 */ 3921 db_printf("Locked vnodes\n"); 3922 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3923 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 3924 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 3925 vn_printf(vp, "vnode "); 3926 } 3927 } 3928 } 3929 3930 /* 3931 * Show details about the given vnode. 3932 */ 3933 DB_SHOW_COMMAND(vnode, db_show_vnode) 3934 { 3935 struct vnode *vp; 3936 3937 if (!have_addr) 3938 return; 3939 vp = (struct vnode *)addr; 3940 vn_printf(vp, "vnode "); 3941 } 3942 3943 /* 3944 * Show details about the given mount point. 3945 */ 3946 DB_SHOW_COMMAND(mount, db_show_mount) 3947 { 3948 struct mount *mp; 3949 struct vfsopt *opt; 3950 struct statfs *sp; 3951 struct vnode *vp; 3952 char buf[512]; 3953 uint64_t mflags; 3954 u_int flags; 3955 3956 if (!have_addr) { 3957 /* No address given, print short info about all mount points. 
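For example, one would first run "show mount" with no argument and then repeat it as "show mount <addr>" with one of the addresses printed below (illustrative usage note).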
*/ 3958 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 3959 db_printf("%p %s on %s (%s)\n", mp, 3960 mp->mnt_stat.f_mntfromname, 3961 mp->mnt_stat.f_mntonname, 3962 mp->mnt_stat.f_fstypename); 3963 if (db_pager_quit) 3964 break; 3965 } 3966 db_printf("\nMore info: show mount <addr>\n"); 3967 return; 3968 } 3969 3970 mp = (struct mount *)addr; 3971 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 3972 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 3973 3974 buf[0] = '\0'; 3975 mflags = mp->mnt_flag; 3976 #define MNT_FLAG(flag) do { \ 3977 if (mflags & (flag)) { \ 3978 if (buf[0] != '\0') \ 3979 strlcat(buf, ", ", sizeof(buf)); \ 3980 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 3981 mflags &= ~(flag); \ 3982 } \ 3983 } while (0) 3984 MNT_FLAG(MNT_RDONLY); 3985 MNT_FLAG(MNT_SYNCHRONOUS); 3986 MNT_FLAG(MNT_NOEXEC); 3987 MNT_FLAG(MNT_NOSUID); 3988 MNT_FLAG(MNT_NFS4ACLS); 3989 MNT_FLAG(MNT_UNION); 3990 MNT_FLAG(MNT_ASYNC); 3991 MNT_FLAG(MNT_SUIDDIR); 3992 MNT_FLAG(MNT_SOFTDEP); 3993 MNT_FLAG(MNT_NOSYMFOLLOW); 3994 MNT_FLAG(MNT_GJOURNAL); 3995 MNT_FLAG(MNT_MULTILABEL); 3996 MNT_FLAG(MNT_ACLS); 3997 MNT_FLAG(MNT_NOATIME); 3998 MNT_FLAG(MNT_NOCLUSTERR); 3999 MNT_FLAG(MNT_NOCLUSTERW); 4000 MNT_FLAG(MNT_SUJ); 4001 MNT_FLAG(MNT_EXRDONLY); 4002 MNT_FLAG(MNT_EXPORTED); 4003 MNT_FLAG(MNT_DEFEXPORTED); 4004 MNT_FLAG(MNT_EXPORTANON); 4005 MNT_FLAG(MNT_EXKERB); 4006 MNT_FLAG(MNT_EXPUBLIC); 4007 MNT_FLAG(MNT_LOCAL); 4008 MNT_FLAG(MNT_QUOTA); 4009 MNT_FLAG(MNT_ROOTFS); 4010 MNT_FLAG(MNT_USER); 4011 MNT_FLAG(MNT_IGNORE); 4012 MNT_FLAG(MNT_UPDATE); 4013 MNT_FLAG(MNT_DELEXPORT); 4014 MNT_FLAG(MNT_RELOAD); 4015 MNT_FLAG(MNT_FORCE); 4016 MNT_FLAG(MNT_SNAPSHOT); 4017 MNT_FLAG(MNT_BYFSID); 4018 #undef MNT_FLAG 4019 if (mflags != 0) { 4020 if (buf[0] != '\0') 4021 strlcat(buf, ", ", sizeof(buf)); 4022 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4023 "0x%016jx", mflags); 4024 } 4025 db_printf(" mnt_flag = %s\n", buf); 4026 4027 buf[0] = '\0'; 4028 flags = mp->mnt_kern_flag; 4029 #define MNT_KERN_FLAG(flag) do { \ 4030 if (flags & (flag)) { \ 4031 if (buf[0] != '\0') \ 4032 strlcat(buf, ", ", sizeof(buf)); \ 4033 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4034 flags &= ~(flag); \ 4035 } \ 4036 } while (0) 4037 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4038 MNT_KERN_FLAG(MNTK_ASYNC); 4039 MNT_KERN_FLAG(MNTK_SOFTDEP); 4040 MNT_KERN_FLAG(MNTK_DRAINING); 4041 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4042 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4043 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4044 MNT_KERN_FLAG(MNTK_NO_IOPF); 4045 MNT_KERN_FLAG(MNTK_VGONE_UPPER); 4046 MNT_KERN_FLAG(MNTK_VGONE_WAITER); 4047 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT); 4048 MNT_KERN_FLAG(MNTK_MARKER); 4049 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4050 MNT_KERN_FLAG(MNTK_NOASYNC); 4051 MNT_KERN_FLAG(MNTK_UNMOUNT); 4052 MNT_KERN_FLAG(MNTK_MWAIT); 4053 MNT_KERN_FLAG(MNTK_SUSPEND); 4054 MNT_KERN_FLAG(MNTK_SUSPEND2); 4055 MNT_KERN_FLAG(MNTK_SUSPENDED); 4056 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4057 MNT_KERN_FLAG(MNTK_NOKNOTE); 4058 #undef MNT_KERN_FLAG 4059 if (flags != 0) { 4060 if (buf[0] != '\0') 4061 strlcat(buf, ", ", sizeof(buf)); 4062 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4063 "0x%08x", flags); 4064 } 4065 db_printf(" mnt_kern_flag = %s\n", buf); 4066 4067 db_printf(" mnt_opt = "); 4068 opt = TAILQ_FIRST(mp->mnt_opt); 4069 if (opt != NULL) { 4070 db_printf("%s", opt->name); 4071 opt = TAILQ_NEXT(opt, link); 4072 while (opt != NULL) { 4073 db_printf(", %s", opt->name); 4074 opt = TAILQ_NEXT(opt, link); 4075 } 4076 } 4077 db_printf("\n"); 4078 4079 
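/* Dump the cached statfs snapshot, the mount credential and the various reference and write counters kept in struct mount. */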
sp = &mp->mnt_stat; 4080 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4081 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4082 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4083 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4084 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4085 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4086 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4087 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4088 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4089 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4090 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4091 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4092 4093 db_printf(" mnt_cred = { uid=%u ruid=%u", 4094 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4095 if (jailed(mp->mnt_cred)) 4096 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4097 db_printf(" }\n"); 4098 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4099 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4100 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4101 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4102 db_printf(" mnt_activevnodelistsize = %d\n", 4103 mp->mnt_activevnodelistsize); 4104 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4105 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4106 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen); 4107 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4108 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4109 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4110 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4111 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4112 db_printf(" mnt_secondary_accwrites = %d\n", 4113 mp->mnt_secondary_accwrites); 4114 db_printf(" mnt_gjprovider = %s\n", 4115 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4116 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4117 4118 db_printf("\n\nList of active vnodes\n"); 4119 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) { 4120 if (vp->v_type != VMARKER) { 4121 vn_printf(vp, "vnode "); 4122 if (db_pager_quit) 4123 break; 4124 } 4125 } 4126 db_printf("\n\nList of inactive vnodes\n"); 4127 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4128 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) { 4129 vn_printf(vp, "vnode "); 4130 if (db_pager_quit) 4131 break; 4132 } 4133 } 4134 } 4135 #endif /* DDB */ 4136 4137 /* 4138 * Fill in a struct xvfsconf based on a struct vfsconf. 4139 */ 4140 static int 4141 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4142 { 4143 struct xvfsconf xvfsp; 4144 4145 bzero(&xvfsp, sizeof(xvfsp)); 4146 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4147 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4148 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4149 xvfsp.vfc_flags = vfsp->vfc_flags; 4150 /* 4151 * These are unused in userland, we keep them 4152 * to not break binary compatibility. 
4153 */ 4154 xvfsp.vfc_vfsops = NULL; 4155 xvfsp.vfc_next = NULL; 4156 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4157 } 4158 4159 #ifdef COMPAT_FREEBSD32 4160 struct xvfsconf32 { 4161 uint32_t vfc_vfsops; 4162 char vfc_name[MFSNAMELEN]; 4163 int32_t vfc_typenum; 4164 int32_t vfc_refcount; 4165 int32_t vfc_flags; 4166 uint32_t vfc_next; 4167 }; 4168 4169 static int 4170 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4171 { 4172 struct xvfsconf32 xvfsp; 4173 4174 bzero(&xvfsp, sizeof(xvfsp)); 4175 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4176 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4177 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4178 xvfsp.vfc_flags = vfsp->vfc_flags; 4179 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4180 } 4181 #endif 4182 4183 /* 4184 * Top level filesystem related information gathering. 4185 */ 4186 static int 4187 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4188 { 4189 struct vfsconf *vfsp; 4190 int error; 4191 4192 error = 0; 4193 vfsconf_slock(); 4194 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4195 #ifdef COMPAT_FREEBSD32 4196 if (req->flags & SCTL_MASK32) 4197 error = vfsconf2x32(req, vfsp); 4198 else 4199 #endif 4200 error = vfsconf2x(req, vfsp); 4201 if (error) 4202 break; 4203 } 4204 vfsconf_sunlock(); 4205 return (error); 4206 } 4207 4208 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4209 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4210 "S,xvfsconf", "List of all configured filesystems"); 4211 4212 #ifndef BURN_BRIDGES 4213 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4214 4215 static int 4216 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4217 { 4218 int *name = (int *)arg1 - 1; /* XXX */ 4219 u_int namelen = arg2 + 1; /* XXX */ 4220 struct vfsconf *vfsp; 4221 4222 log(LOG_WARNING, "userland calling deprecated sysctl, " 4223 "please rebuild world\n"); 4224 4225 #if 1 || defined(COMPAT_PRELITE2) 4226 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
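A one-component name cannot be told apart from the old-style VFS_VFSCONF request (the two identifiers share the same numeric value), so hand it to sysctl_ovfs_conf() below.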
*/ 4227 if (namelen == 1) 4228 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4229 #endif 4230 4231 switch (name[1]) { 4232 case VFS_MAXTYPENUM: 4233 if (namelen != 2) 4234 return (ENOTDIR); 4235 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4236 case VFS_CONF: 4237 if (namelen != 3) 4238 return (ENOTDIR); /* overloaded */ 4239 vfsconf_slock(); 4240 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4241 if (vfsp->vfc_typenum == name[2]) 4242 break; 4243 } 4244 vfsconf_sunlock(); 4245 if (vfsp == NULL) 4246 return (EOPNOTSUPP); 4247 #ifdef COMPAT_FREEBSD32 4248 if (req->flags & SCTL_MASK32) 4249 return (vfsconf2x32(req, vfsp)); 4250 else 4251 #endif 4252 return (vfsconf2x(req, vfsp)); 4253 } 4254 return (EOPNOTSUPP); 4255 } 4256 4257 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4258 CTLFLAG_MPSAFE, vfs_sysctl, 4259 "Generic filesystem"); 4260 4261 #if 1 || defined(COMPAT_PRELITE2) 4262 4263 static int 4264 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4265 { 4266 int error; 4267 struct vfsconf *vfsp; 4268 struct ovfsconf ovfs; 4269 4270 vfsconf_slock(); 4271 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4272 bzero(&ovfs, sizeof(ovfs)); 4273 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4274 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4275 ovfs.vfc_index = vfsp->vfc_typenum; 4276 ovfs.vfc_refcount = vfsp->vfc_refcount; 4277 ovfs.vfc_flags = vfsp->vfc_flags; 4278 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4279 if (error != 0) { 4280 vfsconf_sunlock(); 4281 return (error); 4282 } 4283 } 4284 vfsconf_sunlock(); 4285 return (0); 4286 } 4287 4288 #endif /* 1 || COMPAT_PRELITE2 */ 4289 #endif /* !BURN_BRIDGES */ 4290 4291 #define KINFO_VNODESLOP 10 4292 #ifdef notyet 4293 /* 4294 * Dump vnode list (via sysctl). 4295 */ 4296 /* ARGSUSED */ 4297 static int 4298 sysctl_vnode(SYSCTL_HANDLER_ARGS) 4299 { 4300 struct xvnode *xvn; 4301 struct mount *mp; 4302 struct vnode *vp; 4303 int error, len, n; 4304 4305 /* 4306 * Stale numvnodes access is not fatal here. 4307 */ 4308 req->lock = 0; 4309 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn; 4310 if (!req->oldptr) 4311 /* Make an estimate */ 4312 return (SYSCTL_OUT(req, 0, len)); 4313 4314 error = sysctl_wire_old_buffer(req, 0); 4315 if (error != 0) 4316 return (error); 4317 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK); 4318 n = 0; 4319 mtx_lock(&mountlist_mtx); 4320 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4321 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) 4322 continue; 4323 MNT_ILOCK(mp); 4324 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4325 if (n == len) 4326 break; 4327 vref(vp); 4328 xvn[n].xv_size = sizeof *xvn; 4329 xvn[n].xv_vnode = vp; 4330 xvn[n].xv_id = 0; /* XXX compat */ 4331 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field 4332 XV_COPY(usecount); 4333 XV_COPY(writecount); 4334 XV_COPY(holdcnt); 4335 XV_COPY(mount); 4336 XV_COPY(numoutput); 4337 XV_COPY(type); 4338 #undef XV_COPY 4339 xvn[n].xv_flag = vp->v_vflag; 4340 4341 switch (vp->v_type) { 4342 case VREG: 4343 case VDIR: 4344 case VLNK: 4345 break; 4346 case VBLK: 4347 case VCHR: 4348 if (vp->v_rdev == NULL) { 4349 vrele(vp); 4350 continue; 4351 } 4352 xvn[n].xv_dev = dev2udev(vp->v_rdev); 4353 break; 4354 case VSOCK: 4355 xvn[n].xv_socket = vp->v_socket; 4356 break; 4357 case VFIFO: 4358 xvn[n].xv_fifo = vp->v_fifoinfo; 4359 break; 4360 case VNON: 4361 case VBAD: 4362 default: 4363 /* shouldn't happen? 
*/ 4364 vrele(vp); 4365 continue; 4366 } 4367 vrele(vp); 4368 ++n; 4369 } 4370 MNT_IUNLOCK(mp); 4371 mtx_lock(&mountlist_mtx); 4372 vfs_unbusy(mp); 4373 if (n == len) 4374 break; 4375 } 4376 mtx_unlock(&mountlist_mtx); 4377 4378 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn); 4379 free(xvn, M_TEMP); 4380 return (error); 4381 } 4382 4383 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD | 4384 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode", 4385 ""); 4386 #endif 4387 4388 static void 4389 unmount_or_warn(struct mount *mp) 4390 { 4391 int error; 4392 4393 error = dounmount(mp, MNT_FORCE, curthread); 4394 if (error != 0) { 4395 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4396 if (error == EBUSY) 4397 printf("BUSY)\n"); 4398 else 4399 printf("%d)\n", error); 4400 } 4401 } 4402 4403 /* 4404 * Unmount all filesystems. The list is traversed in reverse order 4405 * of mounting to avoid dependencies. 4406 */ 4407 void 4408 vfs_unmountall(void) 4409 { 4410 struct mount *mp, *tmp; 4411 4412 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4413 4414 /* 4415 * Since this only runs when rebooting, it is not interlocked. 4416 */ 4417 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4418 vfs_ref(mp); 4419 4420 /* 4421 * Forcibly unmounting "/dev" before "/" would prevent clean 4422 * unmount of the latter. 4423 */ 4424 if (mp == rootdevmp) 4425 continue; 4426 4427 unmount_or_warn(mp); 4428 } 4429 4430 if (rootdevmp != NULL) 4431 unmount_or_warn(rootdevmp); 4432 } 4433 4434 static void 4435 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4436 { 4437 4438 ASSERT_VI_LOCKED(vp, __func__); 4439 VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set")); 4440 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4441 vdropl(vp); 4442 return; 4443 } 4444 if (vn_lock(vp, lkflags) == 0) { 4445 VI_LOCK(vp); 4446 if ((vp->v_iflag & (VI_OWEINACT | VI_DOINGINACT)) == VI_OWEINACT) 4447 vinactive(vp); 4448 VOP_UNLOCK(vp); 4449 vdropl(vp); 4450 return; 4451 } 4452 vdefer_inactive_cond(vp); 4453 } 4454 4455 static void __noinline 4456 vfs_periodic_inactive(struct mount *mp, int flags) 4457 { 4458 struct vnode *vp, *mvp; 4459 int lkflags; 4460 4461 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4462 if (flags != MNT_WAIT) 4463 lkflags |= LK_NOWAIT; 4464 4465 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4466 if ((vp->v_iflag & VI_DEFINACT) == 0) { 4467 VI_UNLOCK(vp); 4468 continue; 4469 } 4470 vp->v_iflag &= ~VI_DEFINACT; 4471 vfs_deferred_inactive(vp, lkflags); 4472 } 4473 } 4474 4475 static inline bool 4476 vfs_want_msync(struct vnode *vp) 4477 { 4478 struct vm_object *obj; 4479 4480 if (vp->v_vflag & VV_NOSYNC) 4481 return (false); 4482 obj = vp->v_object; 4483 return (obj != NULL && vm_object_mightbedirty(obj)); 4484 } 4485 4486 static void __noinline 4487 vfs_periodic_msync_inactive(struct mount *mp, int flags) 4488 { 4489 struct vnode *vp, *mvp; 4490 struct vm_object *obj; 4491 struct thread *td; 4492 int lkflags, objflags; 4493 bool seen_defer; 4494 4495 td = curthread; 4496 4497 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 4498 if (flags != MNT_WAIT) { 4499 lkflags |= LK_NOWAIT; 4500 objflags = OBJPC_NOSYNC; 4501 } else { 4502 objflags = OBJPC_SYNC; 4503 } 4504 4505 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) { 4506 seen_defer = false; 4507 if (vp->v_iflag & VI_DEFINACT) { 4508 vp->v_iflag &= ~VI_DEFINACT; 4509 seen_defer = true; 4510 } 4511 if (!vfs_want_msync(vp)) { 4512 if (seen_defer) 4513 vfs_deferred_inactive(vp, lkflags); 4514 else 4515 VI_UNLOCK(vp); 4516 continue; 
4517 } 4518 if (vget(vp, lkflags, td) == 0) { 4519 obj = vp->v_object; 4520 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 4521 VM_OBJECT_WLOCK(obj); 4522 vm_object_page_clean(obj, 0, 0, objflags); 4523 VM_OBJECT_WUNLOCK(obj); 4524 } 4525 vput(vp); 4526 if (seen_defer) 4527 vdrop(vp); 4528 } else { 4529 if (seen_defer) 4530 vdefer_inactive_cond(vp); 4531 } 4532 } 4533 } 4534 4535 void 4536 vfs_periodic(struct mount *mp, int flags) 4537 { 4538 4539 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 4540 4541 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 4542 vfs_periodic_inactive(mp, flags); 4543 else 4544 vfs_periodic_msync_inactive(mp, flags); 4545 } 4546 4547 static void 4548 destroy_vpollinfo_free(struct vpollinfo *vi) 4549 { 4550 4551 knlist_destroy(&vi->vpi_selinfo.si_note); 4552 mtx_destroy(&vi->vpi_lock); 4553 uma_zfree(vnodepoll_zone, vi); 4554 } 4555 4556 static void 4557 destroy_vpollinfo(struct vpollinfo *vi) 4558 { 4559 4560 knlist_clear(&vi->vpi_selinfo.si_note, 1); 4561 seldrain(&vi->vpi_selinfo); 4562 destroy_vpollinfo_free(vi); 4563 } 4564 4565 /* 4566 * Initialize per-vnode helper structure to hold poll-related state. 4567 */ 4568 void 4569 v_addpollinfo(struct vnode *vp) 4570 { 4571 struct vpollinfo *vi; 4572 4573 if (vp->v_pollinfo != NULL) 4574 return; 4575 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO); 4576 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); 4577 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, 4578 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked); 4579 VI_LOCK(vp); 4580 if (vp->v_pollinfo != NULL) { 4581 VI_UNLOCK(vp); 4582 destroy_vpollinfo_free(vi); 4583 return; 4584 } 4585 vp->v_pollinfo = vi; 4586 VI_UNLOCK(vp); 4587 } 4588 4589 /* 4590 * Record a process's interest in events which might happen to 4591 * a vnode. Because poll uses the historic select-style interface 4592 * internally, this routine serves as both the ``check for any 4593 * pending events'' and the ``record my interest in future events'' 4594 * functions. (These are done together, while the lock is held, 4595 * to avoid race conditions.) 4596 */ 4597 int 4598 vn_pollrecord(struct vnode *vp, struct thread *td, int events) 4599 { 4600 4601 v_addpollinfo(vp); 4602 mtx_lock(&vp->v_pollinfo->vpi_lock); 4603 if (vp->v_pollinfo->vpi_revents & events) { 4604 /* 4605 * This leaves events we are not interested 4606 * in available for the other process which 4607 * which presumably had requested them 4608 * (otherwise they would never have been 4609 * recorded). 4610 */ 4611 events &= vp->v_pollinfo->vpi_revents; 4612 vp->v_pollinfo->vpi_revents &= ~events; 4613 4614 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4615 return (events); 4616 } 4617 vp->v_pollinfo->vpi_events |= events; 4618 selrecord(td, &vp->v_pollinfo->vpi_selinfo); 4619 mtx_unlock(&vp->v_pollinfo->vpi_lock); 4620 return (0); 4621 } 4622 4623 /* 4624 * Routine to create and manage a filesystem syncer vnode. 
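 * Each mounted filesystem gets one such vnode; the syncer daemon elsewhere in
 * this file periodically fsyncs it with MNT_LAZY, roughly (illustrative
 * sketch of the caller):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	(void)VOP_FSYNC(vp, MNT_LAZY, td);
 *	VOP_UNLOCK(vp);
 *
 * which ends up in sync_fsync() below.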
4625 */ 4626 #define sync_close ((int (*)(struct vop_close_args *))nullop) 4627 static int sync_fsync(struct vop_fsync_args *); 4628 static int sync_inactive(struct vop_inactive_args *); 4629 static int sync_reclaim(struct vop_reclaim_args *); 4630 4631 static struct vop_vector sync_vnodeops = { 4632 .vop_bypass = VOP_EOPNOTSUPP, 4633 .vop_close = sync_close, /* close */ 4634 .vop_fsync = sync_fsync, /* fsync */ 4635 .vop_inactive = sync_inactive, /* inactive */ 4636 .vop_need_inactive = vop_stdneed_inactive, /* need_inactive */ 4637 .vop_reclaim = sync_reclaim, /* reclaim */ 4638 .vop_lock1 = vop_stdlock, /* lock */ 4639 .vop_unlock = vop_stdunlock, /* unlock */ 4640 .vop_islocked = vop_stdislocked, /* islocked */ 4641 }; 4642 VFS_VOP_VECTOR_REGISTER(sync_vnodeops); 4643 4644 /* 4645 * Create a new filesystem syncer vnode for the specified mount point. 4646 */ 4647 void 4648 vfs_allocate_syncvnode(struct mount *mp) 4649 { 4650 struct vnode *vp; 4651 struct bufobj *bo; 4652 static long start, incr, next; 4653 int error; 4654 4655 /* Allocate a new vnode */ 4656 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp); 4657 if (error != 0) 4658 panic("vfs_allocate_syncvnode: getnewvnode() failed"); 4659 vp->v_type = VNON; 4660 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4661 vp->v_vflag |= VV_FORCEINSMQ; 4662 error = insmntque(vp, mp); 4663 if (error != 0) 4664 panic("vfs_allocate_syncvnode: insmntque() failed"); 4665 vp->v_vflag &= ~VV_FORCEINSMQ; 4666 VOP_UNLOCK(vp); 4667 /* 4668 * Place the vnode onto the syncer worklist. We attempt to 4669 * scatter them about on the list so that they will go off 4670 * at evenly distributed times even if all the filesystems 4671 * are mounted at once. 4672 */ 4673 next += incr; 4674 if (next == 0 || next > syncer_maxdelay) { 4675 start /= 2; 4676 incr /= 2; 4677 if (start == 0) { 4678 start = syncer_maxdelay / 2; 4679 incr = syncer_maxdelay; 4680 } 4681 next = start; 4682 } 4683 bo = &vp->v_bufobj; 4684 BO_LOCK(bo); 4685 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0); 4686 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ 4687 mtx_lock(&sync_mtx); 4688 sync_vnode_count++; 4689 if (mp->mnt_syncer == NULL) { 4690 mp->mnt_syncer = vp; 4691 vp = NULL; 4692 } 4693 mtx_unlock(&sync_mtx); 4694 BO_UNLOCK(bo); 4695 if (vp != NULL) { 4696 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 4697 vgone(vp); 4698 vput(vp); 4699 } 4700 } 4701 4702 void 4703 vfs_deallocate_syncvnode(struct mount *mp) 4704 { 4705 struct vnode *vp; 4706 4707 mtx_lock(&sync_mtx); 4708 vp = mp->mnt_syncer; 4709 if (vp != NULL) 4710 mp->mnt_syncer = NULL; 4711 mtx_unlock(&sync_mtx); 4712 if (vp != NULL) 4713 vrele(vp); 4714 } 4715 4716 /* 4717 * Do a lazy sync of the filesystem. 4718 */ 4719 static int 4720 sync_fsync(struct vop_fsync_args *ap) 4721 { 4722 struct vnode *syncvp = ap->a_vp; 4723 struct mount *mp = syncvp->v_mount; 4724 int error, save; 4725 struct bufobj *bo; 4726 4727 /* 4728 * We only need to do something if this is a lazy evaluation. 4729 */ 4730 if (ap->a_waitfor != MNT_LAZY) 4731 return (0); 4732 4733 /* 4734 * Move ourselves to the back of the sync list. 4735 */ 4736 bo = &syncvp->v_bufobj; 4737 BO_LOCK(bo); 4738 vn_syncer_add_to_worklist(bo, syncdelay); 4739 BO_UNLOCK(bo); 4740 4741 /* 4742 * Walk the list of vnodes pushing all that are dirty and 4743 * not already on the sync list. 
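 * The walk itself is now done by vfs_periodic() and VFS_SYNC(MNT_LAZY) below.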
4744 */ 4745 if (vfs_busy(mp, MBF_NOWAIT) != 0) 4746 return (0); 4747 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { 4748 vfs_unbusy(mp); 4749 return (0); 4750 } 4751 save = curthread_pflags_set(TDP_SYNCIO); 4752 /* 4753 * The filesystem at hand may be idle with free vnodes stored in the 4754 * batch. Return them instead of letting them stay there indefinitely. 4755 */ 4756 vnlru_return_batch(mp); 4757 vfs_periodic(mp, MNT_NOWAIT); 4758 error = VFS_SYNC(mp, MNT_LAZY); 4759 curthread_pflags_restore(save); 4760 vn_finished_write(mp); 4761 vfs_unbusy(mp); 4762 return (error); 4763 } 4764 4765 /* 4766 * The syncer vnode is no longer referenced. 4767 */ 4768 static int 4769 sync_inactive(struct vop_inactive_args *ap) 4770 { 4771 4772 vgone(ap->a_vp); 4773 return (0); 4774 } 4775 4776 /* 4777 * The syncer vnode is no longer needed and is being decommissioned. 4778 * 4779 * Modifications to the worklist must be protected by sync_mtx. 4780 */ 4781 static int 4782 sync_reclaim(struct vop_reclaim_args *ap) 4783 { 4784 struct vnode *vp = ap->a_vp; 4785 struct bufobj *bo; 4786 4787 bo = &vp->v_bufobj; 4788 BO_LOCK(bo); 4789 mtx_lock(&sync_mtx); 4790 if (vp->v_mount->mnt_syncer == vp) 4791 vp->v_mount->mnt_syncer = NULL; 4792 if (bo->bo_flag & BO_ONWORKLST) { 4793 LIST_REMOVE(bo, bo_synclist); 4794 syncer_worklist_len--; 4795 sync_vnode_count--; 4796 bo->bo_flag &= ~BO_ONWORKLST; 4797 } 4798 mtx_unlock(&sync_mtx); 4799 BO_UNLOCK(bo); 4800 4801 return (0); 4802 } 4803 4804 int 4805 vn_need_pageq_flush(struct vnode *vp) 4806 { 4807 struct vm_object *obj; 4808 int need; 4809 4810 MPASS(mtx_owned(VI_MTX(vp))); 4811 need = 0; 4812 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4813 vm_object_mightbedirty(obj)) 4814 need = 1; 4815 return (need); 4816 } 4817 4818 /* 4819 * Check if the vnode represents a disk device. 4820 */ 4821 int 4822 vn_isdisk(struct vnode *vp, int *errp) 4823 { 4824 int error; 4825 4826 if (vp->v_type != VCHR) { 4827 error = ENOTBLK; 4828 goto out; 4829 } 4830 error = 0; 4831 dev_lock(); 4832 if (vp->v_rdev == NULL) 4833 error = ENXIO; 4834 else if (vp->v_rdev->si_devsw == NULL) 4835 error = ENXIO; 4836 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) 4837 error = ENOTBLK; 4838 dev_unlock(); 4839 out: 4840 if (errp != NULL) 4841 *errp = error; 4842 return (error == 0); 4843 } 4844 4845 /* 4846 * Common filesystem object access control check routine. Accepts a 4847 * vnode's type, "mode", uid and gid, requested access mode, credentials, 4848 * and optional call-by-reference privused argument allowing vaccess() 4849 * to indicate to the caller whether privilege was used to satisfy the 4850 * request (obsoleted). Returns 0 on success, or an errno on failure. 4851 */ 4852 int 4853 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 4854 accmode_t accmode, struct ucred *cred, int *privused) 4855 { 4856 accmode_t dac_granted; 4857 accmode_t priv_granted; 4858 4859 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 4860 ("invalid bit in accmode")); 4861 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 4862 ("VAPPEND without VWRITE")); 4863 4864 /* 4865 * Look for a normal, non-privileged way to access the file/directory 4866 * as requested. If it exists, go with that. 4867 */ 4868 4869 if (privused != NULL) 4870 *privused = 0; 4871 4872 dac_granted = 0; 4873 4874 /* Check the owner.
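For instance, with file_mode 0640 and a credential whose cr_uid matches file_uid, this branch grants VADMIN | VREAD | VWRITE | VAPPEND; if VEXEC was also requested, the check falls through to the privilege handling below (worked example for illustration).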
*/ 4875 if (cred->cr_uid == file_uid) { 4876 dac_granted |= VADMIN; 4877 if (file_mode & S_IXUSR) 4878 dac_granted |= VEXEC; 4879 if (file_mode & S_IRUSR) 4880 dac_granted |= VREAD; 4881 if (file_mode & S_IWUSR) 4882 dac_granted |= (VWRITE | VAPPEND); 4883 4884 if ((accmode & dac_granted) == accmode) 4885 return (0); 4886 4887 goto privcheck; 4888 } 4889 4890 /* Otherwise, check the groups (first match) */ 4891 if (groupmember(file_gid, cred)) { 4892 if (file_mode & S_IXGRP) 4893 dac_granted |= VEXEC; 4894 if (file_mode & S_IRGRP) 4895 dac_granted |= VREAD; 4896 if (file_mode & S_IWGRP) 4897 dac_granted |= (VWRITE | VAPPEND); 4898 4899 if ((accmode & dac_granted) == accmode) 4900 return (0); 4901 4902 goto privcheck; 4903 } 4904 4905 /* Otherwise, check everyone else. */ 4906 if (file_mode & S_IXOTH) 4907 dac_granted |= VEXEC; 4908 if (file_mode & S_IROTH) 4909 dac_granted |= VREAD; 4910 if (file_mode & S_IWOTH) 4911 dac_granted |= (VWRITE | VAPPEND); 4912 if ((accmode & dac_granted) == accmode) 4913 return (0); 4914 4915 privcheck: 4916 /* 4917 * Build a privilege mask to determine if the set of privileges 4918 * satisfies the requirements when combined with the granted mask 4919 * from above. For each privilege, if the privilege is required, 4920 * bitwise or the request type onto the priv_granted mask. 4921 */ 4922 priv_granted = 0; 4923 4924 if (type == VDIR) { 4925 /* 4926 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 4927 * requests, instead of PRIV_VFS_EXEC. 4928 */ 4929 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4930 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 4931 priv_granted |= VEXEC; 4932 } else { 4933 /* 4934 * Ensure that at least one execute bit is on. Otherwise, 4935 * a privileged user will always succeed, and we don't want 4936 * this to happen unless the file really is executable. 4937 */ 4938 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 4939 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 4940 !priv_check_cred(cred, PRIV_VFS_EXEC)) 4941 priv_granted |= VEXEC; 4942 } 4943 4944 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 4945 !priv_check_cred(cred, PRIV_VFS_READ)) 4946 priv_granted |= VREAD; 4947 4948 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 4949 !priv_check_cred(cred, PRIV_VFS_WRITE)) 4950 priv_granted |= (VWRITE | VAPPEND); 4951 4952 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 4953 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 4954 priv_granted |= VADMIN; 4955 4956 if ((accmode & (priv_granted | dac_granted)) == accmode) { 4957 /* XXX audit: privilege used */ 4958 if (privused != NULL) 4959 *privused = 1; 4960 return (0); 4961 } 4962 4963 return ((accmode & VADMIN) ? EPERM : EACCES); 4964 } 4965 4966 /* 4967 * Credential check based on process requesting service, and per-attribute 4968 * permissions. 4969 */ 4970 int 4971 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 4972 struct thread *td, accmode_t accmode) 4973 { 4974 4975 /* 4976 * Kernel-invoked always succeeds. 4977 */ 4978 if (cred == NOCRED) 4979 return (0); 4980 4981 /* 4982 * Do not allow privileged processes in jail to directly manipulate 4983 * system attributes. 
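 * Such requests therefore go through priv_check_cred(PRIV_VFS_EXTATTR_SYSTEM),
 * which is expected to fail for jailed credentials.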
4984 */ 4985 switch (attrnamespace) { 4986 case EXTATTR_NAMESPACE_SYSTEM: 4987 /* Potentially should be: return (EPERM); */ 4988 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 4989 case EXTATTR_NAMESPACE_USER: 4990 return (VOP_ACCESS(vp, accmode, cred, td)); 4991 default: 4992 return (EPERM); 4993 } 4994 } 4995 4996 #ifdef DEBUG_VFS_LOCKS 4997 /* 4998 * This only exists to suppress warnings from unlocked specfs accesses. It is 4999 * no longer ok to have an unlocked VFS. 5000 */ 5001 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \ 5002 (vp)->v_type == VCHR || (vp)->v_type == VBAD) 5003 5004 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5005 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5006 "Drop into debugger on lock violation"); 5007 5008 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5009 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5010 0, "Check for interlock across VOPs"); 5011 5012 int vfs_badlock_print = 1; /* Print lock violations. */ 5013 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5014 0, "Print lock violations"); 5015 5016 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5017 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5018 0, "Print vnode details on lock violations"); 5019 5020 #ifdef KDB 5021 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5022 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5023 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5024 #endif 5025 5026 static void 5027 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5028 { 5029 5030 #ifdef KDB 5031 if (vfs_badlock_backtrace) 5032 kdb_backtrace(); 5033 #endif 5034 if (vfs_badlock_vnode) 5035 vn_printf(vp, "vnode "); 5036 if (vfs_badlock_print) 5037 printf("%s: %p %s\n", str, (void *)vp, msg); 5038 if (vfs_badlock_ddb) 5039 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5040 } 5041 5042 void 5043 assert_vi_locked(struct vnode *vp, const char *str) 5044 { 5045 5046 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5047 vfs_badlock("interlock is not locked but should be", str, vp); 5048 } 5049 5050 void 5051 assert_vi_unlocked(struct vnode *vp, const char *str) 5052 { 5053 5054 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5055 vfs_badlock("interlock is locked but should not be", str, vp); 5056 } 5057 5058 void 5059 assert_vop_locked(struct vnode *vp, const char *str) 5060 { 5061 int locked; 5062 5063 if (!IGNORE_LOCK(vp)) { 5064 locked = VOP_ISLOCKED(vp); 5065 if (locked == 0 || locked == LK_EXCLOTHER) 5066 vfs_badlock("is not locked but should be", str, vp); 5067 } 5068 } 5069 5070 void 5071 assert_vop_unlocked(struct vnode *vp, const char *str) 5072 { 5073 5074 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5075 vfs_badlock("is locked but should not be", str, vp); 5076 } 5077 5078 void 5079 assert_vop_elocked(struct vnode *vp, const char *str) 5080 { 5081 5082 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5083 vfs_badlock("is not exclusive locked but should be", str, vp); 5084 } 5085 #endif /* DEBUG_VFS_LOCKS */ 5086 5087 void 5088 vop_rename_fail(struct vop_rename_args *ap) 5089 { 5090 5091 if (ap->a_tvp != NULL) 5092 vput(ap->a_tvp); 5093 if (ap->a_tdvp == ap->a_tvp) 5094 vrele(ap->a_tdvp); 5095 else 5096 vput(ap->a_tdvp); 5097 vrele(ap->a_fdvp); 5098 vrele(ap->a_fvp); 5099 } 5100 5101 void 
5102 vop_rename_pre(void *ap) 5103 { 5104 struct vop_rename_args *a = ap; 5105 5106 #ifdef DEBUG_VFS_LOCKS 5107 if (a->a_tvp) 5108 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5109 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5110 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5111 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5112 5113 /* Check the source (from). */ 5114 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5115 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5116 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5117 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5118 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5119 5120 /* Check the target. */ 5121 if (a->a_tvp) 5122 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5123 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5124 #endif 5125 if (a->a_tdvp != a->a_fdvp) 5126 vhold(a->a_fdvp); 5127 if (a->a_tvp != a->a_fvp) 5128 vhold(a->a_fvp); 5129 vhold(a->a_tdvp); 5130 if (a->a_tvp) 5131 vhold(a->a_tvp); 5132 } 5133 5134 #ifdef DEBUG_VFS_LOCKS 5135 void 5136 vop_strategy_pre(void *ap) 5137 { 5138 struct vop_strategy_args *a; 5139 struct buf *bp; 5140 5141 a = ap; 5142 bp = a->a_bp; 5143 5144 /* 5145 * Cluster ops lock their component buffers but not the IO container. 5146 */ 5147 if ((bp->b_flags & B_CLUSTER) != 0) 5148 return; 5149 5150 if (panicstr == NULL && !BUF_ISLOCKED(bp)) { 5151 if (vfs_badlock_print) 5152 printf( 5153 "VOP_STRATEGY: bp is not locked but should be\n"); 5154 if (vfs_badlock_ddb) 5155 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5156 } 5157 } 5158 5159 void 5160 vop_lock_pre(void *ap) 5161 { 5162 struct vop_lock1_args *a = ap; 5163 5164 if ((a->a_flags & LK_INTERLOCK) == 0) 5165 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5166 else 5167 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5168 } 5169 5170 void 5171 vop_lock_post(void *ap, int rc) 5172 { 5173 struct vop_lock1_args *a = ap; 5174 5175 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5176 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5177 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5178 } 5179 5180 void 5181 vop_unlock_pre(void *ap) 5182 { 5183 struct vop_unlock_args *a = ap; 5184 5185 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK"); 5186 } 5187 5188 void 5189 vop_unlock_post(void *ap, int rc) 5190 { 5191 return; 5192 } 5193 5194 void 5195 vop_need_inactive_pre(void *ap) 5196 { 5197 struct vop_need_inactive_args *a = ap; 5198 5199 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5200 } 5201 5202 void 5203 vop_need_inactive_post(void *ap, int rc) 5204 { 5205 struct vop_need_inactive_args *a = ap; 5206 5207 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5208 } 5209 #endif 5210 5211 void 5212 vop_create_post(void *ap, int rc) 5213 { 5214 struct vop_create_args *a = ap; 5215 5216 if (!rc) 5217 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5218 } 5219 5220 void 5221 vop_deleteextattr_post(void *ap, int rc) 5222 { 5223 struct vop_deleteextattr_args *a = ap; 5224 5225 if (!rc) 5226 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5227 } 5228 5229 void 5230 vop_link_post(void *ap, int rc) 5231 { 5232 struct vop_link_args *a = ap; 5233 5234 if (!rc) { 5235 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK); 5236 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE); 5237 } 5238 } 5239 5240 void 5241 vop_mkdir_post(void *ap, int rc) 5242 { 5243 struct vop_mkdir_args *a = ap; 5244 5245 if (!rc) 5246 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5247 } 5248 5249 void 5250 vop_mknod_post(void *ap, int rc) 5251 { 5252 struct vop_mknod_args *a = ap; 
5253 5254 if (!rc) 5255 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5256 } 5257 5258 void 5259 vop_reclaim_post(void *ap, int rc) 5260 { 5261 struct vop_reclaim_args *a = ap; 5262 5263 if (!rc) 5264 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE); 5265 } 5266 5267 void 5268 vop_remove_post(void *ap, int rc) 5269 { 5270 struct vop_remove_args *a = ap; 5271 5272 if (!rc) { 5273 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5274 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5275 } 5276 } 5277 5278 void 5279 vop_rename_post(void *ap, int rc) 5280 { 5281 struct vop_rename_args *a = ap; 5282 long hint; 5283 5284 if (!rc) { 5285 hint = NOTE_WRITE; 5286 if (a->a_fdvp == a->a_tdvp) { 5287 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 5288 hint |= NOTE_LINK; 5289 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5290 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5291 } else { 5292 hint |= NOTE_EXTEND; 5293 if (a->a_fvp->v_type == VDIR) 5294 hint |= NOTE_LINK; 5295 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 5296 5297 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 5298 a->a_tvp->v_type == VDIR) 5299 hint &= ~NOTE_LINK; 5300 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 5301 } 5302 5303 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 5304 if (a->a_tvp) 5305 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 5306 } 5307 if (a->a_tdvp != a->a_fdvp) 5308 vdrop(a->a_fdvp); 5309 if (a->a_tvp != a->a_fvp) 5310 vdrop(a->a_fvp); 5311 vdrop(a->a_tdvp); 5312 if (a->a_tvp) 5313 vdrop(a->a_tvp); 5314 } 5315 5316 void 5317 vop_rmdir_post(void *ap, int rc) 5318 { 5319 struct vop_rmdir_args *a = ap; 5320 5321 if (!rc) { 5322 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK); 5323 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE); 5324 } 5325 } 5326 5327 void 5328 vop_setattr_post(void *ap, int rc) 5329 { 5330 struct vop_setattr_args *a = ap; 5331 5332 if (!rc) 5333 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5334 } 5335 5336 void 5337 vop_setextattr_post(void *ap, int rc) 5338 { 5339 struct vop_setextattr_args *a = ap; 5340 5341 if (!rc) 5342 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5343 } 5344 5345 void 5346 vop_symlink_post(void *ap, int rc) 5347 { 5348 struct vop_symlink_args *a = ap; 5349 5350 if (!rc) 5351 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE); 5352 } 5353 5354 void 5355 vop_open_post(void *ap, int rc) 5356 { 5357 struct vop_open_args *a = ap; 5358 5359 if (!rc) 5360 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 5361 } 5362 5363 void 5364 vop_close_post(void *ap, int rc) 5365 { 5366 struct vop_close_args *a = ap; 5367 5368 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 5369 !VN_IS_DOOMED(a->a_vp))) { 5370 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 5371 NOTE_CLOSE_WRITE : NOTE_CLOSE); 5372 } 5373 } 5374 5375 void 5376 vop_read_post(void *ap, int rc) 5377 { 5378 struct vop_read_args *a = ap; 5379 5380 if (!rc) 5381 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5382 } 5383 5384 void 5385 vop_readdir_post(void *ap, int rc) 5386 { 5387 struct vop_readdir_args *a = ap; 5388 5389 if (!rc) 5390 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 5391 } 5392 5393 static struct knlist fs_knlist; 5394 5395 static void 5396 vfs_event_init(void *arg) 5397 { 5398 knlist_init_mtx(&fs_knlist, NULL); 5399 } 5400 /* XXX - correct order? 
*/ 5401 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 5402 5403 void 5404 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 5405 { 5406 5407 KNOTE_UNLOCKED(&fs_knlist, event); 5408 } 5409 5410 static int filt_fsattach(struct knote *kn); 5411 static void filt_fsdetach(struct knote *kn); 5412 static int filt_fsevent(struct knote *kn, long hint); 5413 5414 struct filterops fs_filtops = { 5415 .f_isfd = 0, 5416 .f_attach = filt_fsattach, 5417 .f_detach = filt_fsdetach, 5418 .f_event = filt_fsevent 5419 }; 5420 5421 static int 5422 filt_fsattach(struct knote *kn) 5423 { 5424 5425 kn->kn_flags |= EV_CLEAR; 5426 knlist_add(&fs_knlist, kn, 0); 5427 return (0); 5428 } 5429 5430 static void 5431 filt_fsdetach(struct knote *kn) 5432 { 5433 5434 knlist_remove(&fs_knlist, kn, 0); 5435 } 5436 5437 static int 5438 filt_fsevent(struct knote *kn, long hint) 5439 { 5440 5441 kn->kn_fflags |= hint; 5442 return (kn->kn_fflags != 0); 5443 } 5444 5445 static int 5446 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 5447 { 5448 struct vfsidctl vc; 5449 int error; 5450 struct mount *mp; 5451 5452 error = SYSCTL_IN(req, &vc, sizeof(vc)); 5453 if (error) 5454 return (error); 5455 if (vc.vc_vers != VFS_CTL_VERS1) 5456 return (EINVAL); 5457 mp = vfs_getvfs(&vc.vc_fsid); 5458 if (mp == NULL) 5459 return (ENOENT); 5460 /* ensure that a specific sysctl goes to the right filesystem. */ 5461 if (strcmp(vc.vc_fstypename, "*") != 0 && 5462 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 5463 vfs_rel(mp); 5464 return (EINVAL); 5465 } 5466 VCTLTOREQ(&vc, req); 5467 error = VFS_SYSCTL(mp, vc.vc_op, req); 5468 vfs_rel(mp); 5469 return (error); 5470 } 5471 5472 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 5473 NULL, 0, sysctl_vfs_ctl, "", 5474 "Sysctl by fsid"); 5475 5476 /* 5477 * Function to initialize a va_filerev field sensibly. 5478 * XXX: Wouldn't a random number make a lot more sense ?? 
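 * The returned value packs the boot-relative uptime: the seconds land in the
 * upper 32 bits and the top half of the fraction in the lower 32 bits, so,
 * for example, bt.sec == 5 with bt.frac == 1ULL << 63 yields
 * 0x0000000580000000 (worked example for illustration).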
5479 */ 5480 u_quad_t 5481 init_va_filerev(void) 5482 { 5483 struct bintime bt; 5484 5485 getbinuptime(&bt); 5486 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 5487 } 5488 5489 static int filt_vfsread(struct knote *kn, long hint); 5490 static int filt_vfswrite(struct knote *kn, long hint); 5491 static int filt_vfsvnode(struct knote *kn, long hint); 5492 static void filt_vfsdetach(struct knote *kn); 5493 static struct filterops vfsread_filtops = { 5494 .f_isfd = 1, 5495 .f_detach = filt_vfsdetach, 5496 .f_event = filt_vfsread 5497 }; 5498 static struct filterops vfswrite_filtops = { 5499 .f_isfd = 1, 5500 .f_detach = filt_vfsdetach, 5501 .f_event = filt_vfswrite 5502 }; 5503 static struct filterops vfsvnode_filtops = { 5504 .f_isfd = 1, 5505 .f_detach = filt_vfsdetach, 5506 .f_event = filt_vfsvnode 5507 }; 5508 5509 static void 5510 vfs_knllock(void *arg) 5511 { 5512 struct vnode *vp = arg; 5513 5514 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 5515 } 5516 5517 static void 5518 vfs_knlunlock(void *arg) 5519 { 5520 struct vnode *vp = arg; 5521 5522 VOP_UNLOCK(vp); 5523 } 5524 5525 static void 5526 vfs_knl_assert_locked(void *arg) 5527 { 5528 #ifdef DEBUG_VFS_LOCKS 5529 struct vnode *vp = arg; 5530 5531 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 5532 #endif 5533 } 5534 5535 static void 5536 vfs_knl_assert_unlocked(void *arg) 5537 { 5538 #ifdef DEBUG_VFS_LOCKS 5539 struct vnode *vp = arg; 5540 5541 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 5542 #endif 5543 } 5544 5545 int 5546 vfs_kqfilter(struct vop_kqfilter_args *ap) 5547 { 5548 struct vnode *vp = ap->a_vp; 5549 struct knote *kn = ap->a_kn; 5550 struct knlist *knl; 5551 5552 switch (kn->kn_filter) { 5553 case EVFILT_READ: 5554 kn->kn_fop = &vfsread_filtops; 5555 break; 5556 case EVFILT_WRITE: 5557 kn->kn_fop = &vfswrite_filtops; 5558 break; 5559 case EVFILT_VNODE: 5560 kn->kn_fop = &vfsvnode_filtops; 5561 break; 5562 default: 5563 return (EINVAL); 5564 } 5565 5566 kn->kn_hook = (caddr_t)vp; 5567 5568 v_addpollinfo(vp); 5569 if (vp->v_pollinfo == NULL) 5570 return (ENOMEM); 5571 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 5572 vhold(vp); 5573 knlist_add(knl, kn, 0); 5574 5575 return (0); 5576 } 5577 5578 /* 5579 * Detach knote from vnode 5580 */ 5581 static void 5582 filt_vfsdetach(struct knote *kn) 5583 { 5584 struct vnode *vp = (struct vnode *)kn->kn_hook; 5585 5586 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 5587 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 5588 vdrop(vp); 5589 } 5590 5591 /*ARGSUSED*/ 5592 static int 5593 filt_vfsread(struct knote *kn, long hint) 5594 { 5595 struct vnode *vp = (struct vnode *)kn->kn_hook; 5596 struct vattr va; 5597 int res; 5598 5599 /* 5600 * filesystem is gone, so set the EOF flag and schedule 5601 * the knote for deletion. 5602 */ 5603 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5604 VI_LOCK(vp); 5605 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5606 VI_UNLOCK(vp); 5607 return (1); 5608 } 5609 5610 if (VOP_GETATTR(vp, &va, curthread->td_ucred)) 5611 return (0); 5612 5613 VI_LOCK(vp); 5614 kn->kn_data = va.va_size - kn->kn_fp->f_offset; 5615 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 5616 VI_UNLOCK(vp); 5617 return (res); 5618 } 5619 5620 /*ARGSUSED*/ 5621 static int 5622 filt_vfswrite(struct knote *kn, long hint) 5623 { 5624 struct vnode *vp = (struct vnode *)kn->kn_hook; 5625 5626 VI_LOCK(vp); 5627 5628 /* 5629 * filesystem is gone, so set the EOF flag and schedule 5630 * the knote for deletion. 
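 * Unlike the read filter, a vnode is always reported as writable, so this
 * function returns 1 in either case.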
5631 */ 5632 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 5633 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 5634 5635 kn->kn_data = 0; 5636 VI_UNLOCK(vp); 5637 return (1); 5638 } 5639 5640 static int 5641 filt_vfsvnode(struct knote *kn, long hint) 5642 { 5643 struct vnode *vp = (struct vnode *)kn->kn_hook; 5644 int res; 5645 5646 VI_LOCK(vp); 5647 if (kn->kn_sfflags & hint) 5648 kn->kn_fflags |= hint; 5649 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 5650 kn->kn_flags |= EV_EOF; 5651 VI_UNLOCK(vp); 5652 return (1); 5653 } 5654 res = (kn->kn_fflags != 0); 5655 VI_UNLOCK(vp); 5656 return (res); 5657 } 5658 5659 /* 5660 * Returns whether the directory is empty or not. 5661 * If it is empty, the return value is 0; otherwise 5662 * the return value is an error value (which may 5663 * be ENOTEMPTY). 5664 */ 5665 int 5666 vfs_emptydir(struct vnode *vp) 5667 { 5668 struct uio uio; 5669 struct iovec iov; 5670 struct dirent *dirent, *dp, *endp; 5671 int error, eof; 5672 5673 error = 0; 5674 eof = 0; 5675 5676 ASSERT_VOP_LOCKED(vp, "vfs_emptydir"); 5677 5678 dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK); 5679 iov.iov_base = dirent; 5680 iov.iov_len = sizeof(struct dirent); 5681 5682 uio.uio_iov = &iov; 5683 uio.uio_iovcnt = 1; 5684 uio.uio_offset = 0; 5685 uio.uio_resid = sizeof(struct dirent); 5686 uio.uio_segflg = UIO_SYSSPACE; 5687 uio.uio_rw = UIO_READ; 5688 uio.uio_td = curthread; 5689 5690 while (eof == 0 && error == 0) { 5691 error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof, 5692 NULL, NULL); 5693 if (error != 0) 5694 break; 5695 endp = (void *)((uint8_t *)dirent + 5696 sizeof(struct dirent) - uio.uio_resid); 5697 for (dp = dirent; dp < endp; 5698 dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) { 5699 if (dp->d_type == DT_WHT) 5700 continue; 5701 if (dp->d_namlen == 0) 5702 continue; 5703 if (dp->d_type != DT_DIR && 5704 dp->d_type != DT_UNKNOWN) { 5705 error = ENOTEMPTY; 5706 break; 5707 } 5708 if (dp->d_namlen > 2) { 5709 error = ENOTEMPTY; 5710 break; 5711 } 5712 if (dp->d_namlen == 1 && 5713 dp->d_name[0] != '.') { 5714 error = ENOTEMPTY; 5715 break; 5716 } 5717 if (dp->d_namlen == 2 && 5718 dp->d_name[1] != '.') { 5719 error = ENOTEMPTY; 5720 break; 5721 } 5722 uio.uio_resid = sizeof(struct dirent); 5723 } 5724 } 5725 free(dirent, M_TEMP); 5726 return (error); 5727 } 5728 5729 int 5730 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 5731 { 5732 int error; 5733 5734 if (dp->d_reclen > ap->a_uio->uio_resid) 5735 return (ENAMETOOLONG); 5736 error = uiomove(dp, dp->d_reclen, ap->a_uio); 5737 if (error) { 5738 if (ap->a_ncookies != NULL) { 5739 if (ap->a_cookies != NULL) 5740 free(ap->a_cookies, M_TEMP); 5741 ap->a_cookies = NULL; 5742 *ap->a_ncookies = 0; 5743 } 5744 return (error); 5745 } 5746 if (ap->a_ncookies == NULL) 5747 return (0); 5748 5749 KASSERT(ap->a_cookies, 5750 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 5751 5752 *ap->a_cookies = realloc(*ap->a_cookies, 5753 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO); 5754 (*ap->a_cookies)[*ap->a_ncookies] = off; 5755 *ap->a_ncookies += 1; 5756 return (0); 5757 } 5758 5759 /* 5760 * Mark for update the access time of the file if the filesystem 5761 * supports VOP_MARKATIME. This functionality is used by execve and 5762 * mmap, so we want to avoid the I/O implied by directly setting 5763 * va_atime for the sake of efficiency. 
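 * A caller is expected to hold the vnode lock, roughly (illustrative sketch;
 * the real callers are the exec and mmap paths):
 *
 *	vn_lock(vp, LK_SHARED | LK_RETRY);
 *	vfs_mark_atime(vp, cred);
 *	VOP_UNLOCK(vp);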
5764 */ 5765 void 5766 vfs_mark_atime(struct vnode *vp, struct ucred *cred) 5767 { 5768 struct mount *mp; 5769 5770 mp = vp->v_mount; 5771 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime"); 5772 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) 5773 (void)VOP_MARKATIME(vp); 5774 } 5775 5776 /* 5777 * The purpose of this routine is to remove granularity from accmode_t, 5778 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 5779 * VADMIN and VAPPEND. 5780 * 5781 * If it returns 0, the caller is supposed to continue with the usual 5782 * access checks using 'accmode' as modified by this routine. If it 5783 * returns nonzero value, the caller is supposed to return that value 5784 * as errno. 5785 * 5786 * Note that after this routine runs, accmode may be zero. 5787 */ 5788 int 5789 vfs_unixify_accmode(accmode_t *accmode) 5790 { 5791 /* 5792 * There is no way to specify explicit "deny" rule using 5793 * file mode or POSIX.1e ACLs. 5794 */ 5795 if (*accmode & VEXPLICIT_DENY) { 5796 *accmode = 0; 5797 return (0); 5798 } 5799 5800 /* 5801 * None of these can be translated into usual access bits. 5802 * Also, the common case for NFSv4 ACLs is to not contain 5803 * either of these bits. Caller should check for VWRITE 5804 * on the containing directory instead. 5805 */ 5806 if (*accmode & (VDELETE_CHILD | VDELETE)) 5807 return (EPERM); 5808 5809 if (*accmode & VADMIN_PERMS) { 5810 *accmode &= ~VADMIN_PERMS; 5811 *accmode |= VADMIN; 5812 } 5813 5814 /* 5815 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 5816 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 5817 */ 5818 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 5819 5820 return (0); 5821 } 5822 5823 /* 5824 * Clear out a doomed vnode (if any) and replace it with a new one as long 5825 * as the fs is not being unmounted. Return the root vnode to the caller. 5826 */ 5827 static int __noinline 5828 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 5829 { 5830 struct vnode *vp; 5831 int error; 5832 5833 restart: 5834 if (mp->mnt_rootvnode != NULL) { 5835 MNT_ILOCK(mp); 5836 vp = mp->mnt_rootvnode; 5837 if (vp != NULL) { 5838 if (!VN_IS_DOOMED(vp)) { 5839 vrefact(vp); 5840 MNT_IUNLOCK(mp); 5841 error = vn_lock(vp, flags); 5842 if (error == 0) { 5843 *vpp = vp; 5844 return (0); 5845 } 5846 vrele(vp); 5847 goto restart; 5848 } 5849 /* 5850 * Clear the old one. 5851 */ 5852 mp->mnt_rootvnode = NULL; 5853 } 5854 MNT_IUNLOCK(mp); 5855 if (vp != NULL) { 5856 /* 5857 * Paired with a fence in vfs_op_thread_exit(). 
5858 */ 5859 atomic_thread_fence_acq(); 5860 vfs_op_barrier_wait(mp); 5861 vrele(vp); 5862 } 5863 } 5864 error = VFS_CACHEDROOT(mp, flags, vpp); 5865 if (error != 0) 5866 return (error); 5867 if (mp->mnt_vfs_ops == 0) { 5868 MNT_ILOCK(mp); 5869 if (mp->mnt_vfs_ops != 0) { 5870 MNT_IUNLOCK(mp); 5871 return (0); 5872 } 5873 if (mp->mnt_rootvnode == NULL) { 5874 vrefact(*vpp); 5875 mp->mnt_rootvnode = *vpp; 5876 } else { 5877 if (mp->mnt_rootvnode != *vpp) { 5878 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 5879 panic("%s: mismatch between vnode returned " 5880 " by VFS_CACHEDROOT and the one cached " 5881 " (%p != %p)", 5882 __func__, *vpp, mp->mnt_rootvnode); 5883 } 5884 } 5885 } 5886 MNT_IUNLOCK(mp); 5887 } 5888 return (0); 5889 } 5890 5891 int 5892 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 5893 { 5894 struct vnode *vp; 5895 int error; 5896 5897 if (!vfs_op_thread_enter(mp)) 5898 return (vfs_cache_root_fallback(mp, flags, vpp)); 5899 vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); 5900 if (vp == NULL || VN_IS_DOOMED(vp)) { 5901 vfs_op_thread_exit(mp); 5902 return (vfs_cache_root_fallback(mp, flags, vpp)); 5903 } 5904 vrefact(vp); 5905 vfs_op_thread_exit(mp); 5906 error = vn_lock(vp, flags); 5907 if (error != 0) { 5908 vrele(vp); 5909 return (vfs_cache_root_fallback(mp, flags, vpp)); 5910 } 5911 *vpp = vp; 5912 return (0); 5913 } 5914 5915 struct vnode * 5916 vfs_cache_root_clear(struct mount *mp) 5917 { 5918 struct vnode *vp; 5919 5920 /* 5921 * ops > 0 guarantees there is nobody who can see this vnode 5922 */ 5923 MPASS(mp->mnt_vfs_ops > 0); 5924 vp = mp->mnt_rootvnode; 5925 mp->mnt_rootvnode = NULL; 5926 return (vp); 5927 } 5928 5929 void 5930 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 5931 { 5932 5933 MPASS(mp->mnt_vfs_ops > 0); 5934 vrefact(vp); 5935 mp->mnt_rootvnode = vp; 5936 } 5937 5938 /* 5939 * These are helper functions for filesystems to traverse all 5940 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 5941 * 5942 * This interface replaces MNT_VNODE_FOREACH. 5943 */ 5944 5945 5946 struct vnode * 5947 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 5948 { 5949 struct vnode *vp; 5950 5951 if (should_yield()) 5952 kern_yield(PRI_USER); 5953 MNT_ILOCK(mp); 5954 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 5955 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 5956 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 5957 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 5958 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 5959 continue; 5960 VI_LOCK(vp); 5961 if (VN_IS_DOOMED(vp)) { 5962 VI_UNLOCK(vp); 5963 continue; 5964 } 5965 break; 5966 } 5967 if (vp == NULL) { 5968 __mnt_vnode_markerfree_all(mvp, mp); 5969 /* MNT_IUNLOCK(mp); -- done in above function */ 5970 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 5971 return (NULL); 5972 } 5973 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 5974 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 5975 MNT_IUNLOCK(mp); 5976 return (vp); 5977 } 5978 5979 struct vnode * 5980 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 5981 { 5982 struct vnode *vp; 5983 5984 *mvp = vn_alloc_marker(mp); 5985 MNT_ILOCK(mp); 5986 MNT_REF(mp); 5987 5988 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 5989 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 5990 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 5991 continue; 5992 VI_LOCK(vp); 5993 if (VN_IS_DOOMED(vp)) { 5994 VI_UNLOCK(vp); 5995 continue; 5996 } 5997 break; 5998 } 5999 if (vp == NULL) { 6000 MNT_REL(mp); 6001 MNT_IUNLOCK(mp); 6002 vn_free_marker(*mvp); 6003 *mvp = NULL; 6004 return (NULL); 6005 } 6006 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6007 MNT_IUNLOCK(mp); 6008 return (vp); 6009 } 6010 6011 void 6012 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6013 { 6014 6015 if (*mvp == NULL) { 6016 MNT_IUNLOCK(mp); 6017 return; 6018 } 6019 6020 mtx_assert(MNT_MTX(mp), MA_OWNED); 6021 6022 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6023 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6024 MNT_REL(mp); 6025 MNT_IUNLOCK(mp); 6026 vn_free_marker(*mvp); 6027 *mvp = NULL; 6028 } 6029 6030 /* 6031 * These are helper functions for filesystems to traverse their 6032 * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h 6033 */ 6034 static void 6035 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 6036 { 6037 6038 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6039 6040 MNT_ILOCK(mp); 6041 MNT_REL(mp); 6042 MNT_IUNLOCK(mp); 6043 vn_free_marker(*mvp); 6044 *mvp = NULL; 6045 } 6046 6047 /* 6048 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6049 * conventional lock order during mnt_vnode_next_active iteration. 6050 * 6051 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6052 * The list lock is dropped and reacquired. On success, both locks are held. 6053 * On failure, the mount vnode list lock is held but the vnode interlock is 6054 * not, and the procedure may have yielded. 6055 */ 6056 static bool 6057 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp, 6058 struct vnode *vp) 6059 { 6060 const struct vnode *tmp; 6061 bool held, ret; 6062 6063 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6064 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp, 6065 ("%s: bad marker", __func__)); 6066 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6067 ("%s: inappropriate vnode", __func__)); 6068 ASSERT_VI_UNLOCKED(vp, __func__); 6069 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6070 6071 ret = false; 6072 6073 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist); 6074 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist); 6075 6076 /* 6077 * Use a hold to prevent vp from disappearing while the mount vnode 6078 * list lock is dropped and reacquired. Normally a hold would be 6079 * acquired with vhold(), but that might try to acquire the vnode 6080 * interlock, which would be a LOR with the mount vnode list lock. 6081 */ 6082 held = refcount_acquire_if_not_zero(&vp->v_holdcnt); 6083 mtx_unlock(&mp->mnt_listmtx); 6084 if (!held) 6085 goto abort; 6086 VI_LOCK(vp); 6087 if (!refcount_release_if_not_last(&vp->v_holdcnt)) { 6088 vdropl(vp); 6089 goto abort; 6090 } 6091 mtx_lock(&mp->mnt_listmtx); 6092 6093 /* 6094 * Determine whether the vnode is still the next one after the marker, 6095 * excepting any other markers. If the vnode has not been doomed by 6096 * vgone() then the hold should have ensured that it remained on the 6097 * active list. If it has been doomed but is still on the active list, 6098 * don't abort, but rather skip over it (avoid spinning on doomed 6099 * vnodes). 
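/*
 * Illustrative sketch of how a filesystem might drive the iterators above
 * from its VFS_SYNC() or a similar pass (see e.g. ffs_sync() for a real
 * consumer); each vnode is returned with its interlock held and the loop
 * body shown here is only a placeholder:
 *
 *	struct vnode *vp, *mvp;
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type == VNON) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
 *		    curthread) != 0)
 *			continue;
 *		... flush the vnode ...
 *		vput(vp);
 *	}
 */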
6100 */ 6101 tmp = mvp; 6102 do { 6103 tmp = TAILQ_NEXT(tmp, v_actfreelist); 6104 } while (tmp != NULL && tmp->v_type == VMARKER); 6105 if (tmp != vp) { 6106 mtx_unlock(&mp->mnt_listmtx); 6107 VI_UNLOCK(vp); 6108 goto abort; 6109 } 6110 6111 ret = true; 6112 goto out; 6113 abort: 6114 maybe_yield(); 6115 mtx_lock(&mp->mnt_listmtx); 6116 out: 6117 if (ret) 6118 ASSERT_VI_LOCKED(vp, __func__); 6119 else 6120 ASSERT_VI_UNLOCKED(vp, __func__); 6121 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6122 return (ret); 6123 } 6124 6125 static struct vnode * 6126 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6127 { 6128 struct vnode *vp, *nvp; 6129 6130 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6131 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6132 restart: 6133 vp = TAILQ_NEXT(*mvp, v_actfreelist); 6134 while (vp != NULL) { 6135 if (vp->v_type == VMARKER) { 6136 vp = TAILQ_NEXT(vp, v_actfreelist); 6137 continue; 6138 } 6139 /* 6140 * Try-lock because this is the wrong lock order. If that does 6141 * not succeed, drop the mount vnode list lock and try to 6142 * reacquire it and the vnode interlock in the right order. 6143 */ 6144 if (!VI_TRYLOCK(vp) && 6145 !mnt_vnode_next_active_relock(*mvp, mp, vp)) 6146 goto restart; 6147 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6148 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6149 ("alien vnode on the active list %p %p", vp, mp)); 6150 if (vp->v_mount == mp && !VN_IS_DOOMED(vp)) 6151 break; 6152 nvp = TAILQ_NEXT(vp, v_actfreelist); 6153 VI_UNLOCK(vp); 6154 vp = nvp; 6155 } 6156 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6157 6158 /* Check if we are done */ 6159 if (vp == NULL) { 6160 mtx_unlock(&mp->mnt_listmtx); 6161 mnt_vnode_markerfree_active(mvp, mp); 6162 return (NULL); 6163 } 6164 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist); 6165 mtx_unlock(&mp->mnt_listmtx); 6166 ASSERT_VI_LOCKED(vp, "active iter"); 6167 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp)); 6168 return (vp); 6169 } 6170 6171 struct vnode * 6172 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp) 6173 { 6174 6175 if (should_yield()) 6176 kern_yield(PRI_USER); 6177 mtx_lock(&mp->mnt_listmtx); 6178 return (mnt_vnode_next_active(mvp, mp)); 6179 } 6180 6181 struct vnode * 6182 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp) 6183 { 6184 struct vnode *vp; 6185 6186 *mvp = vn_alloc_marker(mp); 6187 MNT_ILOCK(mp); 6188 MNT_REF(mp); 6189 MNT_IUNLOCK(mp); 6190 6191 mtx_lock(&mp->mnt_listmtx); 6192 vp = TAILQ_FIRST(&mp->mnt_activevnodelist); 6193 if (vp == NULL) { 6194 mtx_unlock(&mp->mnt_listmtx); 6195 mnt_vnode_markerfree_active(mvp, mp); 6196 return (NULL); 6197 } 6198 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist); 6199 return (mnt_vnode_next_active(mvp, mp)); 6200 } 6201 6202 void 6203 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp) 6204 { 6205 6206 if (*mvp == NULL) 6207 return; 6208 6209 mtx_lock(&mp->mnt_listmtx); 6210 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist); 6211 mtx_unlock(&mp->mnt_listmtx); 6212 mnt_vnode_markerfree_active(mvp, mp); 6213 } 6214