/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables between vnode types and inode formats, and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};
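/*
 * Illustrative note (not part of the original source): these tables back
 * the IFTOVT() and VTTOIF() macros in sys/vnode.h, which index them by the
 * file-type bits of an inode mode and by the vnode type respectively.
 * For example:
 *
 *	IFTOVT(S_IFDIR) == iftovt_tab[(S_IFDIR & S_IFMT) >> 12] == VDIR
 *	VTTOIF(VREG) == vttoif_tab[VREG] == S_IFREG
 */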
/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;
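/*
 * Worked example (illustrative, not from the original source), using the
 * arithmetic in vnlru_recalc() below with desiredvnodes = 1000000 and the
 * default wantfreevnodes = desiredvnodes / 4 = 250000:
 *
 *	gapvnodes = desiredvnodes - wantfreevnodes = 750000
 *	vhiwat    = gapvnodes / 11 ~= 68181	(~9% of the gap)
 *	vlowat    = vhiwat / 2     ~= 34090	(~4.5%)
 *
 * vnlru_proc() starts reclaiming once less than roughly vlowat worth of
 * slack remains and keeps going until about vhiwat worth is available.
 */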
static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata for mounted-on
 * block devices is delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they are
 * delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
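/*
 * Illustrative sketch (not part of the original source): scheduling a
 * buffer object "delay" seconds into the future amounts to picking a
 * wheel slot modulo the wheel size and linking the bufobj there; the
 * real insertion is performed under sync_mtx by
 * vn_syncer_add_to_worklist():
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
 */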
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru
	 * and getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif
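/*
 * Illustrative note (an assumption, not confirmed by this excerpt): the
 * static assertion above pins vnsz2log to floor(log2(sizeof(struct vnode))),
 * so shifting a vnode address right by vnsz2log yields values that differ
 * between adjacent vnode allocations, making the shifted address usable as
 * a hash seed along the lines of:
 *
 *	vp->v_hash = (uintptr_t)vp >> vnsz2log;
 */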
/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Accesses to the v_vnodelist and v_dbatchcpu fields are permitted
	 * even after the vnode has been freed.  Try to get some KASAN
	 * coverage by marking everything except those two fields as invalid.
	 * Because KASAN's tracking is not byte-granular, any preceding fields
	 * sharing the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems (like UFS and ZFS) may use bigger or smaller
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
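	/*
	 * Worked example (illustrative, not from the original source): on a
	 * machine with 16 GB of RAM and 4 KB pages,
	 * pgtok(vm_cnt.v_page_count) is about 16777216 (KB), so the formula
	 * above gives
	 *
	 *	physvnodes = maxproc + 16777216 / 64
	 *	    + 3 * min(98304 * 16, 16777216) / 64
	 *	           = maxproc + 262144 + 73728
	 *
	 * i.e. roughly 340k vnodes, unless the virtvnodes kernel-heap limit
	 * computed above turns out to be smaller.
	 */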
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we cannot fail an insert.  reassignbuf() callers cannot
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Note that mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs				var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If the thread doing the unmounting
	 * fails, it will clear MNTK_UNMOUNT before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * in addition to MNTK_UNMOUNT, indicating that the mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}
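/*
 * Illustrative usage (a sketch, not from the original source): callers
 * typically bracket work on a mount point with this pair, checking for a
 * mount that went away underneath them:
 *
 *	if (vfs_busy(mp, 0) != 0)
 *		return (ENOENT);
 *	... operate on mp ...
 *	vfs_unbusy(mp);
 */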
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even different filesystem, so we have
 * to check what we got, and fall back to the slow path if so.
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
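/*
 * Example (illustrative, not from the original source): the knob above can
 * be tuned at runtime, e.g. `sysctl vfs.timestamp_precision=3` switches
 * file timestamps to full nanosecond precision at the cost of a slower
 * clock read per timestamp update.
 */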
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it, making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *			 entries if this argument is true
 * @param trigger	 Only reclaim vnodes with fewer than this many resident
 *			 pages.
 * @param target	 How many vnodes to reclaim.
 * @return		 The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_free_per_call = 10000;
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine (legacy)");
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, max_free_per_call, CTLFLAG_RW,
    &max_free_per_call, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_free_per_call)
		count = max_free_per_call;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: the return value is ignored, meaning that nothing
		 * may have been recycled while the routine still claims
		 * success to the caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In the presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out whether
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
 */
static int
vnlru_free_locked(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}
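/*
 * Illustrative usage (a sketch, not from the original source): a filesystem
 * that wants targeted recycling allocates a private marker once, passes it
 * to vnlru_free_vfsops() whenever it needs vnodes of its own type recycled,
 * and frees it on unload.  The vfsops name below is hypothetical:
 *
 *	mvp = vnlru_alloc_marker();
 *	...
 *	vnlru_free_vfsops(count, &myfs_vfsops, mvp);
 *	...
 *	vnlru_free_marker(mvp);
 */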
static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru got woken up due to vnode shortage");

#define VNLRU_COUNT_SLOP 100

/*
 * The main freevnodes counter is only updated when a counter local to the
 * CPU diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are
 * conditionally walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by significant margin in practice.
 */
#define VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in the presence of tons
 * of free vnodes, but without having to rewrite the machinery at this time.
 * Said behavior boils down to continuously trying to reclaim all kinds of
 * vnodes (cycling through all levels of "force") when the count is
 * transiently above limit.  This happens a lot when all vnodes are used up
 * and vn_alloc speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds [it got further down with later
 * changes].
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling); the above only tries to get it out of the
 * way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 *
	 * Due to atomic updates the count can legitimately go above
	 * the limit for a short period, don't bother doing anything in
	 * that case.
	 */
	if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP + 10) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes + 10) {
			vnlru_free_locked(rnumvnodes - desiredvnodes);
			mtx_lock(&vnode_list_mtx);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = false;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlru_proc_sleep();
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * Total number of vnodes can transiently go slightly above the
		 * limit (see vn_alloc_hard), no need to call uma_reclaim if
		 * this happens.
		 */
		if (onumvnodes + VNLRU_COUNT_SLOP + 1000 > desiredvnodes &&
		    numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);
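/*
 * Summary of the "force" escalation in vnlru_proc() above (an illustrative
 * reading of the code, not from the original source):
 *
 *	force 0/1 - recycle free vnodes; scan, but only vnodes with fewer
 *		    than vsmalltrigger resident pages, keeping directories
 *		    that are namecache sources
 *	force 2   - also recycle vnodes with many resident pages
 *	force 3   - additionally reclaim directories with outgoing
 *		    namecache entries
 */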
1864 */ 1865 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1866 VOP_UNLOCK(vp); 1867 CTR2(KTR_VFS, 1868 "%s: impossible to recycle, cannot start the write for %p", 1869 __func__, vp); 1870 vdrop_recycle(vp); 1871 return (EBUSY); 1872 } 1873 /* 1874 * If we got this far, we need to acquire the interlock and see if 1875 * anyone picked up this vnode from another list. If not, we will 1876 * mark it with DOOMED via vgonel() so that anyone who does find it 1877 * will skip over it. 1878 */ 1879 VI_LOCK(vp); 1880 if (vp->v_usecount) { 1881 VOP_UNLOCK(vp); 1882 vdropl_recycle(vp); 1883 vn_finished_write(vnmp); 1884 CTR2(KTR_VFS, 1885 "%s: impossible to recycle, %p is already referenced", 1886 __func__, vp); 1887 return (EBUSY); 1888 } 1889 if (!VN_IS_DOOMED(vp)) { 1890 counter_u64_add(recycles_free_count, 1); 1891 vgonel(vp); 1892 } 1893 VOP_UNLOCK(vp); 1894 vdropl_recycle(vp); 1895 vn_finished_write(vnmp); 1896 return (0); 1897 } 1898 1899 /* 1900 * Allocate a new vnode. 1901 * 1902 * The operation never returns an error. Returning an error was disabled 1903 * in r145385 (dated 2005) with the following comment: 1904 * 1905 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1906 * 1907 * Given the age of this commit (almost 15 years at the time of writing this 1908 * comment), restoring the ability to fail requires a significant audit of 1909 * all codepaths. 1910 * 1911 * The routine can try to free a vnode or stall for up to 1 second waiting for 1912 * vnlru to clear things up, but ultimately always performs an M_WAITOK allocation. 1913 */ 1914 static u_long vn_alloc_cyclecount; 1915 static u_long vn_alloc_sleeps; 1916 1917 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1918 "Number of times vnode allocation blocked waiting on vnlru"); 1919 1920 static struct vnode * __noinline 1921 vn_alloc_hard(struct mount *mp, u_long rnumvnodes, bool bumped) 1922 { 1923 u_long rfreevnodes; 1924 1925 if (bumped) { 1926 if (rnumvnodes > desiredvnodes + VNLRU_COUNT_SLOP) { 1927 atomic_subtract_long(&numvnodes, 1); 1928 bumped = false; 1929 } 1930 } 1931 1932 mtx_lock(&vnode_list_mtx); 1933 1934 if (vn_alloc_cyclecount != 0) { 1935 rnumvnodes = atomic_load_long(&numvnodes); 1936 if (rnumvnodes + 1 < desiredvnodes) { 1937 vn_alloc_cyclecount = 0; 1938 mtx_unlock(&vnode_list_mtx); 1939 goto alloc; 1940 } 1941 1942 rfreevnodes = vnlru_read_freevnodes(); 1943 if (rfreevnodes < wantfreevnodes) { 1944 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1945 vn_alloc_cyclecount = 0; 1946 vstir = true; 1947 } 1948 } else { 1949 vn_alloc_cyclecount = 0; 1950 } 1951 } 1952 1953 /* 1954 * Grow the vnode cache if it will not be above its target max 1955 * after growing. Otherwise, if the free list is nonempty, try 1956 * to reclaim 1 item from it before growing the cache (possibly 1957 * above its target max if the reclamation failed or is delayed). 1958 * Otherwise, wait for some space. In all cases, schedule 1959 * vnlru_proc() if we are getting short of space. The watermarks 1960 * should be chosen so that we never wait or even reclaim from 1961 * the free list to below its target minimum. 1962 */ 1963 if (vnlru_free_locked(1) > 0) 1964 goto alloc; 1965 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1966 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1967 /* 1968 * Wait for space for a new vnode.
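 * The sleep below is bounded to about a second (hz ticks) and is
 * counted in vn_alloc_sleeps, exported as the
 * vfs.vnode.stats.alloc_sleeps sysctl above, so stalls taken here
 * are visible to monitoring. The allocation still always succeeds
 * in the end since the final uma_zalloc_smr() call is M_WAITOK.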
1969 */ 1970 if (bumped) { 1971 atomic_subtract_long(&numvnodes, 1); 1972 bumped = false; 1973 } 1974 mtx_lock(&vnode_list_mtx); 1975 vnlru_kick_locked(); 1976 vn_alloc_sleeps++; 1977 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1978 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1979 vnlru_read_freevnodes() > 1) 1980 vnlru_free_locked(1); 1981 else 1982 mtx_unlock(&vnode_list_mtx); 1983 } 1984 alloc: 1985 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1986 if (!bumped) 1987 atomic_add_long(&numvnodes, 1); 1988 vnlru_kick_cond(); 1989 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1990 } 1991 1992 static struct vnode * 1993 vn_alloc(struct mount *mp) 1994 { 1995 u_long rnumvnodes; 1996 1997 if (__predict_false(vn_alloc_cyclecount != 0)) 1998 return (vn_alloc_hard(mp, 0, false)); 1999 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 2000 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 2001 return (vn_alloc_hard(mp, rnumvnodes, true)); 2002 } 2003 2004 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 2005 } 2006 2007 static void 2008 vn_free(struct vnode *vp) 2009 { 2010 2011 atomic_subtract_long(&numvnodes, 1); 2012 uma_zfree_smr(vnode_zone, vp); 2013 } 2014 2015 /* 2016 * Return the next vnode from the free list. 2017 */ 2018 int 2019 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 2020 struct vnode **vpp) 2021 { 2022 struct vnode *vp; 2023 struct thread *td; 2024 struct lock_object *lo; 2025 2026 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2027 2028 KASSERT(vops->registered, 2029 ("%s: not registered vector op %p\n", __func__, vops)); 2030 cache_validate_vop_vector(mp, vops); 2031 2032 td = curthread; 2033 if (td->td_vp_reserved != NULL) { 2034 vp = td->td_vp_reserved; 2035 td->td_vp_reserved = NULL; 2036 } else { 2037 vp = vn_alloc(mp); 2038 } 2039 counter_u64_add(vnodes_created, 1); 2040 2041 vn_set_state(vp, VSTATE_UNINITIALIZED); 2042 2043 /* 2044 * Locks are given the generic name "vnode" when created. 2045 * Follow the historic practice of using the filesystem 2046 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc. 2047 * 2048 * Locks live in a witness group keyed on their name. Thus, 2049 * when a lock is renamed, it must also move from the witness 2050 * group of its old name to the witness group of its new name. 2051 * 2052 * The change only needs to be made when the vnode moves 2053 * from one filesystem type to another. We ensure that each 2054 * filesystem uses a single static name pointer for its tag so 2055 * that we can compare pointers rather than doing a strcmp(). 2056 */ 2057 lo = &vp->v_vnlock->lock_object; 2058 #ifdef WITNESS 2059 if (lo->lo_name != tag) { 2060 #endif 2061 lo->lo_name = tag; 2062 #ifdef WITNESS 2063 WITNESS_DESTROY(lo); 2064 WITNESS_INIT(lo, tag); 2065 } 2066 #endif 2067 /* 2068 * By default, don't allow shared locks unless filesystems opt-in. 2069 */ 2070 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2071 /* 2072 * Finalize various vnode identity bits.
2073 */ 2074 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2075 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2076 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2077 vp->v_type = VNON; 2078 vp->v_op = vops; 2079 vp->v_irflag = 0; 2080 v_init_counters(vp); 2081 vn_seqc_init(vp); 2082 vp->v_bufobj.bo_ops = &buf_ops_bio; 2083 #ifdef DIAGNOSTIC 2084 if (mp == NULL && vops != &dead_vnodeops) 2085 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2086 #endif 2087 #ifdef MAC 2088 mac_vnode_init(vp); 2089 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2090 mac_vnode_associate_singlelabel(mp, vp); 2091 #endif 2092 if (mp != NULL) { 2093 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2094 } 2095 2096 /* 2097 * For the filesystems which do not use vfs_hash_insert(), 2098 * still initialize v_hash so that vfs_hash_index() remains useful. 2099 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2100 * its own hashing. 2101 */ 2102 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2103 2104 *vpp = vp; 2105 return (0); 2106 } 2107 2108 void 2109 getnewvnode_reserve(void) 2110 { 2111 struct thread *td; 2112 2113 td = curthread; 2114 MPASS(td->td_vp_reserved == NULL); 2115 td->td_vp_reserved = vn_alloc(NULL); 2116 } 2117 2118 void 2119 getnewvnode_drop_reserve(void) 2120 { 2121 struct thread *td; 2122 2123 td = curthread; 2124 if (td->td_vp_reserved != NULL) { 2125 vn_free(td->td_vp_reserved); 2126 td->td_vp_reserved = NULL; 2127 } 2128 } 2129 2130 static void __noinline 2131 freevnode(struct vnode *vp) 2132 { 2133 struct bufobj *bo; 2134 2135 /* 2136 * The vnode has been marked for destruction, so free it. 2137 * 2138 * The vnode will be returned to the zone where it will 2139 * normally remain until it is needed for another vnode. We 2140 * need to clean up (or verify that the cleanup has already 2141 * been done) any residual data left from its current use 2142 * so as not to contaminate the freshly allocated vnode. 2143 */ 2144 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2145 /* 2146 * Paired with vgone. 2147 */ 2148 vn_seqc_write_end_free(vp); 2149 2150 bo = &vp->v_bufobj; 2151 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2152 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2153 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2154 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2155 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2156 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2157 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2158 ("clean blk trie not empty")); 2159 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2160 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2161 ("dirty blk trie not empty")); 2162 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2163 ("Dangling rangelock waiters")); 2164 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2165 ("Leaked inactivation")); 2166 VI_UNLOCK(vp); 2167 cache_assert_no_entries(vp); 2168 2169 #ifdef MAC 2170 mac_vnode_destroy(vp); 2171 #endif 2172 if (vp->v_pollinfo != NULL) { 2173 /* 2174 * Use LK_NOWAIT to shut up witness about the lock. We may get 2175 * here while having another vnode locked when trying to 2176 * satisfy a lookup and needing to recycle.
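 * The vnode being freed is expected to have an uncontested lock at
 * this point, so the LK_NOWAIT acquisition below presumably cannot
 * fail; the flag only exists to keep WITNESS quiet about holding a
 * second vnode lock.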
2177 */ 2178 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2179 destroy_vpollinfo(vp->v_pollinfo); 2180 VOP_UNLOCK(vp); 2181 vp->v_pollinfo = NULL; 2182 } 2183 vp->v_mountedhere = NULL; 2184 vp->v_unpcb = NULL; 2185 vp->v_rdev = NULL; 2186 vp->v_fifoinfo = NULL; 2187 vp->v_iflag = 0; 2188 vp->v_vflag = 0; 2189 bo->bo_flag = 0; 2190 vn_free(vp); 2191 } 2192 2193 /* 2194 * Delete from old mount point vnode list, if on one. 2195 */ 2196 static void 2197 delmntque(struct vnode *vp) 2198 { 2199 struct mount *mp; 2200 2201 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2202 2203 mp = vp->v_mount; 2204 MNT_ILOCK(mp); 2205 VI_LOCK(vp); 2206 vp->v_mount = NULL; 2207 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2208 ("bad mount point vnode list size")); 2209 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2210 mp->mnt_nvnodelistsize--; 2211 MNT_REL(mp); 2212 MNT_IUNLOCK(mp); 2213 /* 2214 * The caller expects the interlock to be still held. 2215 */ 2216 ASSERT_VI_LOCKED(vp, __func__); 2217 } 2218 2219 static int 2220 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2221 { 2222 2223 KASSERT(vp->v_mount == NULL, 2224 ("insmntque: vnode already on per mount vnode list")); 2225 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2226 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2227 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2228 } else { 2229 KASSERT(!dtr, 2230 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2231 __func__)); 2232 } 2233 2234 /* 2235 * We acquire the vnode interlock early to ensure that the 2236 * vnode cannot be recycled by another process releasing a 2237 * holdcnt on it before we get it on both the vnode list 2238 * and the active vnode list. The mount mutex protects only 2239 * manipulation of the vnode list and the vnode freelist 2240 * mutex protects only manipulation of the active vnode list. 2241 * Hence the need to hold the vnode interlock throughout. 2242 */ 2243 MNT_ILOCK(mp); 2244 VI_LOCK(vp); 2245 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2246 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2247 mp->mnt_nvnodelistsize == 0)) && 2248 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2249 VI_UNLOCK(vp); 2250 MNT_IUNLOCK(mp); 2251 if (dtr) { 2252 vp->v_data = NULL; 2253 vp->v_op = &dead_vnodeops; 2254 vgone(vp); 2255 vput(vp); 2256 } 2257 return (EBUSY); 2258 } 2259 vp->v_mount = mp; 2260 MNT_REF(mp); 2261 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2262 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2263 ("neg mount point vnode list size")); 2264 mp->mnt_nvnodelistsize++; 2265 VI_UNLOCK(vp); 2266 MNT_IUNLOCK(mp); 2267 return (0); 2268 } 2269 2270 /* 2271 * Insert into list of vnodes for the new mount point, if available. 2272 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2273 * leaves handling of the vnode to the caller. 2274 */ 2275 int 2276 insmntque(struct vnode *vp, struct mount *mp) 2277 { 2278 return (insmntque1_int(vp, mp, true)); 2279 } 2280 2281 int 2282 insmntque1(struct vnode *vp, struct mount *mp) 2283 { 2284 return (insmntque1_int(vp, mp, false)); 2285 } 2286 2287 /* 2288 * Flush out and invalidate all buffers associated with a bufobj 2289 * Called with the underlying object locked. 
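 *
 * A typical filesystem caller goes through the vinvalbuf() wrapper
 * below, e.g.:
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *
 * which writes out dirty buffers before invalidating, while a flags
 * argument of 0 discards them outright.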
2290 */ 2291 int 2292 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2293 { 2294 int error; 2295 2296 BO_LOCK(bo); 2297 if (flags & V_SAVE) { 2298 error = bufobj_wwait(bo, slpflag, slptimeo); 2299 if (error) { 2300 BO_UNLOCK(bo); 2301 return (error); 2302 } 2303 if (bo->bo_dirty.bv_cnt > 0) { 2304 BO_UNLOCK(bo); 2305 do { 2306 error = BO_SYNC(bo, MNT_WAIT); 2307 } while (error == ERELOOKUP); 2308 if (error != 0) 2309 return (error); 2310 BO_LOCK(bo); 2311 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2312 BO_UNLOCK(bo); 2313 return (EBUSY); 2314 } 2315 } 2316 } 2317 /* 2318 * If you alter this loop please notice that interlock is dropped and 2319 * reacquired in flushbuflist. Special care is needed to ensure that 2320 * no race conditions occur from this. 2321 */ 2322 do { 2323 error = flushbuflist(&bo->bo_clean, 2324 flags, bo, slpflag, slptimeo); 2325 if (error == 0 && !(flags & V_CLEANONLY)) 2326 error = flushbuflist(&bo->bo_dirty, 2327 flags, bo, slpflag, slptimeo); 2328 if (error != 0 && error != EAGAIN) { 2329 BO_UNLOCK(bo); 2330 return (error); 2331 } 2332 } while (error != 0); 2333 2334 /* 2335 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2336 * have write I/O in-progress but if there is a VM object then the 2337 * VM object can also have read-I/O in-progress. 2338 */ 2339 do { 2340 bufobj_wwait(bo, 0, 0); 2341 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2342 BO_UNLOCK(bo); 2343 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2344 BO_LOCK(bo); 2345 } 2346 } while (bo->bo_numoutput > 0); 2347 BO_UNLOCK(bo); 2348 2349 /* 2350 * Destroy the copy in the VM cache, too. 2351 */ 2352 if (bo->bo_object != NULL && 2353 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2354 VM_OBJECT_WLOCK(bo->bo_object); 2355 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2356 OBJPR_CLEANONLY : 0); 2357 VM_OBJECT_WUNLOCK(bo->bo_object); 2358 } 2359 2360 #ifdef INVARIANTS 2361 BO_LOCK(bo); 2362 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2363 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2364 bo->bo_clean.bv_cnt > 0)) 2365 panic("vinvalbuf: flush failed"); 2366 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2367 bo->bo_dirty.bv_cnt > 0) 2368 panic("vinvalbuf: flush dirty failed"); 2369 BO_UNLOCK(bo); 2370 #endif 2371 return (0); 2372 } 2373 2374 /* 2375 * Flush out and invalidate all buffers associated with a vnode. 2376 * Called with the underlying object locked. 2377 */ 2378 int 2379 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2380 { 2381 2382 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2383 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2384 if (vp->v_object != NULL && vp->v_object->handle != vp) 2385 return (0); 2386 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2387 } 2388 2389 /* 2390 * Flush out buffers on the specified list. 2391 * 2392 */ 2393 static int 2394 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2395 int slptimeo) 2396 { 2397 struct buf *bp, *nbp; 2398 int retval, error; 2399 daddr_t lblkno; 2400 b_xflags_t xflags; 2401 2402 ASSERT_BO_WLOCKED(bo); 2403 2404 retval = 0; 2405 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2406 /* 2407 * If we are flushing both V_NORMAL and V_ALT buffers then 2408 * do not skip any buffers. If we are flushing only V_NORMAL 2409 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2410 * flushing only V_ALT buffers then skip buffers not marked 2411 * as BX_ALTDATA. 2412 */ 2413 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2414 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2415 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2416 continue; 2417 } 2418 if (nbp != NULL) { 2419 lblkno = nbp->b_lblkno; 2420 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2421 } 2422 retval = EAGAIN; 2423 error = BUF_TIMELOCK(bp, 2424 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2425 "flushbuf", slpflag, slptimeo); 2426 if (error) { 2427 BO_LOCK(bo); 2428 return (error != ENOLCK ? error : EAGAIN); 2429 } 2430 KASSERT(bp->b_bufobj == bo, 2431 ("bp %p wrong b_bufobj %p should be %p", 2432 bp, bp->b_bufobj, bo)); 2433 /* 2434 * XXX Since there are no node locks for NFS, I 2435 * believe there is a slight chance that a delayed 2436 * write will occur while sleeping just above, so 2437 * check for it. 2438 */ 2439 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2440 (flags & V_SAVE)) { 2441 bremfree(bp); 2442 bp->b_flags |= B_ASYNC; 2443 bwrite(bp); 2444 BO_LOCK(bo); 2445 return (EAGAIN); /* XXX: why not loop ? */ 2446 } 2447 bremfree(bp); 2448 bp->b_flags |= (B_INVAL | B_RELBUF); 2449 bp->b_flags &= ~B_ASYNC; 2450 brelse(bp); 2451 BO_LOCK(bo); 2452 if (nbp == NULL) 2453 break; 2454 nbp = gbincore(bo, lblkno); 2455 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2456 != xflags) 2457 break; /* nbp invalid */ 2458 } 2459 return (retval); 2460 } 2461 2462 int 2463 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2464 { 2465 struct buf *bp; 2466 int error; 2467 daddr_t lblkno; 2468 2469 ASSERT_BO_LOCKED(bo); 2470 2471 for (lblkno = startn;;) { 2472 again: 2473 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2474 if (bp == NULL || bp->b_lblkno >= endn || 2475 bp->b_lblkno < startn) 2476 break; 2477 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2478 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2479 if (error != 0) { 2480 BO_RLOCK(bo); 2481 if (error == ENOLCK) 2482 goto again; 2483 return (error); 2484 } 2485 KASSERT(bp->b_bufobj == bo, 2486 ("bp %p wrong b_bufobj %p should be %p", 2487 bp, bp->b_bufobj, bo)); 2488 lblkno = bp->b_lblkno + 1; 2489 if ((bp->b_flags & B_MANAGED) == 0) 2490 bremfree(bp); 2491 bp->b_flags |= B_RELBUF; 2492 /* 2493 * In the VMIO case, use the B_NOREUSE flag to hint that the 2494 * pages backing each buffer in the range are unlikely to be 2495 * reused. Dirty buffers will have the hint applied once 2496 * they've been written. 2497 */ 2498 if ((bp->b_flags & B_VMIO) != 0) 2499 bp->b_flags |= B_NOREUSE; 2500 brelse(bp); 2501 BO_RLOCK(bo); 2502 } 2503 return (0); 2504 } 2505 2506 /* 2507 * Truncate a file's buffer and pages to a specified length. This 2508 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2509 * sync activity. 2510 */ 2511 int 2512 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2513 { 2514 struct buf *bp, *nbp; 2515 struct bufobj *bo; 2516 daddr_t startlbn; 2517 2518 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2519 vp, blksize, (uintmax_t)length); 2520 2521 /* 2522 * Round up to the *next* lbn. 
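 * For example, with blksize 4096 a length of 8192 gives startlbn 2,
 * leaving blocks 0 and 1 intact; a length of 4097 also yields 2,
 * since block 1 straddles the new end of file and must be kept.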
2523 */ 2524 startlbn = howmany(length, blksize); 2525 2526 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2527 2528 bo = &vp->v_bufobj; 2529 restart_unlocked: 2530 BO_LOCK(bo); 2531 2532 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2533 ; 2534 2535 if (length > 0) { 2536 restartsync: 2537 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2538 if (bp->b_lblkno > 0) 2539 continue; 2540 /* 2541 * Since we hold the vnode lock this should only 2542 * fail if we're racing with the buf daemon. 2543 */ 2544 if (BUF_LOCK(bp, 2545 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2546 BO_LOCKPTR(bo)) == ENOLCK) 2547 goto restart_unlocked; 2548 2549 VNASSERT((bp->b_flags & B_DELWRI), vp, 2550 ("buf(%p) on dirty queue without DELWRI", bp)); 2551 2552 bremfree(bp); 2553 bawrite(bp); 2554 BO_LOCK(bo); 2555 goto restartsync; 2556 } 2557 } 2558 2559 bufobj_wwait(bo, 0, 0); 2560 BO_UNLOCK(bo); 2561 vnode_pager_setsize(vp, length); 2562 2563 return (0); 2564 } 2565 2566 /* 2567 * Invalidate the cached pages of a file's buffer within the range of block 2568 * numbers [startlbn, endlbn). 2569 */ 2570 void 2571 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2572 int blksize) 2573 { 2574 struct bufobj *bo; 2575 off_t start, end; 2576 2577 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2578 2579 start = blksize * startlbn; 2580 end = blksize * endlbn; 2581 2582 bo = &vp->v_bufobj; 2583 BO_LOCK(bo); 2584 MPASS(blksize == bo->bo_bsize); 2585 2586 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2587 ; 2588 2589 BO_UNLOCK(bo); 2590 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2591 } 2592 2593 static int 2594 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2595 daddr_t startlbn, daddr_t endlbn) 2596 { 2597 struct buf *bp, *nbp; 2598 bool anyfreed; 2599 2600 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2601 ASSERT_BO_LOCKED(bo); 2602 2603 do { 2604 anyfreed = false; 2605 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2606 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2607 continue; 2608 if (BUF_LOCK(bp, 2609 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2610 BO_LOCKPTR(bo)) == ENOLCK) { 2611 BO_LOCK(bo); 2612 return (EAGAIN); 2613 } 2614 2615 bremfree(bp); 2616 bp->b_flags |= B_INVAL | B_RELBUF; 2617 bp->b_flags &= ~B_ASYNC; 2618 brelse(bp); 2619 anyfreed = true; 2620 2621 BO_LOCK(bo); 2622 if (nbp != NULL && 2623 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2624 nbp->b_vp != vp || 2625 (nbp->b_flags & B_DELWRI) != 0)) 2626 return (EAGAIN); 2627 } 2628 2629 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2630 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2631 continue; 2632 if (BUF_LOCK(bp, 2633 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2634 BO_LOCKPTR(bo)) == ENOLCK) { 2635 BO_LOCK(bo); 2636 return (EAGAIN); 2637 } 2638 bremfree(bp); 2639 bp->b_flags |= B_INVAL | B_RELBUF; 2640 bp->b_flags &= ~B_ASYNC; 2641 brelse(bp); 2642 anyfreed = true; 2643 2644 BO_LOCK(bo); 2645 if (nbp != NULL && 2646 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2647 (nbp->b_vp != vp) || 2648 (nbp->b_flags & B_DELWRI) == 0)) 2649 return (EAGAIN); 2650 } 2651 } while (anyfreed); 2652 return (0); 2653 } 2654 2655 static void 2656 buf_vlist_remove(struct buf *bp) 2657 { 2658 struct bufv *bv; 2659 b_xflags_t flags; 2660 2661 flags = bp->b_xflags; 2662 2663 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2664 ASSERT_BO_WLOCKED(bp->b_bufobj); 2665 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2666 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2667 ("%s: buffer %p has invalid queue state", __func__, bp)); 2668 2669 if ((flags & BX_VNDIRTY) != 0) 2670 bv = &bp->b_bufobj->bo_dirty; 2671 else 2672 bv = &bp->b_bufobj->bo_clean; 2673 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2674 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2675 bv->bv_cnt--; 2676 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2677 } 2678 2679 /* 2680 * Add the buffer to the sorted clean or dirty block list. 2681 * 2682 * NOTE: xflags is passed as a constant, optimizing this inline function! 2683 */ 2684 static void 2685 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2686 { 2687 struct bufv *bv; 2688 struct buf *n; 2689 int error; 2690 2691 ASSERT_BO_WLOCKED(bo); 2692 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2693 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2694 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2695 ("dead bo %p", bo)); 2696 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2697 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2698 bp->b_xflags |= xflags; 2699 if (xflags & BX_VNDIRTY) 2700 bv = &bo->bo_dirty; 2701 else 2702 bv = &bo->bo_clean; 2703 2704 /* 2705 * Keep the list ordered. Optimize empty list insertion. Assume 2706 * we tend to grow at the tail so lookup_le should usually be cheaper 2707 * than _ge. 2708 */ 2709 if (bv->bv_cnt == 0 || 2710 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2711 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2712 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2713 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2714 else 2715 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2716 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2717 if (error) 2718 panic("buf_vlist_add: Preallocated nodes insufficient."); 2719 bv->bv_cnt++; 2720 } 2721 2722 /* 2723 * Look up a buffer using the buffer tries. 2724 */ 2725 struct buf * 2726 gbincore(struct bufobj *bo, daddr_t lblkno) 2727 { 2728 struct buf *bp; 2729 2730 ASSERT_BO_LOCKED(bo); 2731 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2732 if (bp != NULL) 2733 return (bp); 2734 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2735 } 2736 2737 /* 2738 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2739 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2740 * stability of the result. Like other lockless lookups, the found buf may 2741 * already be invalid by the time this function returns. 2742 */ 2743 struct buf * 2744 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2745 { 2746 struct buf *bp; 2747 2748 ASSERT_BO_UNLOCKED(bo); 2749 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2750 if (bp != NULL) 2751 return (bp); 2752 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2753 } 2754 2755 /* 2756 * Associate a buffer with a vnode. 2757 */ 2758 void 2759 bgetvp(struct vnode *vp, struct buf *bp) 2760 { 2761 struct bufobj *bo; 2762 2763 bo = &vp->v_bufobj; 2764 ASSERT_BO_WLOCKED(bo); 2765 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2766 2767 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2768 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2769 ("bgetvp: bp already attached! %p", bp)); 2770 2771 vhold(vp); 2772 bp->b_vp = vp; 2773 bp->b_bufobj = bo; 2774 /* 2775 * Insert onto list for new vnode. 
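 * A freshly associated buffer always starts out on the clean list;
 * it only migrates to the dirty list via reassignbuf() once B_DELWRI
 * has been set on it.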
2776 */ 2777 buf_vlist_add(bp, bo, BX_VNCLEAN); 2778 } 2779 2780 /* 2781 * Disassociate a buffer from a vnode. 2782 */ 2783 void 2784 brelvp(struct buf *bp) 2785 { 2786 struct bufobj *bo; 2787 struct vnode *vp; 2788 2789 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2790 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2791 2792 /* 2793 * Delete from old vnode list, if on one. 2794 */ 2795 vp = bp->b_vp; /* XXX */ 2796 bo = bp->b_bufobj; 2797 BO_LOCK(bo); 2798 buf_vlist_remove(bp); 2799 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2800 bo->bo_flag &= ~BO_ONWORKLST; 2801 mtx_lock(&sync_mtx); 2802 LIST_REMOVE(bo, bo_synclist); 2803 syncer_worklist_len--; 2804 mtx_unlock(&sync_mtx); 2805 } 2806 bp->b_vp = NULL; 2807 bp->b_bufobj = NULL; 2808 BO_UNLOCK(bo); 2809 vdrop(vp); 2810 } 2811 2812 /* 2813 * Add an item to the syncer work queue. 2814 */ 2815 static void 2816 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2817 { 2818 int slot; 2819 2820 ASSERT_BO_WLOCKED(bo); 2821 2822 mtx_lock(&sync_mtx); 2823 if (bo->bo_flag & BO_ONWORKLST) 2824 LIST_REMOVE(bo, bo_synclist); 2825 else { 2826 bo->bo_flag |= BO_ONWORKLST; 2827 syncer_worklist_len++; 2828 } 2829 2830 if (delay > syncer_maxdelay - 2) 2831 delay = syncer_maxdelay - 2; 2832 slot = (syncer_delayno + delay) & syncer_mask; 2833 2834 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2835 mtx_unlock(&sync_mtx); 2836 } 2837 2838 static int 2839 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2840 { 2841 int error, len; 2842 2843 mtx_lock(&sync_mtx); 2844 len = syncer_worklist_len - sync_vnode_count; 2845 mtx_unlock(&sync_mtx); 2846 error = SYSCTL_OUT(req, &len, sizeof(len)); 2847 return (error); 2848 } 2849 2850 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2851 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2852 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2853 2854 static struct proc *updateproc; 2855 static void sched_sync(void); 2856 static struct kproc_desc up_kp = { 2857 "syncer", 2858 sched_sync, 2859 &updateproc 2860 }; 2861 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2862 2863 static int 2864 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2865 { 2866 struct vnode *vp; 2867 struct mount *mp; 2868 2869 *bo = LIST_FIRST(slp); 2870 if (*bo == NULL) 2871 return (0); 2872 vp = bo2vnode(*bo); 2873 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2874 return (1); 2875 /* 2876 * We use vhold in case the vnode does not 2877 * successfully sync. vhold prevents the vnode from 2878 * going away when we unlock the sync_mtx so that 2879 * we can acquire the vnode interlock. 2880 */ 2881 vholdl(vp); 2882 mtx_unlock(&sync_mtx); 2883 VI_UNLOCK(vp); 2884 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2885 vdrop(vp); 2886 mtx_lock(&sync_mtx); 2887 return (*bo == LIST_FIRST(slp)); 2888 } 2889 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2890 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2891 ("suspended mp syncing vp %p", vp)); 2892 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2893 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2894 VOP_UNLOCK(vp); 2895 vn_finished_write(mp); 2896 BO_LOCK(*bo); 2897 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2898 /* 2899 * Put us back on the worklist. The worklist 2900 * routine will remove us from our current 2901 * position and then add us back in at a later 2902 * position. 
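 * With the default syncdelay of 30 seconds this pushes the next visit
 * roughly half a minute out, unless rushjob (see below) makes the
 * syncer churn through the wheel faster.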
2903 */ 2904 vn_syncer_add_to_worklist(*bo, syncdelay); 2905 } 2906 BO_UNLOCK(*bo); 2907 vdrop(vp); 2908 mtx_lock(&sync_mtx); 2909 return (0); 2910 } 2911 2912 static int first_printf = 1; 2913 2914 /* 2915 * System filesystem synchronizer daemon. 2916 */ 2917 static void 2918 sched_sync(void) 2919 { 2920 struct synclist *next, *slp; 2921 struct bufobj *bo; 2922 long starttime; 2923 struct thread *td = curthread; 2924 int last_work_seen; 2925 int net_worklist_len; 2926 int syncer_final_iter; 2927 int error; 2928 2929 last_work_seen = 0; 2930 syncer_final_iter = 0; 2931 syncer_state = SYNCER_RUNNING; 2932 starttime = time_uptime; 2933 td->td_pflags |= TDP_NORUNNINGBUF; 2934 2935 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2936 SHUTDOWN_PRI_LAST); 2937 2938 mtx_lock(&sync_mtx); 2939 for (;;) { 2940 if (syncer_state == SYNCER_FINAL_DELAY && 2941 syncer_final_iter == 0) { 2942 mtx_unlock(&sync_mtx); 2943 kproc_suspend_check(td->td_proc); 2944 mtx_lock(&sync_mtx); 2945 } 2946 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2947 if (syncer_state != SYNCER_RUNNING && 2948 starttime != time_uptime) { 2949 if (first_printf) { 2950 printf("\nSyncing disks, vnodes remaining... "); 2951 first_printf = 0; 2952 } 2953 printf("%d ", net_worklist_len); 2954 } 2955 starttime = time_uptime; 2956 2957 /* 2958 * Push files whose dirty time has expired. Be careful 2959 * of interrupt race on slp queue. 2960 * 2961 * Skip over empty worklist slots when shutting down. 2962 */ 2963 do { 2964 slp = &syncer_workitem_pending[syncer_delayno]; 2965 syncer_delayno += 1; 2966 if (syncer_delayno == syncer_maxdelay) 2967 syncer_delayno = 0; 2968 next = &syncer_workitem_pending[syncer_delayno]; 2969 /* 2970 * If the worklist has wrapped since 2971 * it was emptied of all but syncer vnodes, 2972 * switch to the FINAL_DELAY state and run 2973 * for one more second. 2974 */ 2975 if (syncer_state == SYNCER_SHUTTING_DOWN && 2976 net_worklist_len == 0 && 2977 last_work_seen == syncer_delayno) { 2978 syncer_state = SYNCER_FINAL_DELAY; 2979 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2980 } 2981 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2982 syncer_worklist_len > 0); 2983 2984 /* 2985 * Keep track of the last time there was anything 2986 * on the worklist other than syncer vnodes. 2987 * Return to the SHUTTING_DOWN state if any 2988 * new work appears. 2989 */ 2990 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2991 last_work_seen = syncer_delayno; 2992 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2993 syncer_state = SYNCER_SHUTTING_DOWN; 2994 while (!LIST_EMPTY(slp)) { 2995 error = sync_vnode(slp, &bo, td); 2996 if (error == 1) { 2997 LIST_REMOVE(bo, bo_synclist); 2998 LIST_INSERT_HEAD(next, bo, bo_synclist); 2999 continue; 3000 } 3001 3002 if (first_printf == 0) { 3003 /* 3004 * Drop the sync mutex, because some watchdog 3005 * drivers need to sleep while patting the watchdog. 3006 */ 3007 mtx_unlock(&sync_mtx); 3008 wdog_kern_pat(WD_LASTVAL); 3009 mtx_lock(&sync_mtx); 3010 } 3011 } 3012 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 3013 syncer_final_iter--; 3014 /* 3015 * The variable rushjob allows the kernel to speed up the 3016 * processing of the filesystem syncer process. A rushjob 3017 * value of N tells the filesystem syncer to process the next 3018 * N seconds worth of work on its queue ASAP.
Currently rushjob 3019 * is used by the soft update code to speed up the filesystem 3020 * syncer process when the incore state is getting so far 3021 * ahead of the disk that the kernel memory pool is being 3022 * threatened with exhaustion. 3023 */ 3024 if (rushjob > 0) { 3025 rushjob -= 1; 3026 continue; 3027 } 3028 /* 3029 * Just sleep for a short period of time between 3030 * iterations when shutting down to allow some I/O 3031 * to happen. 3032 * 3033 * If it has taken us less than a second to process the 3034 * current work, then wait. Otherwise start right over 3035 * again. We can still lose time if any single round 3036 * takes more than two seconds, but it does not really 3037 * matter as we are just trying to generally pace the 3038 * filesystem activity. 3039 */ 3040 if (syncer_state != SYNCER_RUNNING || 3041 time_uptime == starttime) { 3042 thread_lock(td); 3043 sched_prio(td, PPAUSE); 3044 thread_unlock(td); 3045 } 3046 if (syncer_state != SYNCER_RUNNING) 3047 cv_timedwait(&sync_wakeup, &sync_mtx, 3048 hz / SYNCER_SHUTDOWN_SPEEDUP); 3049 else if (time_uptime == starttime) 3050 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3051 } 3052 } 3053 3054 /* 3055 * Request the syncer daemon to speed up its work. 3056 * We never push it to speed up more than half of its 3057 * normal turn time, otherwise it could take over the CPU. 3058 */ 3059 int 3060 speedup_syncer(void) 3061 { 3062 int ret = 0; 3063 3064 mtx_lock(&sync_mtx); 3065 if (rushjob < syncdelay / 2) { 3066 rushjob += 1; 3067 stat_rush_requests += 1; 3068 ret = 1; 3069 } 3070 mtx_unlock(&sync_mtx); 3071 cv_broadcast(&sync_wakeup); 3072 return (ret); 3073 } 3074 3075 /* 3076 * Tell the syncer to speed up its work and run through its work 3077 * list several times, then tell it to shut down. 3078 */ 3079 static void 3080 syncer_shutdown(void *arg, int howto) 3081 { 3082 3083 if (howto & RB_NOSYNC) 3084 return; 3085 mtx_lock(&sync_mtx); 3086 syncer_state = SYNCER_SHUTTING_DOWN; 3087 rushjob = 0; 3088 mtx_unlock(&sync_mtx); 3089 cv_broadcast(&sync_wakeup); 3090 kproc_shutdown(arg, howto); 3091 } 3092 3093 void 3094 syncer_suspend(void) 3095 { 3096 3097 syncer_shutdown(updateproc, 0); 3098 } 3099 3100 void 3101 syncer_resume(void) 3102 { 3103 3104 mtx_lock(&sync_mtx); 3105 first_printf = 1; 3106 syncer_state = SYNCER_RUNNING; 3107 mtx_unlock(&sync_mtx); 3108 cv_broadcast(&sync_wakeup); 3109 kproc_resume(updateproc); 3110 } 3111 3112 /* 3113 * Move the buffer between the clean and dirty lists of its vnode. 3114 */ 3115 void 3116 reassignbuf(struct buf *bp) 3117 { 3118 struct vnode *vp; 3119 struct bufobj *bo; 3120 int delay; 3121 #ifdef INVARIANTS 3122 struct bufv *bv; 3123 #endif 3124 3125 vp = bp->b_vp; 3126 bo = bp->b_bufobj; 3127 3128 KASSERT((bp->b_flags & B_PAGING) == 0, 3129 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3130 3131 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3132 bp, bp->b_vp, bp->b_flags); 3133 3134 BO_LOCK(bo); 3135 buf_vlist_remove(bp); 3136 3137 /* 3138 * If dirty, put on list of dirty buffers; otherwise insert onto list 3139 * of clean buffers.
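 * The delays picked below are staggered on purpose (by default
 * filedelay, dirdelay and metadelay are 30, 29 and 28 seconds), which
 * keeps file data, directories and metadata from all landing in the
 * same syncer bucket.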
3140 */ 3141 if (bp->b_flags & B_DELWRI) { 3142 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3143 switch (vp->v_type) { 3144 case VDIR: 3145 delay = dirdelay; 3146 break; 3147 case VCHR: 3148 delay = metadelay; 3149 break; 3150 default: 3151 delay = filedelay; 3152 } 3153 vn_syncer_add_to_worklist(bo, delay); 3154 } 3155 buf_vlist_add(bp, bo, BX_VNDIRTY); 3156 } else { 3157 buf_vlist_add(bp, bo, BX_VNCLEAN); 3158 3159 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3160 mtx_lock(&sync_mtx); 3161 LIST_REMOVE(bo, bo_synclist); 3162 syncer_worklist_len--; 3163 mtx_unlock(&sync_mtx); 3164 bo->bo_flag &= ~BO_ONWORKLST; 3165 } 3166 } 3167 #ifdef INVARIANTS 3168 bv = &bo->bo_clean; 3169 bp = TAILQ_FIRST(&bv->bv_hd); 3170 KASSERT(bp == NULL || bp->b_bufobj == bo, 3171 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3172 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3173 KASSERT(bp == NULL || bp->b_bufobj == bo, 3174 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3175 bv = &bo->bo_dirty; 3176 bp = TAILQ_FIRST(&bv->bv_hd); 3177 KASSERT(bp == NULL || bp->b_bufobj == bo, 3178 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3179 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3180 KASSERT(bp == NULL || bp->b_bufobj == bo, 3181 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3182 #endif 3183 BO_UNLOCK(bo); 3184 } 3185 3186 static void 3187 v_init_counters(struct vnode *vp) 3188 { 3189 3190 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3191 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3192 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3193 3194 refcount_init(&vp->v_holdcnt, 1); 3195 refcount_init(&vp->v_usecount, 1); 3196 } 3197 3198 /* 3199 * Grab a particular vnode from the free list, increment its 3200 * reference count and lock it. VIRF_DOOMED is set if the vnode 3201 * is being destroyed. Only callers who specify LK_RETRY will 3202 * see doomed vnodes. If inactive processing was delayed in 3203 * vput try to do it here. 3204 * 3205 * usecount is manipulated using atomics without holding any locks. 3206 * 3207 * holdcnt can be manipulated using atomics without holding any locks, 3208 * except when transitioning 1<->0, in which case the interlock is held. 3209 * 3210 * Consumers which don't guarantee liveness of the vnode can use SMR to 3211 * try to get a reference. Note this operation can fail since the vnode 3212 * may already be on its way to being freed by the time they get to it.
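 *
 * A lockless consumer (the name cache being the prime example) is
 * expected to roughly follow this shape:
 *
 *	vfs_smr_enter();
 *	<find vp without taking locks>
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		<restart the lookup>
 *	error = vget_finish(vp, lkflags, vs);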
3213 */ 3214 enum vgetstate 3215 vget_prep_smr(struct vnode *vp) 3216 { 3217 enum vgetstate vs; 3218 3219 VFS_SMR_ASSERT_ENTERED(); 3220 3221 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3222 vs = VGET_USECOUNT; 3223 } else { 3224 if (vhold_smr(vp)) 3225 vs = VGET_HOLDCNT; 3226 else 3227 vs = VGET_NONE; 3228 } 3229 return (vs); 3230 } 3231 3232 enum vgetstate 3233 vget_prep(struct vnode *vp) 3234 { 3235 enum vgetstate vs; 3236 3237 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3238 vs = VGET_USECOUNT; 3239 } else { 3240 vhold(vp); 3241 vs = VGET_HOLDCNT; 3242 } 3243 return (vs); 3244 } 3245 3246 void 3247 vget_abort(struct vnode *vp, enum vgetstate vs) 3248 { 3249 3250 switch (vs) { 3251 case VGET_USECOUNT: 3252 vrele(vp); 3253 break; 3254 case VGET_HOLDCNT: 3255 vdrop(vp); 3256 break; 3257 default: 3258 __assert_unreachable(); 3259 } 3260 } 3261 3262 int 3263 vget(struct vnode *vp, int flags) 3264 { 3265 enum vgetstate vs; 3266 3267 vs = vget_prep(vp); 3268 return (vget_finish(vp, flags, vs)); 3269 } 3270 3271 int 3272 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3273 { 3274 int error; 3275 3276 if ((flags & LK_INTERLOCK) != 0) 3277 ASSERT_VI_LOCKED(vp, __func__); 3278 else 3279 ASSERT_VI_UNLOCKED(vp, __func__); 3280 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3281 VNPASS(vp->v_holdcnt > 0, vp); 3282 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3283 3284 error = vn_lock(vp, flags); 3285 if (__predict_false(error != 0)) { 3286 vget_abort(vp, vs); 3287 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3288 vp); 3289 return (error); 3290 } 3291 3292 vget_finish_ref(vp, vs); 3293 return (0); 3294 } 3295 3296 void 3297 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3298 { 3299 int old; 3300 3301 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3302 VNPASS(vp->v_holdcnt > 0, vp); 3303 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3304 3305 if (vs == VGET_USECOUNT) 3306 return; 3307 3308 /* 3309 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3310 * the vnode around. Otherwise someone else lent their hold count and 3311 * we have to drop ours. 3312 */ 3313 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3314 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3315 if (old != 0) { 3316 #ifdef INVARIANTS 3317 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3318 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3319 #else 3320 refcount_release(&vp->v_holdcnt); 3321 #endif 3322 } 3323 } 3324 3325 void 3326 vref(struct vnode *vp) 3327 { 3328 enum vgetstate vs; 3329 3330 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3331 vs = vget_prep(vp); 3332 vget_finish_ref(vp, vs); 3333 } 3334 3335 void 3336 vrefact(struct vnode *vp) 3337 { 3338 3339 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3340 #ifdef INVARIANTS 3341 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3342 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3343 #else 3344 refcount_acquire(&vp->v_usecount); 3345 #endif 3346 } 3347 3348 void 3349 vlazy(struct vnode *vp) 3350 { 3351 struct mount *mp; 3352 3353 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3354 3355 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3356 return; 3357 /* 3358 * We may get here for inactive routines after the vnode got doomed.
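 * The lazy list only drives deferred inactive and periodic msync
 * processing, both of which vgone has already taken care of for a
 * doomed vnode, hence bailing out here is safe.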
3359 */ 3360 if (VN_IS_DOOMED(vp)) 3361 return; 3362 mp = vp->v_mount; 3363 mtx_lock(&mp->mnt_listmtx); 3364 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3365 vp->v_mflag |= VMP_LAZYLIST; 3366 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3367 mp->mnt_lazyvnodelistsize++; 3368 } 3369 mtx_unlock(&mp->mnt_listmtx); 3370 } 3371 3372 static void 3373 vunlazy(struct vnode *vp) 3374 { 3375 struct mount *mp; 3376 3377 ASSERT_VI_LOCKED(vp, __func__); 3378 VNPASS(!VN_IS_DOOMED(vp), vp); 3379 3380 mp = vp->v_mount; 3381 mtx_lock(&mp->mnt_listmtx); 3382 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3383 /* 3384 * Don't remove the vnode from the lazy list if another thread 3385 * has increased the hold count. It may have re-enqueued the 3386 * vnode to the lazy list and is now responsible for its 3387 * removal. 3388 */ 3389 if (vp->v_holdcnt == 0) { 3390 vp->v_mflag &= ~VMP_LAZYLIST; 3391 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3392 mp->mnt_lazyvnodelistsize--; 3393 } 3394 mtx_unlock(&mp->mnt_listmtx); 3395 } 3396 3397 /* 3398 * This routine is only meant to be called from vgonel prior to dooming 3399 * the vnode. 3400 */ 3401 static void 3402 vunlazy_gone(struct vnode *vp) 3403 { 3404 struct mount *mp; 3405 3406 ASSERT_VOP_ELOCKED(vp, __func__); 3407 ASSERT_VI_LOCKED(vp, __func__); 3408 VNPASS(!VN_IS_DOOMED(vp), vp); 3409 3410 if (vp->v_mflag & VMP_LAZYLIST) { 3411 mp = vp->v_mount; 3412 mtx_lock(&mp->mnt_listmtx); 3413 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3414 vp->v_mflag &= ~VMP_LAZYLIST; 3415 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3416 mp->mnt_lazyvnodelistsize--; 3417 mtx_unlock(&mp->mnt_listmtx); 3418 } 3419 } 3420 3421 static void 3422 vdefer_inactive(struct vnode *vp) 3423 { 3424 3425 ASSERT_VI_LOCKED(vp, __func__); 3426 VNPASS(vp->v_holdcnt > 0, vp); 3427 if (VN_IS_DOOMED(vp)) { 3428 vdropl(vp); 3429 return; 3430 } 3431 if (vp->v_iflag & VI_DEFINACT) { 3432 VNPASS(vp->v_holdcnt > 1, vp); 3433 vdropl(vp); 3434 return; 3435 } 3436 if (vp->v_usecount > 0) { 3437 vp->v_iflag &= ~VI_OWEINACT; 3438 vdropl(vp); 3439 return; 3440 } 3441 vlazy(vp); 3442 vp->v_iflag |= VI_DEFINACT; 3443 VI_UNLOCK(vp); 3444 atomic_add_long(&deferred_inact, 1); 3445 } 3446 3447 static void 3448 vdefer_inactive_unlocked(struct vnode *vp) 3449 { 3450 3451 VI_LOCK(vp); 3452 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3453 vdropl(vp); 3454 return; 3455 } 3456 vdefer_inactive(vp); 3457 } 3458 3459 enum vput_op { VRELE, VPUT, VUNREF }; 3460 3461 /* 3462 * Handle ->v_usecount transitioning to 0. 3463 * 3464 * By releasing the last usecount we take ownership of the hold count which 3465 * provides liveness of the vnode, meaning we have to vdrop. 3466 * 3467 * For all vnodes we may need to perform inactive processing. It requires an 3468 * exclusive lock on the vnode, while it is legal to call here with only a 3469 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3470 * inactive processing gets deferred to the syncer. 3471 * 3472 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3473 * on the lock being held all the way until VOP_INACTIVE. This in particular 3474 * happens with UFS which adds half-constructed vnodes to the hash, where they 3475 * can be found by other code. 
3476 */ 3477 static void 3478 vput_final(struct vnode *vp, enum vput_op func) 3479 { 3480 int error; 3481 bool want_unlock; 3482 3483 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3484 VNPASS(vp->v_holdcnt > 0, vp); 3485 3486 VI_LOCK(vp); 3487 3488 /* 3489 * By the time we got here someone else might have transitioned 3490 * the count back to > 0. 3491 */ 3492 if (vp->v_usecount > 0) 3493 goto out; 3494 3495 /* 3496 * If the vnode is doomed vgone already performed inactive processing 3497 * (if needed). 3498 */ 3499 if (VN_IS_DOOMED(vp)) 3500 goto out; 3501 3502 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3503 goto out; 3504 3505 if (vp->v_iflag & VI_DOINGINACT) 3506 goto out; 3507 3508 /* 3509 * Locking operations here will drop the interlock and possibly the 3510 * vnode lock, opening a window where the vnode can get doomed all the 3511 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3512 * perform inactive. 3513 */ 3514 vp->v_iflag |= VI_OWEINACT; 3515 want_unlock = false; 3516 error = 0; 3517 switch (func) { 3518 case VRELE: 3519 switch (VOP_ISLOCKED(vp)) { 3520 case LK_EXCLUSIVE: 3521 break; 3522 case LK_EXCLOTHER: 3523 case 0: 3524 want_unlock = true; 3525 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3526 VI_LOCK(vp); 3527 break; 3528 default: 3529 /* 3530 * The lock has at least one sharer, but we have no way 3531 * to conclude whether this is us. Play it safe and 3532 * defer processing. 3533 */ 3534 error = EAGAIN; 3535 break; 3536 } 3537 break; 3538 case VPUT: 3539 want_unlock = true; 3540 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3541 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3542 LK_NOWAIT); 3543 VI_LOCK(vp); 3544 } 3545 break; 3546 case VUNREF: 3547 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3548 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3549 VI_LOCK(vp); 3550 } 3551 break; 3552 } 3553 if (error == 0) { 3554 if (func == VUNREF) { 3555 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3556 ("recursive vunref")); 3557 vp->v_vflag |= VV_UNREF; 3558 } 3559 for (;;) { 3560 error = vinactive(vp); 3561 if (want_unlock) 3562 VOP_UNLOCK(vp); 3563 if (error != ERELOOKUP || !want_unlock) 3564 break; 3565 VOP_LOCK(vp, LK_EXCLUSIVE); 3566 } 3567 if (func == VUNREF) 3568 vp->v_vflag &= ~VV_UNREF; 3569 vdropl(vp); 3570 } else { 3571 vdefer_inactive(vp); 3572 } 3573 return; 3574 out: 3575 if (func == VPUT) 3576 VOP_UNLOCK(vp); 3577 vdropl(vp); 3578 } 3579 3580 /* 3581 * Decrement ->v_usecount for a vnode. 3582 * 3583 * Releasing the last use count requires additional processing, see vput_final 3584 * above for details. 3585 * 3586 * Comment above each variant denotes lock state on entry and exit. 
3587 */ 3588 3589 /* 3590 * in: any 3591 * out: same as passed in 3592 */ 3593 void 3594 vrele(struct vnode *vp) 3595 { 3596 3597 ASSERT_VI_UNLOCKED(vp, __func__); 3598 if (!refcount_release(&vp->v_usecount)) 3599 return; 3600 vput_final(vp, VRELE); 3601 } 3602 3603 /* 3604 * in: locked 3605 * out: unlocked 3606 */ 3607 void 3608 vput(struct vnode *vp) 3609 { 3610 3611 ASSERT_VOP_LOCKED(vp, __func__); 3612 ASSERT_VI_UNLOCKED(vp, __func__); 3613 if (!refcount_release(&vp->v_usecount)) { 3614 VOP_UNLOCK(vp); 3615 return; 3616 } 3617 vput_final(vp, VPUT); 3618 } 3619 3620 /* 3621 * in: locked 3622 * out: locked 3623 */ 3624 void 3625 vunref(struct vnode *vp) 3626 { 3627 3628 ASSERT_VOP_LOCKED(vp, __func__); 3629 ASSERT_VI_UNLOCKED(vp, __func__); 3630 if (!refcount_release(&vp->v_usecount)) 3631 return; 3632 vput_final(vp, VUNREF); 3633 } 3634 3635 void 3636 vhold(struct vnode *vp) 3637 { 3638 int old; 3639 3640 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3641 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3642 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3643 ("%s: wrong hold count %d", __func__, old)); 3644 if (old == 0) 3645 vfs_freevnodes_dec(); 3646 } 3647 3648 void 3649 vholdnz(struct vnode *vp) 3650 { 3651 3652 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3653 #ifdef INVARIANTS 3654 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3655 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3656 ("%s: wrong hold count %d", __func__, old)); 3657 #else 3658 atomic_add_int(&vp->v_holdcnt, 1); 3659 #endif 3660 } 3661 3662 /* 3663 * Grab a hold count unless the vnode is freed. 3664 * 3665 * Only use this routine if vfs smr is the only protection you have against 3666 * freeing the vnode. 3667 * 3668 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3669 * is not set. After the flag is set the vnode becomes immutable to anyone but 3670 * the thread which managed to set the flag. 3671 * 3672 * It may be tempting to replace the loop with: 3673 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3674 * if (count & VHOLD_NO_SMR) { 3675 * backpedal and error out; 3676 * } 3677 * 3678 * However, while this is more performant, it hinders debugging by eliminating 3679 * the previously mentioned invariant. 3680 */ 3681 bool 3682 vhold_smr(struct vnode *vp) 3683 { 3684 int count; 3685 3686 VFS_SMR_ASSERT_ENTERED(); 3687 3688 count = atomic_load_int(&vp->v_holdcnt); 3689 for (;;) { 3690 if (count & VHOLD_NO_SMR) { 3691 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3692 ("non-zero hold count with flags %d\n", count)); 3693 return (false); 3694 } 3695 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3696 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3697 if (count == 0) 3698 vfs_freevnodes_dec(); 3699 return (true); 3700 } 3701 } 3702 } 3703 3704 /* 3705 * Hold a free vnode for recycling. 3706 * 3707 * Note: vnode_init references this comment. 3708 * 3709 * Attempts to recycle only need the global vnode list lock and have no use for 3710 * SMR. 3711 * 3712 * However, vnodes get inserted into the global list before they get fully 3713 * initialized and stay there until UMA decides to free the memory. This in 3714 * particular means the target can be found before it becomes usable and after 3715 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3716 * VHOLD_NO_SMR. 3717 * 3718 * Note: the vnode may gain more references after we transition the count 0->1. 
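 * The caller (in practice vtryrecycle()) compensates by re-checking
 * v_usecount under the interlock and backing off with EBUSY if the
 * vnode gained users in the meantime.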
3719 */ 3720 static bool 3721 vhold_recycle_free(struct vnode *vp) 3722 { 3723 int count; 3724 3725 mtx_assert(&vnode_list_mtx, MA_OWNED); 3726 3727 count = atomic_load_int(&vp->v_holdcnt); 3728 for (;;) { 3729 if (count & VHOLD_NO_SMR) { 3730 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3731 ("non-zero hold count with flags %d\n", count)); 3732 return (false); 3733 } 3734 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3735 if (count > 0) { 3736 return (false); 3737 } 3738 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3739 vfs_freevnodes_dec(); 3740 return (true); 3741 } 3742 } 3743 } 3744 3745 static void __noinline 3746 vdbatch_process(struct vdbatch *vd) 3747 { 3748 struct vnode *vp; 3749 int i; 3750 3751 mtx_assert(&vd->lock, MA_OWNED); 3752 MPASS(curthread->td_pinned > 0); 3753 MPASS(vd->index == VDBATCH_SIZE); 3754 3755 /* 3756 * Attempt to requeue the passed batch, but give up easily. 3757 * 3758 * Despite batching the mechanism is prone to transient *significant* 3759 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3760 * if multiple CPUs get here (one real-world example is highly parallel 3761 * do-nothing make, which will stat *tons* of vnodes). Since it is 3762 * quasi-LRU (read: not that great even if fully honoured) just dodge 3763 * the problem. Parties which don't like it are welcome to implement 3764 * something better. 3765 */ 3766 critical_enter(); 3767 if (mtx_trylock(&vnode_list_mtx)) { 3768 for (i = 0; i < VDBATCH_SIZE; i++) { 3769 vp = vd->tab[i]; 3770 vd->tab[i] = NULL; 3771 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3772 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3773 MPASS(vp->v_dbatchcpu != NOCPU); 3774 vp->v_dbatchcpu = NOCPU; 3775 } 3776 mtx_unlock(&vnode_list_mtx); 3777 } else { 3778 counter_u64_add(vnode_skipped_requeues, 1); 3779 3780 for (i = 0; i < VDBATCH_SIZE; i++) { 3781 vp = vd->tab[i]; 3782 vd->tab[i] = NULL; 3783 MPASS(vp->v_dbatchcpu != NOCPU); 3784 vp->v_dbatchcpu = NOCPU; 3785 } 3786 } 3787 vd->index = 0; 3788 critical_exit(); 3789 } 3790 3791 static void 3792 vdbatch_enqueue(struct vnode *vp) 3793 { 3794 struct vdbatch *vd; 3795 3796 ASSERT_VI_LOCKED(vp, __func__); 3797 VNPASS(!VN_IS_DOOMED(vp), vp); 3798 3799 if (vp->v_dbatchcpu != NOCPU) { 3800 VI_UNLOCK(vp); 3801 return; 3802 } 3803 3804 sched_pin(); 3805 vd = DPCPU_PTR(vd); 3806 mtx_lock(&vd->lock); 3807 MPASS(vd->index < VDBATCH_SIZE); 3808 MPASS(vd->tab[vd->index] == NULL); 3809 /* 3810 * A hack: we depend on being pinned so that we know what to put in 3811 * ->v_dbatchcpu. 3812 */ 3813 vp->v_dbatchcpu = curcpu; 3814 vd->tab[vd->index] = vp; 3815 vd->index++; 3816 VI_UNLOCK(vp); 3817 if (vd->index == VDBATCH_SIZE) 3818 vdbatch_process(vd); 3819 mtx_unlock(&vd->lock); 3820 sched_unpin(); 3821 } 3822 3823 /* 3824 * This routine must only be called for vnodes which are about to be 3825 * deallocated. Supporting dequeue for arbitrary vnodes would require 3826 * validating that the locked batch matches.
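 * That is, between reading v_dbatchcpu and locking the batch the
 * vnode could in principle get processed and re-enqueued on another
 * CPU; only vnodes headed for freevnode() are guaranteed to stay put.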
3827 */ 3828 static void 3829 vdbatch_dequeue(struct vnode *vp) 3830 { 3831 struct vdbatch *vd; 3832 int i; 3833 short cpu; 3834 3835 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3836 3837 cpu = vp->v_dbatchcpu; 3838 if (cpu == NOCPU) 3839 return; 3840 3841 vd = DPCPU_ID_PTR(cpu, vd); 3842 mtx_lock(&vd->lock); 3843 for (i = 0; i < vd->index; i++) { 3844 if (vd->tab[i] != vp) 3845 continue; 3846 vp->v_dbatchcpu = NOCPU; 3847 vd->index--; 3848 vd->tab[i] = vd->tab[vd->index]; 3849 vd->tab[vd->index] = NULL; 3850 break; 3851 } 3852 mtx_unlock(&vd->lock); 3853 /* 3854 * Either we dequeued the vnode above or the target CPU beat us to it. 3855 */ 3856 MPASS(vp->v_dbatchcpu == NOCPU); 3857 } 3858 3859 /* 3860 * Drop the hold count of the vnode. If this is the last reference to 3861 * the vnode we place it on the free list unless it has been vgone'd 3862 * (marked VIRF_DOOMED) in which case we will free it. 3863 * 3864 * Because the vnode vm object keeps a hold reference on the vnode if 3865 * there is at least one resident non-cached page, the vnode cannot 3866 * leave the active list without the page cleanup done. 3867 */ 3868 static void __noinline 3869 vdropl_final(struct vnode *vp) 3870 { 3871 3872 ASSERT_VI_LOCKED(vp, __func__); 3873 VNPASS(VN_IS_DOOMED(vp), vp); 3874 /* 3875 * Set the VHOLD_NO_SMR flag. 3876 * 3877 * We may be racing against vhold_smr. If they win we can just pretend 3878 * we never got this far, they will vdrop later. 3879 */ 3880 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3881 vfs_freevnodes_inc(); 3882 VI_UNLOCK(vp); 3883 /* 3884 * We lost the aforementioned race. Any subsequent access is 3885 * invalid as they might have managed to vdropl on their own. 3886 */ 3887 return; 3888 } 3889 /* 3890 * Don't bump freevnodes as this one is going away. 3891 */ 3892 freevnode(vp); 3893 } 3894 3895 void 3896 vdrop(struct vnode *vp) 3897 { 3898 3899 ASSERT_VI_UNLOCKED(vp, __func__); 3900 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3901 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3902 return; 3903 VI_LOCK(vp); 3904 vdropl(vp); 3905 } 3906 3907 static void __always_inline 3908 vdropl_impl(struct vnode *vp, bool enqueue) 3909 { 3910 3911 ASSERT_VI_LOCKED(vp, __func__); 3912 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3913 if (!refcount_release(&vp->v_holdcnt)) { 3914 VI_UNLOCK(vp); 3915 return; 3916 } 3917 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3918 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3919 if (VN_IS_DOOMED(vp)) { 3920 vdropl_final(vp); 3921 return; 3922 } 3923 3924 vfs_freevnodes_inc(); 3925 if (vp->v_mflag & VMP_LAZYLIST) { 3926 vunlazy(vp); 3927 } 3928 3929 if (!enqueue) { 3930 VI_UNLOCK(vp); 3931 return; 3932 } 3933 3934 /* 3935 * Also unlocks the interlock. We can't assert on it as we 3936 * released our hold and by now the vnode might have been 3937 * freed. 3938 */ 3939 vdbatch_enqueue(vp); 3940 } 3941 3942 void 3943 vdropl(struct vnode *vp) 3944 { 3945 3946 vdropl_impl(vp, true); 3947 } 3948 3949 /* 3950 * vdrop a vnode when recycling 3951 * 3952 * This is a special case routine only to be used when recycling; it differs from 3953 * regular vdrop by not requeueing the vnode on the LRU. 3954 * 3955 * Consider a case where vtryrecycle continuously fails with all vnodes (due to, 3956 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3957 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3958 * loop which can last for as long as writes are frozen.
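 * This is also why every failure path in vtryrecycle() above drops
 * its hold with vdrop_recycle()/vdropl_recycle() rather than plain
 * vdrop().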
3959 */ 3960 static void 3961 vdropl_recycle(struct vnode *vp) 3962 { 3963 3964 vdropl_impl(vp, false); 3965 } 3966 3967 static void 3968 vdrop_recycle(struct vnode *vp) 3969 { 3970 3971 VI_LOCK(vp); 3972 vdropl_recycle(vp); 3973 } 3974 3975 /* 3976 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3977 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3978 */ 3979 static int 3980 vinactivef(struct vnode *vp) 3981 { 3982 struct vm_object *obj; 3983 int error; 3984 3985 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3986 ASSERT_VI_LOCKED(vp, "vinactive"); 3987 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3988 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3989 vp->v_iflag |= VI_DOINGINACT; 3990 vp->v_iflag &= ~VI_OWEINACT; 3991 VI_UNLOCK(vp); 3992 /* 3993 * Before moving off the active list, we must be sure that any 3994 * modified pages are converted into the vnode's dirty 3995 * buffers, since these will no longer be checked once the 3996 * vnode is on the inactive list. 3997 * 3998 * The write-out of the dirty pages is asynchronous. At the 3999 * point that VOP_INACTIVE() is called, there could still be 4000 * pending I/O and dirty pages in the object. 4001 */ 4002 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 4003 vm_object_mightbedirty(obj)) { 4004 VM_OBJECT_WLOCK(obj); 4005 vm_object_page_clean(obj, 0, 0, 0); 4006 VM_OBJECT_WUNLOCK(obj); 4007 } 4008 error = VOP_INACTIVE(vp); 4009 VI_LOCK(vp); 4010 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 4011 vp->v_iflag &= ~VI_DOINGINACT; 4012 return (error); 4013 } 4014 4015 int 4016 vinactive(struct vnode *vp) 4017 { 4018 4019 ASSERT_VOP_ELOCKED(vp, "vinactive"); 4020 ASSERT_VI_LOCKED(vp, "vinactive"); 4021 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4022 4023 if ((vp->v_iflag & VI_OWEINACT) == 0) 4024 return (0); 4025 if (vp->v_iflag & VI_DOINGINACT) 4026 return (0); 4027 if (vp->v_usecount > 0) { 4028 vp->v_iflag &= ~VI_OWEINACT; 4029 return (0); 4030 } 4031 return (vinactivef(vp)); 4032 } 4033 4034 /* 4035 * Remove any vnodes in the vnode table belonging to mount point mp. 4036 * 4037 * If FORCECLOSE is not specified, there should not be any active ones, 4038 * return error if any are found (nb: this is a user error, not a 4039 * system error). If FORCECLOSE is specified, detach any active vnodes 4040 * that are found. 4041 * 4042 * If WRITECLOSE is set, only flush out regular file vnodes open for 4043 * writing. 4044 * 4045 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4046 * 4047 * `rootrefs' specifies the base reference count for the root vnode 4048 * of this filesystem. The root vnode is considered busy if its 4049 * v_usecount exceeds this value. On a successful return, vflush(, td) 4050 * will call vrele() on the root vnode exactly rootrefs times. 4051 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4052 * be zero. 4053 */ 4054 #ifdef DIAGNOSTIC 4055 static int busyprt = 0; /* print out busy vnodes */ 4056 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4057 #endif 4058 4059 int 4060 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4061 { 4062 struct vnode *vp, *mvp, *rootvp = NULL; 4063 struct vattr vattr; 4064 int busy = 0, error; 4065 4066 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4067 rootrefs, flags); 4068 if (rootrefs > 0) { 4069 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4070 ("vflush: bad args")); 4071 /* 4072 * Get the filesystem root vnode. 
We can vput() it
4073 * immediately, since with rootrefs > 0, it won't go away.
4074 */
4075 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
4076 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
4077 __func__, error);
4078 return (error);
4079 }
4080 vput(rootvp);
4081 }
4082 loop:
4083 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4084 vholdl(vp);
4085 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4086 if (error) {
4087 vdrop(vp);
4088 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4089 goto loop;
4090 }
4091 /*
4092 * Skip over vnodes marked VV_SYSTEM.
4093 */
4094 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4095 VOP_UNLOCK(vp);
4096 vdrop(vp);
4097 continue;
4098 }
4099 /*
4100 * If WRITECLOSE is set, flush out unlinked but still open
4101 * files (even if open only for reading) and regular file
4102 * vnodes open for writing.
4103 */
4104 if (flags & WRITECLOSE) {
4105 if (vp->v_object != NULL) {
4106 VM_OBJECT_WLOCK(vp->v_object);
4107 vm_object_page_clean(vp->v_object, 0, 0, 0);
4108 VM_OBJECT_WUNLOCK(vp->v_object);
4109 }
4110 do {
4111 error = VOP_FSYNC(vp, MNT_WAIT, td);
4112 } while (error == ERELOOKUP);
4113 if (error != 0) {
4114 VOP_UNLOCK(vp);
4115 vdrop(vp);
4116 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4117 return (error);
4118 }
4119 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4120 VI_LOCK(vp);
4121
4122 if ((vp->v_type == VNON ||
4123 (error == 0 && vattr.va_nlink > 0)) &&
4124 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4125 VOP_UNLOCK(vp);
4126 vdropl(vp);
4127 continue;
4128 }
4129 } else
4130 VI_LOCK(vp);
4131 /*
4132 * With v_usecount == 0, all we need to do is clear out the
4133 * vnode data structures and we are done.
4134 *
4135 * If FORCECLOSE is set, forcibly close the vnode.
4136 */
4137 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4138 vgonel(vp);
4139 } else {
4140 busy++;
4141 #ifdef DIAGNOSTIC
4142 if (busyprt)
4143 vn_printf(vp, "vflush: busy vnode ");
4144 #endif
4145 }
4146 VOP_UNLOCK(vp);
4147 vdropl(vp);
4148 }
4149 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
4150 /*
4151 * If just the root vnode is busy, and if its refcount
4152 * is equal to `rootrefs', then go ahead and kill it.
4153 */
4154 VI_LOCK(rootvp);
4155 KASSERT(busy > 0, ("vflush: not busy"));
4156 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
4157 ("vflush: usecount %d < rootrefs %d",
4158 rootvp->v_usecount, rootrefs));
4159 if (busy == 1 && rootvp->v_usecount == rootrefs) {
4160 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
4161 vgone(rootvp);
4162 VOP_UNLOCK(rootvp);
4163 busy = 0;
4164 } else
4165 VI_UNLOCK(rootvp);
4166 }
4167 if (busy) {
4168 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
4169 busy);
4170 return (EBUSY);
4171 }
4172 for (; rootrefs > 0; rootrefs--)
4173 vrele(rootvp);
4174 return (0);
4175 }
4176
4177 /*
4178 * Recycle an unused vnode to the front of the free list.
4179 */
4180 int
4181 vrecycle(struct vnode *vp)
4182 {
4183 int recycled;
4184
4185 VI_LOCK(vp);
4186 recycled = vrecyclel(vp);
4187 VI_UNLOCK(vp);
4188 return (recycled);
4189 }
4190
4191 /*
4192 * vrecycle, with the vp interlock held.
4193 */
4194 int
4195 vrecyclel(struct vnode *vp)
4196 {
4197 int recycled;
4198
4199 ASSERT_VOP_ELOCKED(vp, __func__);
4200 ASSERT_VI_LOCKED(vp, __func__);
4201 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4202 recycled = 0;
4203 if (vp->v_usecount == 0) {
4204 recycled = 1;
4205 vgonel(vp);
4206 }
4207 return (recycled);
4208 }
4209
4210 /*
4211 * Eliminate all activity associated with a vnode
4212 * in preparation for reuse.
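 *
 * The caller must hold the vnode lock exclusively. A typical teardown
 * sequence (as used by vfs_allocate_syncvnode() later in this file):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	vput(vp);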
4213 */ 4214 void 4215 vgone(struct vnode *vp) 4216 { 4217 VI_LOCK(vp); 4218 vgonel(vp); 4219 VI_UNLOCK(vp); 4220 } 4221 4222 /* 4223 * Notify upper mounts about reclaimed or unlinked vnode. 4224 */ 4225 void 4226 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4227 { 4228 struct mount *mp; 4229 struct mount_upper_node *ump; 4230 4231 mp = atomic_load_ptr(&vp->v_mount); 4232 if (mp == NULL) 4233 return; 4234 if (TAILQ_EMPTY(&mp->mnt_notify)) 4235 return; 4236 4237 MNT_ILOCK(mp); 4238 mp->mnt_upper_pending++; 4239 KASSERT(mp->mnt_upper_pending > 0, 4240 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4241 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4242 MNT_IUNLOCK(mp); 4243 switch (event) { 4244 case VFS_NOTIFY_UPPER_RECLAIM: 4245 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4246 break; 4247 case VFS_NOTIFY_UPPER_UNLINK: 4248 VFS_UNLINK_LOWERVP(ump->mp, vp); 4249 break; 4250 } 4251 MNT_ILOCK(mp); 4252 } 4253 mp->mnt_upper_pending--; 4254 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4255 mp->mnt_upper_pending == 0) { 4256 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4257 wakeup(&mp->mnt_uppers); 4258 } 4259 MNT_IUNLOCK(mp); 4260 } 4261 4262 /* 4263 * vgone, with the vp interlock held. 4264 */ 4265 static void 4266 vgonel(struct vnode *vp) 4267 { 4268 struct thread *td; 4269 struct mount *mp; 4270 vm_object_t object; 4271 bool active, doinginact, oweinact; 4272 4273 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4274 ASSERT_VI_LOCKED(vp, "vgonel"); 4275 VNASSERT(vp->v_holdcnt, vp, 4276 ("vgonel: vp %p has no reference.", vp)); 4277 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4278 td = curthread; 4279 4280 /* 4281 * Don't vgonel if we're already doomed. 4282 */ 4283 if (VN_IS_DOOMED(vp)) { 4284 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4285 vn_get_state(vp) == VSTATE_DEAD, vp); 4286 return; 4287 } 4288 /* 4289 * Paired with freevnode. 4290 */ 4291 vn_seqc_write_begin_locked(vp); 4292 vunlazy_gone(vp); 4293 vn_irflag_set_locked(vp, VIRF_DOOMED); 4294 vn_set_state(vp, VSTATE_DESTROYING); 4295 4296 /* 4297 * Check to see if the vnode is in use. If so, we have to 4298 * call VOP_CLOSE() and VOP_INACTIVE(). 4299 * 4300 * It could be that VOP_INACTIVE() requested reclamation, in 4301 * which case we should avoid recursion, so check 4302 * VI_DOINGINACT. This is not precise but good enough. 4303 */ 4304 active = vp->v_usecount > 0; 4305 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4306 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4307 4308 /* 4309 * If we need to do inactive VI_OWEINACT will be set. 4310 */ 4311 if (vp->v_iflag & VI_DEFINACT) { 4312 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4313 vp->v_iflag &= ~VI_DEFINACT; 4314 vdropl(vp); 4315 } else { 4316 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4317 VI_UNLOCK(vp); 4318 } 4319 cache_purge_vgone(vp); 4320 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4321 4322 /* 4323 * If purging an active vnode, it must be closed and 4324 * deactivated before being reclaimed. 4325 */ 4326 if (active) 4327 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4328 if (!doinginact) { 4329 do { 4330 if (oweinact || active) { 4331 VI_LOCK(vp); 4332 vinactivef(vp); 4333 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4334 VI_UNLOCK(vp); 4335 } 4336 } while (oweinact); 4337 } 4338 if (vp->v_type == VSOCK) 4339 vfs_unp_reclaim(vp); 4340 4341 /* 4342 * Clean out any buffers associated with the vnode. 4343 * If the flush fails, just toss the buffers. 
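 * (vinvalbuf() with V_SAVE first tries to write the dirty buffers
 * back; the fallback call with flags 0 discards them.)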
4344 */ 4345 mp = NULL; 4346 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4347 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4348 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4349 while (vinvalbuf(vp, 0, 0, 0) != 0) 4350 ; 4351 } 4352 4353 BO_LOCK(&vp->v_bufobj); 4354 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4355 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4356 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4357 vp->v_bufobj.bo_clean.bv_cnt == 0, 4358 ("vp %p bufobj not invalidated", vp)); 4359 4360 /* 4361 * For VMIO bufobj, BO_DEAD is set later, or in 4362 * vm_object_terminate() after the object's page queue is 4363 * flushed. 4364 */ 4365 object = vp->v_bufobj.bo_object; 4366 if (object == NULL) 4367 vp->v_bufobj.bo_flag |= BO_DEAD; 4368 BO_UNLOCK(&vp->v_bufobj); 4369 4370 /* 4371 * Handle the VM part. Tmpfs handles v_object on its own (the 4372 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4373 * should not touch the object borrowed from the lower vnode 4374 * (the handle check). 4375 */ 4376 if (object != NULL && object->type == OBJT_VNODE && 4377 object->handle == vp) 4378 vnode_destroy_vobject(vp); 4379 4380 /* 4381 * Reclaim the vnode. 4382 */ 4383 if (VOP_RECLAIM(vp)) 4384 panic("vgone: cannot reclaim"); 4385 if (mp != NULL) 4386 vn_finished_secondary_write(mp); 4387 VNASSERT(vp->v_object == NULL, vp, 4388 ("vop_reclaim left v_object vp=%p", vp)); 4389 /* 4390 * Clear the advisory locks and wake up waiting threads. 4391 */ 4392 if (vp->v_lockf != NULL) { 4393 (void)VOP_ADVLOCKPURGE(vp); 4394 vp->v_lockf = NULL; 4395 } 4396 /* 4397 * Delete from old mount point vnode list. 4398 */ 4399 if (vp->v_mount == NULL) { 4400 VI_LOCK(vp); 4401 } else { 4402 delmntque(vp); 4403 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4404 } 4405 /* 4406 * Done with purge, reset to the standard lock and invalidate 4407 * the vnode. 4408 */ 4409 vp->v_vnlock = &vp->v_lock; 4410 vp->v_op = &dead_vnodeops; 4411 vp->v_type = VBAD; 4412 vn_set_state(vp, VSTATE_DEAD); 4413 } 4414 4415 /* 4416 * Print out a description of a vnode. 4417 */ 4418 static const char *const vtypename[] = { 4419 [VNON] = "VNON", 4420 [VREG] = "VREG", 4421 [VDIR] = "VDIR", 4422 [VBLK] = "VBLK", 4423 [VCHR] = "VCHR", 4424 [VLNK] = "VLNK", 4425 [VSOCK] = "VSOCK", 4426 [VFIFO] = "VFIFO", 4427 [VBAD] = "VBAD", 4428 [VMARKER] = "VMARKER", 4429 }; 4430 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4431 "vnode type name not added to vtypename"); 4432 4433 static const char *const vstatename[] = { 4434 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4435 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4436 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4437 [VSTATE_DEAD] = "VSTATE_DEAD", 4438 }; 4439 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4440 "vnode state name not added to vstatename"); 4441 4442 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4443 "new hold count flag not added to vn_printf"); 4444 4445 void 4446 vn_printf(struct vnode *vp, const char *fmt, ...) 
4447 { 4448 va_list ap; 4449 char buf[256], buf2[16]; 4450 u_long flags; 4451 u_int holdcnt; 4452 short irflag; 4453 4454 va_start(ap, fmt); 4455 vprintf(fmt, ap); 4456 va_end(ap); 4457 printf("%p: ", (void *)vp); 4458 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4459 vstatename[vp->v_state], vp->v_op); 4460 holdcnt = atomic_load_int(&vp->v_holdcnt); 4461 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4462 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4463 vp->v_seqc_users); 4464 switch (vp->v_type) { 4465 case VDIR: 4466 printf(" mountedhere %p\n", vp->v_mountedhere); 4467 break; 4468 case VCHR: 4469 printf(" rdev %p\n", vp->v_rdev); 4470 break; 4471 case VSOCK: 4472 printf(" socket %p\n", vp->v_unpcb); 4473 break; 4474 case VFIFO: 4475 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4476 break; 4477 default: 4478 printf("\n"); 4479 break; 4480 } 4481 buf[0] = '\0'; 4482 buf[1] = '\0'; 4483 if (holdcnt & VHOLD_NO_SMR) 4484 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4485 printf(" hold count flags (%s)\n", buf + 1); 4486 4487 buf[0] = '\0'; 4488 buf[1] = '\0'; 4489 irflag = vn_irflag_read(vp); 4490 if (irflag & VIRF_DOOMED) 4491 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4492 if (irflag & VIRF_PGREAD) 4493 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4494 if (irflag & VIRF_MOUNTPOINT) 4495 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4496 if (irflag & VIRF_TEXT_REF) 4497 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4498 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4499 if (flags != 0) { 4500 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4501 strlcat(buf, buf2, sizeof(buf)); 4502 } 4503 if (vp->v_vflag & VV_ROOT) 4504 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4505 if (vp->v_vflag & VV_ISTTY) 4506 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4507 if (vp->v_vflag & VV_NOSYNC) 4508 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4509 if (vp->v_vflag & VV_ETERNALDEV) 4510 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4511 if (vp->v_vflag & VV_CACHEDLABEL) 4512 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4513 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4514 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4515 if (vp->v_vflag & VV_COPYONWRITE) 4516 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4517 if (vp->v_vflag & VV_SYSTEM) 4518 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4519 if (vp->v_vflag & VV_PROCDEP) 4520 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4521 if (vp->v_vflag & VV_DELETED) 4522 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4523 if (vp->v_vflag & VV_MD) 4524 strlcat(buf, "|VV_MD", sizeof(buf)); 4525 if (vp->v_vflag & VV_FORCEINSMQ) 4526 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4527 if (vp->v_vflag & VV_READLINK) 4528 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4529 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4530 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4531 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4532 if (flags != 0) { 4533 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4534 strlcat(buf, buf2, sizeof(buf)); 4535 } 4536 if (vp->v_iflag & VI_MOUNT) 4537 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4538 if (vp->v_iflag & VI_DOINGINACT) 4539 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4540 if (vp->v_iflag & VI_OWEINACT) 4541 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4542 if (vp->v_iflag & VI_DEFINACT) 4543 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4544 if (vp->v_iflag & VI_FOPENING) 4545 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4546 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4547 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4548 if (flags != 0) { 4549 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4550 strlcat(buf, buf2, sizeof(buf)); 4551 } 4552 if (vp->v_mflag & VMP_LAZYLIST) 4553 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4554 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4555 if (flags != 0) { 4556 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4557 strlcat(buf, buf2, sizeof(buf)); 4558 } 4559 printf(" flags (%s)", buf + 1); 4560 if (mtx_owned(VI_MTX(vp))) 4561 printf(" VI_LOCKed"); 4562 printf("\n"); 4563 if (vp->v_object != NULL) 4564 printf(" v_object %p ref %d pages %d " 4565 "cleanbuf %d dirtybuf %d\n", 4566 vp->v_object, vp->v_object->ref_count, 4567 vp->v_object->resident_page_count, 4568 vp->v_bufobj.bo_clean.bv_cnt, 4569 vp->v_bufobj.bo_dirty.bv_cnt); 4570 printf(" "); 4571 lockmgr_printinfo(vp->v_vnlock); 4572 if (vp->v_data != NULL) 4573 VOP_PRINT(vp); 4574 } 4575 4576 #ifdef DDB 4577 /* 4578 * List all of the locked vnodes in the system. 4579 * Called when debugging the kernel. 4580 */ 4581 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4582 { 4583 struct mount *mp; 4584 struct vnode *vp; 4585 4586 /* 4587 * Note: because this is DDB, we can't obey the locking semantics 4588 * for these structures, which means we could catch an inconsistent 4589 * state and dereference a nasty pointer. Not much to be done 4590 * about that. 4591 */ 4592 db_printf("Locked vnodes\n"); 4593 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4594 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4595 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4596 vn_printf(vp, "vnode "); 4597 } 4598 } 4599 } 4600 4601 /* 4602 * Show details about the given vnode. 4603 */ 4604 DB_SHOW_COMMAND(vnode, db_show_vnode) 4605 { 4606 struct vnode *vp; 4607 4608 if (!have_addr) 4609 return; 4610 vp = (struct vnode *)addr; 4611 vn_printf(vp, "vnode "); 4612 } 4613 4614 /* 4615 * Show details about the given mount point. 4616 */ 4617 DB_SHOW_COMMAND(mount, db_show_mount) 4618 { 4619 struct mount *mp; 4620 struct vfsopt *opt; 4621 struct statfs *sp; 4622 struct vnode *vp; 4623 char buf[512]; 4624 uint64_t mflags; 4625 u_int flags; 4626 4627 if (!have_addr) { 4628 /* No address given, print short info about all mount points. 
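 *
 * Example (ddb prompt; address and mount data are hypothetical):
 *
 *	db> show mount
 *	0xfffff80003400000 /dev/ada0p2 on / (ufs)
 *	More info: show mount <addr>
 *	db> show mount 0xfffff80003400000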
*/ 4629 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4630 db_printf("%p %s on %s (%s)\n", mp, 4631 mp->mnt_stat.f_mntfromname, 4632 mp->mnt_stat.f_mntonname, 4633 mp->mnt_stat.f_fstypename); 4634 if (db_pager_quit) 4635 break; 4636 } 4637 db_printf("\nMore info: show mount <addr>\n"); 4638 return; 4639 } 4640 4641 mp = (struct mount *)addr; 4642 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4643 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4644 4645 buf[0] = '\0'; 4646 mflags = mp->mnt_flag; 4647 #define MNT_FLAG(flag) do { \ 4648 if (mflags & (flag)) { \ 4649 if (buf[0] != '\0') \ 4650 strlcat(buf, ", ", sizeof(buf)); \ 4651 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4652 mflags &= ~(flag); \ 4653 } \ 4654 } while (0) 4655 MNT_FLAG(MNT_RDONLY); 4656 MNT_FLAG(MNT_SYNCHRONOUS); 4657 MNT_FLAG(MNT_NOEXEC); 4658 MNT_FLAG(MNT_NOSUID); 4659 MNT_FLAG(MNT_NFS4ACLS); 4660 MNT_FLAG(MNT_UNION); 4661 MNT_FLAG(MNT_ASYNC); 4662 MNT_FLAG(MNT_SUIDDIR); 4663 MNT_FLAG(MNT_SOFTDEP); 4664 MNT_FLAG(MNT_NOSYMFOLLOW); 4665 MNT_FLAG(MNT_GJOURNAL); 4666 MNT_FLAG(MNT_MULTILABEL); 4667 MNT_FLAG(MNT_ACLS); 4668 MNT_FLAG(MNT_NOATIME); 4669 MNT_FLAG(MNT_NOCLUSTERR); 4670 MNT_FLAG(MNT_NOCLUSTERW); 4671 MNT_FLAG(MNT_SUJ); 4672 MNT_FLAG(MNT_EXRDONLY); 4673 MNT_FLAG(MNT_EXPORTED); 4674 MNT_FLAG(MNT_DEFEXPORTED); 4675 MNT_FLAG(MNT_EXPORTANON); 4676 MNT_FLAG(MNT_EXKERB); 4677 MNT_FLAG(MNT_EXPUBLIC); 4678 MNT_FLAG(MNT_LOCAL); 4679 MNT_FLAG(MNT_QUOTA); 4680 MNT_FLAG(MNT_ROOTFS); 4681 MNT_FLAG(MNT_USER); 4682 MNT_FLAG(MNT_IGNORE); 4683 MNT_FLAG(MNT_UPDATE); 4684 MNT_FLAG(MNT_DELEXPORT); 4685 MNT_FLAG(MNT_RELOAD); 4686 MNT_FLAG(MNT_FORCE); 4687 MNT_FLAG(MNT_SNAPSHOT); 4688 MNT_FLAG(MNT_BYFSID); 4689 #undef MNT_FLAG 4690 if (mflags != 0) { 4691 if (buf[0] != '\0') 4692 strlcat(buf, ", ", sizeof(buf)); 4693 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4694 "0x%016jx", mflags); 4695 } 4696 db_printf(" mnt_flag = %s\n", buf); 4697 4698 buf[0] = '\0'; 4699 flags = mp->mnt_kern_flag; 4700 #define MNT_KERN_FLAG(flag) do { \ 4701 if (flags & (flag)) { \ 4702 if (buf[0] != '\0') \ 4703 strlcat(buf, ", ", sizeof(buf)); \ 4704 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4705 flags &= ~(flag); \ 4706 } \ 4707 } while (0) 4708 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4709 MNT_KERN_FLAG(MNTK_ASYNC); 4710 MNT_KERN_FLAG(MNTK_SOFTDEP); 4711 MNT_KERN_FLAG(MNTK_NOMSYNC); 4712 MNT_KERN_FLAG(MNTK_DRAINING); 4713 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4714 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4715 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4716 MNT_KERN_FLAG(MNTK_NO_IOPF); 4717 MNT_KERN_FLAG(MNTK_RECURSE); 4718 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4719 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4720 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4721 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4722 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4723 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4724 MNT_KERN_FLAG(MNTK_NOASYNC); 4725 MNT_KERN_FLAG(MNTK_UNMOUNT); 4726 MNT_KERN_FLAG(MNTK_MWAIT); 4727 MNT_KERN_FLAG(MNTK_SUSPEND); 4728 MNT_KERN_FLAG(MNTK_SUSPEND2); 4729 MNT_KERN_FLAG(MNTK_SUSPENDED); 4730 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4731 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4732 #undef MNT_KERN_FLAG 4733 if (flags != 0) { 4734 if (buf[0] != '\0') 4735 strlcat(buf, ", ", sizeof(buf)); 4736 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4737 "0x%08x", flags); 4738 } 4739 db_printf(" mnt_kern_flag = %s\n", buf); 4740 4741 db_printf(" mnt_opt = "); 4742 opt = TAILQ_FIRST(mp->mnt_opt); 4743 if (opt != NULL) { 4744 db_printf("%s", opt->name); 4745 opt = TAILQ_NEXT(opt, link); 4746 while (opt != 
NULL) { 4747 db_printf(", %s", opt->name); 4748 opt = TAILQ_NEXT(opt, link); 4749 } 4750 } 4751 db_printf("\n"); 4752 4753 sp = &mp->mnt_stat; 4754 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4755 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4756 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4757 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4758 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4759 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4760 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4761 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4762 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4763 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4764 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4765 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4766 4767 db_printf(" mnt_cred = { uid=%u ruid=%u", 4768 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4769 if (jailed(mp->mnt_cred)) 4770 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4771 db_printf(" }\n"); 4772 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4773 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4774 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4775 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4776 db_printf(" mnt_lazyvnodelistsize = %d\n", 4777 mp->mnt_lazyvnodelistsize); 4778 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4779 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4780 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4781 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4782 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4783 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4784 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4785 db_printf(" mnt_secondary_accwrites = %d\n", 4786 mp->mnt_secondary_accwrites); 4787 db_printf(" mnt_gjprovider = %s\n", 4788 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4789 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4790 4791 db_printf("\n\nList of active vnodes\n"); 4792 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4793 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4794 vn_printf(vp, "vnode "); 4795 if (db_pager_quit) 4796 break; 4797 } 4798 } 4799 db_printf("\n\nList of inactive vnodes\n"); 4800 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4801 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4802 vn_printf(vp, "vnode "); 4803 if (db_pager_quit) 4804 break; 4805 } 4806 } 4807 } 4808 #endif /* DDB */ 4809 4810 /* 4811 * Fill in a struct xvfsconf based on a struct vfsconf. 4812 */ 4813 static int 4814 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4815 { 4816 struct xvfsconf xvfsp; 4817 4818 bzero(&xvfsp, sizeof(xvfsp)); 4819 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4820 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4821 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4822 xvfsp.vfc_flags = vfsp->vfc_flags; 4823 /* 4824 * These are unused in userland, we keep them 4825 * to not break binary compatibility. 
4826 */ 4827 xvfsp.vfc_vfsops = NULL; 4828 xvfsp.vfc_next = NULL; 4829 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4830 } 4831 4832 #ifdef COMPAT_FREEBSD32 4833 struct xvfsconf32 { 4834 uint32_t vfc_vfsops; 4835 char vfc_name[MFSNAMELEN]; 4836 int32_t vfc_typenum; 4837 int32_t vfc_refcount; 4838 int32_t vfc_flags; 4839 uint32_t vfc_next; 4840 }; 4841 4842 static int 4843 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4844 { 4845 struct xvfsconf32 xvfsp; 4846 4847 bzero(&xvfsp, sizeof(xvfsp)); 4848 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4849 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4850 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4851 xvfsp.vfc_flags = vfsp->vfc_flags; 4852 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4853 } 4854 #endif 4855 4856 /* 4857 * Top level filesystem related information gathering. 4858 */ 4859 static int 4860 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4861 { 4862 struct vfsconf *vfsp; 4863 int error; 4864 4865 error = 0; 4866 vfsconf_slock(); 4867 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4868 #ifdef COMPAT_FREEBSD32 4869 if (req->flags & SCTL_MASK32) 4870 error = vfsconf2x32(req, vfsp); 4871 else 4872 #endif 4873 error = vfsconf2x(req, vfsp); 4874 if (error) 4875 break; 4876 } 4877 vfsconf_sunlock(); 4878 return (error); 4879 } 4880 4881 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4882 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4883 "S,xvfsconf", "List of all configured filesystems"); 4884 4885 #ifndef BURN_BRIDGES 4886 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4887 4888 static int 4889 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4890 { 4891 int *name = (int *)arg1 - 1; /* XXX */ 4892 u_int namelen = arg2 + 1; /* XXX */ 4893 struct vfsconf *vfsp; 4894 4895 log(LOG_WARNING, "userland calling deprecated sysctl, " 4896 "please rebuild world\n"); 4897 4898 #if 1 || defined(COMPAT_PRELITE2) 4899 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4900 if (namelen == 1) 4901 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4902 #endif 4903 4904 switch (name[1]) { 4905 case VFS_MAXTYPENUM: 4906 if (namelen != 2) 4907 return (ENOTDIR); 4908 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4909 case VFS_CONF: 4910 if (namelen != 3) 4911 return (ENOTDIR); /* overloaded */ 4912 vfsconf_slock(); 4913 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4914 if (vfsp->vfc_typenum == name[2]) 4915 break; 4916 } 4917 vfsconf_sunlock(); 4918 if (vfsp == NULL) 4919 return (EOPNOTSUPP); 4920 #ifdef COMPAT_FREEBSD32 4921 if (req->flags & SCTL_MASK32) 4922 return (vfsconf2x32(req, vfsp)); 4923 else 4924 #endif 4925 return (vfsconf2x(req, vfsp)); 4926 } 4927 return (EOPNOTSUPP); 4928 } 4929 4930 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4931 CTLFLAG_MPSAFE, vfs_sysctl, 4932 "Generic filesystem"); 4933 4934 #if 1 || defined(COMPAT_PRELITE2) 4935 4936 static int 4937 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4938 { 4939 int error; 4940 struct vfsconf *vfsp; 4941 struct ovfsconf ovfs; 4942 4943 vfsconf_slock(); 4944 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4945 bzero(&ovfs, sizeof(ovfs)); 4946 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4947 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4948 ovfs.vfc_index = vfsp->vfc_typenum; 4949 ovfs.vfc_refcount = vfsp->vfc_refcount; 4950 ovfs.vfc_flags = vfsp->vfc_flags; 4951 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4952 if (error != 0) { 4953 vfsconf_sunlock(); 4954 return (error); 4955 } 4956 } 4957 vfsconf_sunlock(); 4958 return (0); 4959 } 4960 4961 #endif /* 1 || COMPAT_PRELITE2 */ 4962 #endif /* !BURN_BRIDGES */ 4963 4964 static void 4965 unmount_or_warn(struct mount *mp) 4966 { 4967 int error; 4968 4969 error = dounmount(mp, MNT_FORCE, curthread); 4970 if (error != 0) { 4971 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4972 if (error == EBUSY) 4973 printf("BUSY)\n"); 4974 else 4975 printf("%d)\n", error); 4976 } 4977 } 4978 4979 /* 4980 * Unmount all filesystems. The list is traversed in reverse order 4981 * of mounting to avoid dependencies. 4982 */ 4983 void 4984 vfs_unmountall(void) 4985 { 4986 struct mount *mp, *tmp; 4987 4988 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4989 4990 /* 4991 * Since this only runs when rebooting, it is not interlocked. 4992 */ 4993 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4994 vfs_ref(mp); 4995 4996 /* 4997 * Forcibly unmounting "/dev" before "/" would prevent clean 4998 * unmount of the latter. 
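 * rootdevmp is therefore skipped inside the loop and unmounted last.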
4999 */ 5000 if (mp == rootdevmp) 5001 continue; 5002 5003 unmount_or_warn(mp); 5004 } 5005 5006 if (rootdevmp != NULL) 5007 unmount_or_warn(rootdevmp); 5008 } 5009 5010 static void 5011 vfs_deferred_inactive(struct vnode *vp, int lkflags) 5012 { 5013 5014 ASSERT_VI_LOCKED(vp, __func__); 5015 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 5016 if ((vp->v_iflag & VI_OWEINACT) == 0) { 5017 vdropl(vp); 5018 return; 5019 } 5020 if (vn_lock(vp, lkflags) == 0) { 5021 VI_LOCK(vp); 5022 vinactive(vp); 5023 VOP_UNLOCK(vp); 5024 vdropl(vp); 5025 return; 5026 } 5027 vdefer_inactive_unlocked(vp); 5028 } 5029 5030 static int 5031 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5032 { 5033 5034 return (vp->v_iflag & VI_DEFINACT); 5035 } 5036 5037 static void __noinline 5038 vfs_periodic_inactive(struct mount *mp, int flags) 5039 { 5040 struct vnode *vp, *mvp; 5041 int lkflags; 5042 5043 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5044 if (flags != MNT_WAIT) 5045 lkflags |= LK_NOWAIT; 5046 5047 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5048 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5049 VI_UNLOCK(vp); 5050 continue; 5051 } 5052 vp->v_iflag &= ~VI_DEFINACT; 5053 vfs_deferred_inactive(vp, lkflags); 5054 } 5055 } 5056 5057 static inline bool 5058 vfs_want_msync(struct vnode *vp) 5059 { 5060 struct vm_object *obj; 5061 5062 /* 5063 * This test may be performed without any locks held. 5064 * We rely on vm_object's type stability. 5065 */ 5066 if (vp->v_vflag & VV_NOSYNC) 5067 return (false); 5068 obj = vp->v_object; 5069 return (obj != NULL && vm_object_mightbedirty(obj)); 5070 } 5071 5072 static int 5073 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5074 { 5075 5076 if (vp->v_vflag & VV_NOSYNC) 5077 return (false); 5078 if (vp->v_iflag & VI_DEFINACT) 5079 return (true); 5080 return (vfs_want_msync(vp)); 5081 } 5082 5083 static void __noinline 5084 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5085 { 5086 struct vnode *vp, *mvp; 5087 struct vm_object *obj; 5088 int lkflags, objflags; 5089 bool seen_defer; 5090 5091 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5092 if (flags != MNT_WAIT) { 5093 lkflags |= LK_NOWAIT; 5094 objflags = OBJPC_NOSYNC; 5095 } else { 5096 objflags = OBJPC_SYNC; 5097 } 5098 5099 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5100 seen_defer = false; 5101 if (vp->v_iflag & VI_DEFINACT) { 5102 vp->v_iflag &= ~VI_DEFINACT; 5103 seen_defer = true; 5104 } 5105 if (!vfs_want_msync(vp)) { 5106 if (seen_defer) 5107 vfs_deferred_inactive(vp, lkflags); 5108 else 5109 VI_UNLOCK(vp); 5110 continue; 5111 } 5112 if (vget(vp, lkflags) == 0) { 5113 obj = vp->v_object; 5114 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 5115 VM_OBJECT_WLOCK(obj); 5116 vm_object_page_clean(obj, 0, 0, objflags); 5117 VM_OBJECT_WUNLOCK(obj); 5118 } 5119 vput(vp); 5120 if (seen_defer) 5121 vdrop(vp); 5122 } else { 5123 if (seen_defer) 5124 vdefer_inactive_unlocked(vp); 5125 } 5126 } 5127 } 5128 5129 void 5130 vfs_periodic(struct mount *mp, int flags) 5131 { 5132 5133 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5134 5135 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5136 vfs_periodic_inactive(mp, flags); 5137 else 5138 vfs_periodic_msync_inactive(mp, flags); 5139 } 5140 5141 static void 5142 destroy_vpollinfo_free(struct vpollinfo *vi) 5143 { 5144 5145 knlist_destroy(&vi->vpi_selinfo.si_note); 5146 mtx_destroy(&vi->vpi_lock); 5147 free(vi, M_VNODEPOLL); 5148 } 5149 5150 static void 5151 destroy_vpollinfo(struct vpollinfo 
*vi)
5152 {
5153
5154 knlist_clear(&vi->vpi_selinfo.si_note, 1);
5155 seldrain(&vi->vpi_selinfo);
5156 destroy_vpollinfo_free(vi);
5157 }
5158
5159 /*
5160 * Initialize per-vnode helper structure to hold poll-related state.
5161 */
5162 void
5163 v_addpollinfo(struct vnode *vp)
5164 {
5165 struct vpollinfo *vi;
5166
5167 if (vp->v_pollinfo != NULL)
5168 return;
5169 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5170 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5171 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5172 vfs_knlunlock, vfs_knl_assert_lock);
5173 VI_LOCK(vp);
5174 if (vp->v_pollinfo != NULL) {
5175 VI_UNLOCK(vp);
5176 destroy_vpollinfo_free(vi);
5177 return;
5178 }
5179 vp->v_pollinfo = vi;
5180 VI_UNLOCK(vp);
5181 }
5182
5183 /*
5184 * Record a process's interest in events which might happen to
5185 * a vnode. Because poll uses the historic select-style interface
5186 * internally, this routine serves as both the ``check for any
5187 * pending events'' and the ``record my interest in future events''
5188 * functions. (These are done together, while the lock is held,
5189 * to avoid race conditions.)
5190 */
5191 int
5192 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5193 {
5194
5195 v_addpollinfo(vp);
5196 mtx_lock(&vp->v_pollinfo->vpi_lock);
5197 if (vp->v_pollinfo->vpi_revents & events) {
5198 /*
5199 * This leaves events we are not interested
5200 * in available for the other process
5201 * which presumably had requested them
5202 * (otherwise they would never have been
5203 * recorded).
5204 */
5205 events &= vp->v_pollinfo->vpi_revents;
5206 vp->v_pollinfo->vpi_revents &= ~events;
5207
5208 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5209 return (events);
5210 }
5211 vp->v_pollinfo->vpi_events |= events;
5212 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5213 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5214 return (0);
5215 }
5216
5217 /*
5218 * Routine to create and manage a filesystem syncer vnode.
5219 */
5220 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5221 static int sync_fsync(struct vop_fsync_args *);
5222 static int sync_inactive(struct vop_inactive_args *);
5223 static int sync_reclaim(struct vop_reclaim_args *);
5224
5225 static struct vop_vector sync_vnodeops = {
5226 .vop_bypass = VOP_EOPNOTSUPP,
5227 .vop_close = sync_close,
5228 .vop_fsync = sync_fsync,
5229 .vop_getwritemount = vop_stdgetwritemount,
5230 .vop_inactive = sync_inactive,
5231 .vop_need_inactive = vop_stdneed_inactive,
5232 .vop_reclaim = sync_reclaim,
5233 .vop_lock1 = vop_stdlock,
5234 .vop_unlock = vop_stdunlock,
5235 .vop_islocked = vop_stdislocked,
5236 .vop_fplookup_vexec = VOP_EAGAIN,
5237 .vop_fplookup_symlink = VOP_EAGAIN,
5238 };
5239 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5240
5241 /*
5242 * Create a new filesystem syncer vnode for the specified mount point.
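 *
 * Only one syncer vnode is kept per mount (mnt_syncer); if another
 * thread installed one first, the surplus vnode is vgone'd and
 * released below.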
5243 */
5244 void
5245 vfs_allocate_syncvnode(struct mount *mp)
5246 {
5247 struct vnode *vp;
5248 struct bufobj *bo;
5249 static long start, incr, next;
5250 int error;
5251
5252 /* Allocate a new vnode */
5253 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5254 if (error != 0)
5255 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5256 vp->v_type = VNON;
5257 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5258 vp->v_vflag |= VV_FORCEINSMQ;
5259 error = insmntque1(vp, mp);
5260 if (error != 0)
5261 panic("vfs_allocate_syncvnode: insmntque() failed");
5262 vp->v_vflag &= ~VV_FORCEINSMQ;
5263 vn_set_state(vp, VSTATE_CONSTRUCTED);
5264 VOP_UNLOCK(vp);
5265 /*
5266 * Place the vnode onto the syncer worklist. We attempt to
5267 * scatter them about on the list so that they will go off
5268 * at evenly distributed times even if all the filesystems
5269 * are mounted at once.
5270 */
5271 next += incr;
5272 if (next == 0 || next > syncer_maxdelay) {
5273 start /= 2;
5274 incr /= 2;
5275 if (start == 0) {
5276 start = syncer_maxdelay / 2;
5277 incr = syncer_maxdelay;
5278 }
5279 next = start;
5280 }
5281 bo = &vp->v_bufobj;
5282 BO_LOCK(bo);
5283 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5284 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5285 mtx_lock(&sync_mtx);
5286 sync_vnode_count++;
5287 if (mp->mnt_syncer == NULL) {
5288 mp->mnt_syncer = vp;
5289 vp = NULL;
5290 }
5291 mtx_unlock(&sync_mtx);
5292 BO_UNLOCK(bo);
5293 if (vp != NULL) {
5294 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5295 vgone(vp);
5296 vput(vp);
5297 }
5298 }
5299
5300 void
5301 vfs_deallocate_syncvnode(struct mount *mp)
5302 {
5303 struct vnode *vp;
5304
5305 mtx_lock(&sync_mtx);
5306 vp = mp->mnt_syncer;
5307 if (vp != NULL)
5308 mp->mnt_syncer = NULL;
5309 mtx_unlock(&sync_mtx);
5310 if (vp != NULL)
5311 vrele(vp);
5312 }
5313
5314 /*
5315 * Do a lazy sync of the filesystem.
5316 */
5317 static int
5318 sync_fsync(struct vop_fsync_args *ap)
5319 {
5320 struct vnode *syncvp = ap->a_vp;
5321 struct mount *mp = syncvp->v_mount;
5322 int error, save;
5323 struct bufobj *bo;
5324
5325 /*
5326 * We only need to do something if this is a lazy evaluation.
5327 */
5328 if (ap->a_waitfor != MNT_LAZY)
5329 return (0);
5330
5331 /*
5332 * Move ourselves to the back of the sync list.
5333 */
5334 bo = &syncvp->v_bufobj;
5335 BO_LOCK(bo);
5336 vn_syncer_add_to_worklist(bo, syncdelay);
5337 BO_UNLOCK(bo);
5338
5339 /*
5340 * Walk the list of vnodes pushing all that are dirty and
5341 * not already on the sync list.
5342 */
5343 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5344 return (0);
5345 VOP_UNLOCK(syncvp);
5346 save = curthread_pflags_set(TDP_SYNCIO);
5347 /*
5348 * The filesystem at hand may be idle with free vnodes stored in the
5349 * batch. Return them instead of letting them stay there indefinitely.
5350 */
5351 vfs_periodic(mp, MNT_NOWAIT);
5352 error = VFS_SYNC(mp, MNT_LAZY);
5353 curthread_pflags_restore(save);
5354 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5355 vfs_unbusy(mp);
5356 return (error);
5357 }
5358
5359 /*
5360 * The syncer vnode is no longer referenced.
5361 */
5362 static int
5363 sync_inactive(struct vop_inactive_args *ap)
5364 {
5365
5366 vgone(ap->a_vp);
5367 return (0);
5368 }
5369
5370 /*
5371 * The syncer vnode is no longer needed and is being decommissioned.
5372 *
5373 * Modifications to the worklist must be protected by sync_mtx.
5374 */
5375 static int
5376 sync_reclaim(struct vop_reclaim_args *ap)
5377 {
5378 struct vnode *vp = ap->a_vp;
5379 struct bufobj *bo;
5380
5381 bo = &vp->v_bufobj;
5382 BO_LOCK(bo);
5383 mtx_lock(&sync_mtx);
5384 if (vp->v_mount->mnt_syncer == vp)
5385 vp->v_mount->mnt_syncer = NULL;
5386 if (bo->bo_flag & BO_ONWORKLST) {
5387 LIST_REMOVE(bo, bo_synclist);
5388 syncer_worklist_len--;
5389 sync_vnode_count--;
5390 bo->bo_flag &= ~BO_ONWORKLST;
5391 }
5392 mtx_unlock(&sync_mtx);
5393 BO_UNLOCK(bo);
5394
5395 return (0);
5396 }
5397
5398 int
5399 vn_need_pageq_flush(struct vnode *vp)
5400 {
5401 struct vm_object *obj;
5402
5403 obj = vp->v_object;
5404 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5405 vm_object_mightbedirty(obj));
5406 }
5407
5408 /*
5409 * Check if vnode represents a disk device
5410 */
5411 bool
5412 vn_isdisk_error(struct vnode *vp, int *errp)
5413 {
5414 int error;
5415
5416 if (vp->v_type != VCHR) {
5417 error = ENOTBLK;
5418 goto out;
5419 }
5420 error = 0;
5421 dev_lock();
5422 if (vp->v_rdev == NULL)
5423 error = ENXIO;
5424 else if (vp->v_rdev->si_devsw == NULL)
5425 error = ENXIO;
5426 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5427 error = ENOTBLK;
5428 dev_unlock();
5429 out:
5430 *errp = error;
5431 return (error == 0);
5432 }
5433
5434 bool
5435 vn_isdisk(struct vnode *vp)
5436 {
5437 int error;
5438
5439 return (vn_isdisk_error(vp, &error));
5440 }
5441
5442 /*
5443 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5444 * the comment above cache_fplookup for details.
5445 */
5446 int
5447 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5448 {
5449 int error;
5450
5451 VFS_SMR_ASSERT_ENTERED();
5452
5453 /* Check the owner. */
5454 if (cred->cr_uid == file_uid) {
5455 if (file_mode & S_IXUSR)
5456 return (0);
5457 goto out_error;
5458 }
5459
5460 /* Otherwise, check the groups (first match) */
5461 if (groupmember(file_gid, cred)) {
5462 if (file_mode & S_IXGRP)
5463 return (0);
5464 goto out_error;
5465 }
5466
5467 /* Otherwise, check everyone else. */
5468 if (file_mode & S_IXOTH)
5469 return (0);
5470 out_error:
5471 /*
5472 * Permission check failed, but it is possible denial will get overridden
5473 * (e.g., when root is traversing through a 700 directory owned by someone
5474 * else).
5475 *
5476 * vaccess() calls priv_check_cred which in turn can descend into MAC
5477 * modules overriding this result. It is quite unclear what semantics
5478 * they are allowed to operate with, thus for safety we don't call them
5479 * from within the SMR section. This also means if any such modules
5480 * are present, we have to let the regular lookup decide.
5481 */
5482 error = priv_check_cred_vfs_lookup_nomac(cred);
5483 switch (error) {
5484 case 0:
5485 return (0);
5486 case EAGAIN:
5487 /*
5488 * MAC modules present.
5489 */
5490 return (EAGAIN);
5491 case EPERM:
5492 return (EACCES);
5493 default:
5494 return (error);
5495 }
5496 }
5497
5498 /*
5499 * Common filesystem object access control check routine. Accepts a
5500 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5501 * Returns 0 on success, or an errno on failure.
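 *
 * A worked example (hypothetical values): with file_mode 0750 and a
 * credential whose cr_uid matches file_uid, the owner bits grant
 * VADMIN | VEXEC | VREAD | VWRITE | VAPPEND, so accmode VREAD | VEXEC
 * succeeds with no priv_check_cred() calls. A mere group member is
 * granted only VEXEC | VREAD by the group bits, so requesting VWRITE
 * falls through to the privilege checks.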
5502 */ 5503 int 5504 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5505 accmode_t accmode, struct ucred *cred) 5506 { 5507 accmode_t dac_granted; 5508 accmode_t priv_granted; 5509 5510 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5511 ("invalid bit in accmode")); 5512 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5513 ("VAPPEND without VWRITE")); 5514 5515 /* 5516 * Look for a normal, non-privileged way to access the file/directory 5517 * as requested. If it exists, go with that. 5518 */ 5519 5520 dac_granted = 0; 5521 5522 /* Check the owner. */ 5523 if (cred->cr_uid == file_uid) { 5524 dac_granted |= VADMIN; 5525 if (file_mode & S_IXUSR) 5526 dac_granted |= VEXEC; 5527 if (file_mode & S_IRUSR) 5528 dac_granted |= VREAD; 5529 if (file_mode & S_IWUSR) 5530 dac_granted |= (VWRITE | VAPPEND); 5531 5532 if ((accmode & dac_granted) == accmode) 5533 return (0); 5534 5535 goto privcheck; 5536 } 5537 5538 /* Otherwise, check the groups (first match) */ 5539 if (groupmember(file_gid, cred)) { 5540 if (file_mode & S_IXGRP) 5541 dac_granted |= VEXEC; 5542 if (file_mode & S_IRGRP) 5543 dac_granted |= VREAD; 5544 if (file_mode & S_IWGRP) 5545 dac_granted |= (VWRITE | VAPPEND); 5546 5547 if ((accmode & dac_granted) == accmode) 5548 return (0); 5549 5550 goto privcheck; 5551 } 5552 5553 /* Otherwise, check everyone else. */ 5554 if (file_mode & S_IXOTH) 5555 dac_granted |= VEXEC; 5556 if (file_mode & S_IROTH) 5557 dac_granted |= VREAD; 5558 if (file_mode & S_IWOTH) 5559 dac_granted |= (VWRITE | VAPPEND); 5560 if ((accmode & dac_granted) == accmode) 5561 return (0); 5562 5563 privcheck: 5564 /* 5565 * Build a privilege mask to determine if the set of privileges 5566 * satisfies the requirements when combined with the granted mask 5567 * from above. For each privilege, if the privilege is required, 5568 * bitwise or the request type onto the priv_granted mask. 5569 */ 5570 priv_granted = 0; 5571 5572 if (type == VDIR) { 5573 /* 5574 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5575 * requests, instead of PRIV_VFS_EXEC. 5576 */ 5577 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5578 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5579 priv_granted |= VEXEC; 5580 } else { 5581 /* 5582 * Ensure that at least one execute bit is on. Otherwise, 5583 * a privileged user will always succeed, and we don't want 5584 * this to happen unless the file really is executable. 5585 */ 5586 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5587 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5588 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5589 priv_granted |= VEXEC; 5590 } 5591 5592 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5593 !priv_check_cred(cred, PRIV_VFS_READ)) 5594 priv_granted |= VREAD; 5595 5596 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5597 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5598 priv_granted |= (VWRITE | VAPPEND); 5599 5600 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5601 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5602 priv_granted |= VADMIN; 5603 5604 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5605 return (0); 5606 } 5607 5608 return ((accmode & VADMIN) ? EPERM : EACCES); 5609 } 5610 5611 /* 5612 * Credential check based on process requesting service, and per-attribute 5613 * permissions. 
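 *
 * In short: NOCRED (kernel-invoked) always succeeds,
 * EXTATTR_NAMESPACE_SYSTEM requires PRIV_VFS_EXTATTR_SYSTEM,
 * EXTATTR_NAMESPACE_USER defers to VOP_ACCESS(), and any other
 * namespace gets EPERM.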
5614 */ 5615 int 5616 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5617 struct thread *td, accmode_t accmode) 5618 { 5619 5620 /* 5621 * Kernel-invoked always succeeds. 5622 */ 5623 if (cred == NOCRED) 5624 return (0); 5625 5626 /* 5627 * Do not allow privileged processes in jail to directly manipulate 5628 * system attributes. 5629 */ 5630 switch (attrnamespace) { 5631 case EXTATTR_NAMESPACE_SYSTEM: 5632 /* Potentially should be: return (EPERM); */ 5633 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5634 case EXTATTR_NAMESPACE_USER: 5635 return (VOP_ACCESS(vp, accmode, cred, td)); 5636 default: 5637 return (EPERM); 5638 } 5639 } 5640 5641 #ifdef DEBUG_VFS_LOCKS 5642 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5643 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5644 "Drop into debugger on lock violation"); 5645 5646 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5647 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5648 0, "Check for interlock across VOPs"); 5649 5650 int vfs_badlock_print = 1; /* Print lock violations. */ 5651 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5652 0, "Print lock violations"); 5653 5654 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5655 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5656 0, "Print vnode details on lock violations"); 5657 5658 #ifdef KDB 5659 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5660 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5661 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5662 #endif 5663 5664 static void 5665 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5666 { 5667 5668 #ifdef KDB 5669 if (vfs_badlock_backtrace) 5670 kdb_backtrace(); 5671 #endif 5672 if (vfs_badlock_vnode) 5673 vn_printf(vp, "vnode "); 5674 if (vfs_badlock_print) 5675 printf("%s: %p %s\n", str, (void *)vp, msg); 5676 if (vfs_badlock_ddb) 5677 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5678 } 5679 5680 void 5681 assert_vi_locked(struct vnode *vp, const char *str) 5682 { 5683 5684 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5685 vfs_badlock("interlock is not locked but should be", str, vp); 5686 } 5687 5688 void 5689 assert_vi_unlocked(struct vnode *vp, const char *str) 5690 { 5691 5692 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5693 vfs_badlock("interlock is locked but should not be", str, vp); 5694 } 5695 5696 void 5697 assert_vop_locked(struct vnode *vp, const char *str) 5698 { 5699 if (KERNEL_PANICKED() || vp == NULL) 5700 return; 5701 5702 #ifdef WITNESS 5703 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5704 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5705 #else 5706 int locked = VOP_ISLOCKED(vp); 5707 if (locked == 0 || locked == LK_EXCLOTHER) 5708 #endif 5709 vfs_badlock("is not locked but should be", str, vp); 5710 } 5711 5712 void 5713 assert_vop_unlocked(struct vnode *vp, const char *str) 5714 { 5715 if (KERNEL_PANICKED() || vp == NULL) 5716 return; 5717 5718 #ifdef WITNESS 5719 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5720 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5721 #else 5722 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5723 #endif 5724 vfs_badlock("is locked but should not be", str, vp); 5725 } 5726 5727 void 5728 assert_vop_elocked(struct vnode *vp, const char *str) 5729 { 5730 if (KERNEL_PANICKED() || vp == 
NULL) 5731 return; 5732 5733 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5734 vfs_badlock("is not exclusive locked but should be", str, vp); 5735 } 5736 #endif /* DEBUG_VFS_LOCKS */ 5737 5738 void 5739 vop_rename_fail(struct vop_rename_args *ap) 5740 { 5741 5742 if (ap->a_tvp != NULL) 5743 vput(ap->a_tvp); 5744 if (ap->a_tdvp == ap->a_tvp) 5745 vrele(ap->a_tdvp); 5746 else 5747 vput(ap->a_tdvp); 5748 vrele(ap->a_fdvp); 5749 vrele(ap->a_fvp); 5750 } 5751 5752 void 5753 vop_rename_pre(void *ap) 5754 { 5755 struct vop_rename_args *a = ap; 5756 5757 #ifdef DEBUG_VFS_LOCKS 5758 if (a->a_tvp) 5759 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5760 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5761 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5762 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5763 5764 /* Check the source (from). */ 5765 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5766 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5767 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5768 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5769 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5770 5771 /* Check the target. */ 5772 if (a->a_tvp) 5773 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5774 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5775 #endif 5776 /* 5777 * It may be tempting to add vn_seqc_write_begin/end calls here and 5778 * in vop_rename_post but that's not going to work out since some 5779 * filesystems relookup vnodes mid-rename. This is probably a bug. 5780 * 5781 * For now filesystems are expected to do the relevant calls after they 5782 * decide what vnodes to operate on. 5783 */ 5784 if (a->a_tdvp != a->a_fdvp) 5785 vhold(a->a_fdvp); 5786 if (a->a_tvp != a->a_fvp) 5787 vhold(a->a_fvp); 5788 vhold(a->a_tdvp); 5789 if (a->a_tvp) 5790 vhold(a->a_tvp); 5791 } 5792 5793 #ifdef DEBUG_VFS_LOCKS 5794 void 5795 vop_fplookup_vexec_debugpre(void *ap __unused) 5796 { 5797 5798 VFS_SMR_ASSERT_ENTERED(); 5799 } 5800 5801 void 5802 vop_fplookup_vexec_debugpost(void *ap, int rc) 5803 { 5804 struct vop_fplookup_vexec_args *a; 5805 struct vnode *vp; 5806 5807 a = ap; 5808 vp = a->a_vp; 5809 5810 VFS_SMR_ASSERT_ENTERED(); 5811 if (rc == EOPNOTSUPP) 5812 VNPASS(VN_IS_DOOMED(vp), vp); 5813 } 5814 5815 void 5816 vop_fplookup_symlink_debugpre(void *ap __unused) 5817 { 5818 5819 VFS_SMR_ASSERT_ENTERED(); 5820 } 5821 5822 void 5823 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5824 { 5825 5826 VFS_SMR_ASSERT_ENTERED(); 5827 } 5828 5829 static void 5830 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5831 { 5832 if (vp->v_type == VCHR) 5833 ; 5834 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5835 ASSERT_VOP_LOCKED(vp, name); 5836 else 5837 ASSERT_VOP_ELOCKED(vp, name); 5838 } 5839 5840 void 5841 vop_fsync_debugpre(void *a) 5842 { 5843 struct vop_fsync_args *ap; 5844 5845 ap = a; 5846 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5847 } 5848 5849 void 5850 vop_fsync_debugpost(void *a, int rc __unused) 5851 { 5852 struct vop_fsync_args *ap; 5853 5854 ap = a; 5855 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5856 } 5857 5858 void 5859 vop_fdatasync_debugpre(void *a) 5860 { 5861 struct vop_fdatasync_args *ap; 5862 5863 ap = a; 5864 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5865 } 5866 5867 void 5868 vop_fdatasync_debugpost(void *a, int rc __unused) 5869 { 5870 struct vop_fdatasync_args *ap; 5871 5872 ap = a; 5873 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5874 } 5875 5876 void 5877 
vop_strategy_debugpre(void *ap) 5878 { 5879 struct vop_strategy_args *a; 5880 struct buf *bp; 5881 5882 a = ap; 5883 bp = a->a_bp; 5884 5885 /* 5886 * Cluster ops lock their component buffers but not the IO container. 5887 */ 5888 if ((bp->b_flags & B_CLUSTER) != 0) 5889 return; 5890 5891 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5892 if (vfs_badlock_print) 5893 printf( 5894 "VOP_STRATEGY: bp is not locked but should be\n"); 5895 if (vfs_badlock_ddb) 5896 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5897 } 5898 } 5899 5900 void 5901 vop_lock_debugpre(void *ap) 5902 { 5903 struct vop_lock1_args *a = ap; 5904 5905 if ((a->a_flags & LK_INTERLOCK) == 0) 5906 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5907 else 5908 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5909 } 5910 5911 void 5912 vop_lock_debugpost(void *ap, int rc) 5913 { 5914 struct vop_lock1_args *a = ap; 5915 5916 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5917 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5918 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5919 } 5920 5921 void 5922 vop_unlock_debugpre(void *ap) 5923 { 5924 struct vop_unlock_args *a = ap; 5925 struct vnode *vp = a->a_vp; 5926 5927 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5928 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5929 } 5930 5931 void 5932 vop_need_inactive_debugpre(void *ap) 5933 { 5934 struct vop_need_inactive_args *a = ap; 5935 5936 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5937 } 5938 5939 void 5940 vop_need_inactive_debugpost(void *ap, int rc) 5941 { 5942 struct vop_need_inactive_args *a = ap; 5943 5944 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5945 } 5946 #endif 5947 5948 void 5949 vop_create_pre(void *ap) 5950 { 5951 struct vop_create_args *a; 5952 struct vnode *dvp; 5953 5954 a = ap; 5955 dvp = a->a_dvp; 5956 vn_seqc_write_begin(dvp); 5957 } 5958 5959 void 5960 vop_create_post(void *ap, int rc) 5961 { 5962 struct vop_create_args *a; 5963 struct vnode *dvp; 5964 5965 a = ap; 5966 dvp = a->a_dvp; 5967 vn_seqc_write_end(dvp); 5968 if (!rc) 5969 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5970 } 5971 5972 void 5973 vop_whiteout_pre(void *ap) 5974 { 5975 struct vop_whiteout_args *a; 5976 struct vnode *dvp; 5977 5978 a = ap; 5979 dvp = a->a_dvp; 5980 vn_seqc_write_begin(dvp); 5981 } 5982 5983 void 5984 vop_whiteout_post(void *ap, int rc) 5985 { 5986 struct vop_whiteout_args *a; 5987 struct vnode *dvp; 5988 5989 a = ap; 5990 dvp = a->a_dvp; 5991 vn_seqc_write_end(dvp); 5992 } 5993 5994 void 5995 vop_deleteextattr_pre(void *ap) 5996 { 5997 struct vop_deleteextattr_args *a; 5998 struct vnode *vp; 5999 6000 a = ap; 6001 vp = a->a_vp; 6002 vn_seqc_write_begin(vp); 6003 } 6004 6005 void 6006 vop_deleteextattr_post(void *ap, int rc) 6007 { 6008 struct vop_deleteextattr_args *a; 6009 struct vnode *vp; 6010 6011 a = ap; 6012 vp = a->a_vp; 6013 vn_seqc_write_end(vp); 6014 if (!rc) 6015 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 6016 } 6017 6018 void 6019 vop_link_pre(void *ap) 6020 { 6021 struct vop_link_args *a; 6022 struct vnode *vp, *tdvp; 6023 6024 a = ap; 6025 vp = a->a_vp; 6026 tdvp = a->a_tdvp; 6027 vn_seqc_write_begin(vp); 6028 vn_seqc_write_begin(tdvp); 6029 } 6030 6031 void 6032 vop_link_post(void *ap, int rc) 6033 { 6034 struct vop_link_args *a; 6035 struct vnode *vp, *tdvp; 6036 6037 a = ap; 6038 vp = a->a_vp; 6039 tdvp = a->a_tdvp; 6040 vn_seqc_write_end(vp); 6041 vn_seqc_write_end(tdvp); 6042 if (!rc) { 6043 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6044 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6045 } 6046 } 6047 6048 void 6049 vop_mkdir_pre(void *ap) 6050 { 6051 struct 
void
vop_create_pre(void *ap)
{
	struct vop_create_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_create_post(void *ap, int rc)
{
	struct vop_create_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_whiteout_pre(void *ap)
{
	struct vop_whiteout_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_whiteout_post(void *ap, int rc)
{
	struct vop_whiteout_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
}

void
vop_deleteextattr_pre(void *ap)
{
	struct vop_deleteextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_deleteextattr_post(void *ap, int rc)
{
	struct vop_deleteextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_link_pre(void *ap)
{
	struct vop_link_args *a;
	struct vnode *vp, *tdvp;

	a = ap;
	vp = a->a_vp;
	tdvp = a->a_tdvp;
	vn_seqc_write_begin(vp);
	vn_seqc_write_begin(tdvp);
}

void
vop_link_post(void *ap, int rc)
{
	struct vop_link_args *a;
	struct vnode *vp, *tdvp;

	a = ap;
	vp = a->a_vp;
	tdvp = a->a_tdvp;
	vn_seqc_write_end(vp);
	vn_seqc_write_end(tdvp);
	if (!rc) {
		VFS_KNOTE_LOCKED(vp, NOTE_LINK);
		VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE);
	}
}

void
vop_mkdir_pre(void *ap)
{
	struct vop_mkdir_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_mkdir_post(void *ap, int rc)
{
	struct vop_mkdir_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
}

#ifdef DEBUG_VFS_LOCKS
void
vop_mkdir_debugpost(void *ap, int rc)
{
	struct vop_mkdir_args *a;

	a = ap;
	if (!rc)
		cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp);
}
#endif

void
vop_mknod_pre(void *ap)
{
	struct vop_mknod_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_mknod_post(void *ap, int rc)
{
	struct vop_mknod_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_reclaim_post(void *ap, int rc)
{
	struct vop_reclaim_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	ASSERT_VOP_IN_SEQC(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
}

void
vop_remove_pre(void *ap)
{
	struct vop_remove_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_begin(dvp);
	vn_seqc_write_begin(vp);
}

void
vop_remove_post(void *ap, int rc)
{
	struct vop_remove_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_end(dvp);
	vn_seqc_write_end(vp);
	if (!rc) {
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
	}
}

void
vop_rename_post(void *ap, int rc)
{
	struct vop_rename_args *a = ap;
	long hint;

	if (!rc) {
		hint = NOTE_WRITE;
		if (a->a_fdvp == a->a_tdvp) {
			if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
				hint |= NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
		} else {
			hint |= NOTE_EXTEND;
			if (a->a_fvp->v_type == VDIR)
				hint |= NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);

			if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
			    a->a_tvp->v_type == VDIR)
				hint &= ~NOTE_LINK;
			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
		}

		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
		if (a->a_tvp)
			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
	}
	if (a->a_tdvp != a->a_fdvp)
		vdrop(a->a_fdvp);
	if (a->a_tvp != a->a_fvp)
		vdrop(a->a_fvp);
	vdrop(a->a_tdvp);
	if (a->a_tvp)
		vdrop(a->a_tvp);
}

void
vop_rmdir_pre(void *ap)
{
	struct vop_rmdir_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_begin(dvp);
	vn_seqc_write_begin(vp);
}

void
vop_rmdir_post(void *ap, int rc)
{
	struct vop_rmdir_args *a;
	struct vnode *dvp, *vp;

	a = ap;
	dvp = a->a_dvp;
	vp = a->a_vp;
	vn_seqc_write_end(dvp);
	vn_seqc_write_end(vp);
	if (!rc) {
		vp->v_vflag |= VV_UNLINKED;
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
	}
}
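/*
 * The setattr/setacl/setextattr handlers below take the same seqc bracket
 * on the vnode being modified, posting NOTE_ATTRIB on success where a
 * knote is warranted (vop_setacl_post posts none); vop_symlink_* follows
 * the directory-modifying pattern above.
 */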
void
vop_setattr_pre(void *ap)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setattr_post(void *ap, int rc)
{
	struct vop_setattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_setacl_pre(void *ap)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setacl_post(void *ap, int rc __unused)
{
	struct vop_setacl_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
}

void
vop_setextattr_pre(void *ap)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_begin(vp);
}

void
vop_setextattr_post(void *ap, int rc)
{
	struct vop_setextattr_args *a;
	struct vnode *vp;

	a = ap;
	vp = a->a_vp;
	vn_seqc_write_end(vp);
	if (!rc)
		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
}

void
vop_symlink_pre(void *ap)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_begin(dvp);
}

void
vop_symlink_post(void *ap, int rc)
{
	struct vop_symlink_args *a;
	struct vnode *dvp;

	a = ap;
	dvp = a->a_dvp;
	vn_seqc_write_end(dvp);
	if (!rc)
		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
}

void
vop_open_post(void *ap, int rc)
{
	struct vop_open_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
}

void
vop_close_post(void *ap, int rc)
{
	struct vop_close_args *a = ap;

	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
	    !VN_IS_DOOMED(a->a_vp))) {
		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
	}
}

void
vop_read_post(void *ap, int rc)
{
	struct vop_read_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}

void
vop_read_pgcache_post(void *ap, int rc)
{
	struct vop_read_pgcache_args *a = ap;

	if (!rc)
		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
}

void
vop_readdir_post(void *ap, int rc)
{
	struct vop_readdir_args *a = ap;

	if (!rc)
		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
}
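/*
 * Note that the open/close/read/readdir hooks above only deliver knotes;
 * they do not take the seqc bracket since they do not change the
 * namespace or attributes that lockless lookup depends on.
 */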
static struct knlist fs_knlist;

static void
vfs_event_init(void *arg)
{
	knlist_init_mtx(&fs_knlist, NULL);
}
/* XXX - correct order? */
SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);

void
vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
{

	KNOTE_UNLOCKED(&fs_knlist, event);
}

static int	filt_fsattach(struct knote *kn);
static void	filt_fsdetach(struct knote *kn);
static int	filt_fsevent(struct knote *kn, long hint);

struct filterops fs_filtops = {
	.f_isfd = 0,
	.f_attach = filt_fsattach,
	.f_detach = filt_fsdetach,
	.f_event = filt_fsevent
};

static int
filt_fsattach(struct knote *kn)
{

	kn->kn_flags |= EV_CLEAR;
	knlist_add(&fs_knlist, kn, 0);
	return (0);
}

static void
filt_fsdetach(struct knote *kn)
{

	knlist_remove(&fs_knlist, kn, 0);
}

static int
filt_fsevent(struct knote *kn, long hint)
{

	kn->kn_fflags |= kn->kn_sfflags & hint;

	return (kn->kn_fflags != 0);
}

static int
sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
{
	struct vfsidctl vc;
	int error;
	struct mount *mp;

	error = SYSCTL_IN(req, &vc, sizeof(vc));
	if (error)
		return (error);
	if (vc.vc_vers != VFS_CTL_VERS1)
		return (EINVAL);
	mp = vfs_getvfs(&vc.vc_fsid);
	if (mp == NULL)
		return (ENOENT);
	/* ensure that a specific sysctl goes to the right filesystem. */
	if (strcmp(vc.vc_fstypename, "*") != 0 &&
	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
		vfs_rel(mp);
		return (EINVAL);
	}
	VCTLTOREQ(&vc, req);
	error = VFS_SYSCTL(mp, vc.vc_op, req);
	vfs_rel(mp);
	return (error);
}

SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
    NULL, 0, sysctl_vfs_ctl, "",
    "Sysctl by fsid");
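/*
 * A userland sketch of reaching the handler above, assuming a filesystem
 * that implements VFS_SYSCTL (the vc_op values are filesystem-specific):
 *
 *	struct vfsidctl vc = { .vc_vers = VFS_CTL_VERS1 };
 *
 *	vc.vc_fsid = sfs.f_fsid;	(from a prior statfs(2))
 *	vc.vc_op = <fs-specific VFS_CTL op>;
 *	strlcpy(vc.vc_fstypename, "*", sizeof(vc.vc_fstypename));
 *	sysctlbyname("vfs.ctl", NULL, NULL, &vc, sizeof(vc));
 */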
/*
 * Function to initialize a va_filerev field sensibly.
 * XXX: Wouldn't a random number make a lot more sense ??
 */
u_quad_t
init_va_filerev(void)
{
	struct bintime bt;

	getbinuptime(&bt);
	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
}

static int	filt_vfsread(struct knote *kn, long hint);
static int	filt_vfswrite(struct knote *kn, long hint);
static int	filt_vfsvnode(struct knote *kn, long hint);
static void	filt_vfsdetach(struct knote *kn);
static struct filterops vfsread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsread
};
static struct filterops vfswrite_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfswrite
};
static struct filterops vfsvnode_filtops = {
	.f_isfd = 1,
	.f_detach = filt_vfsdetach,
	.f_event = filt_vfsvnode
};

static void
vfs_knllock(void *arg)
{
	struct vnode *vp = arg;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
}

static void
vfs_knlunlock(void *arg)
{
	struct vnode *vp = arg;

	VOP_UNLOCK(vp);
}

static void
vfs_knl_assert_lock(void *arg, int what)
{
#ifdef DEBUG_VFS_LOCKS
	struct vnode *vp = arg;

	if (what == LA_LOCKED)
		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
	else
		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
#endif
}

int
vfs_kqfilter(struct vop_kqfilter_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct knote *kn = ap->a_kn;
	struct knlist *knl;

	KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ &&
	    kn->kn_filter != EVFILT_WRITE),
	    ("READ/WRITE filter on a FIFO leaked through"));
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &vfsread_filtops;
		break;
	case EVFILT_WRITE:
		kn->kn_fop = &vfswrite_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &vfsvnode_filtops;
		break;
	default:
		return (EINVAL);
	}

	kn->kn_hook = (caddr_t)vp;

	v_addpollinfo(vp);
	if (vp->v_pollinfo == NULL)
		return (ENOMEM);
	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
	vhold(vp);
	knlist_add(knl, kn, 0);

	return (0);
}

/*
 * Detach knote from vnode
 */
static void
filt_vfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
	vdrop(vp);
}
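/*
 * A minimal userland consumer of these filters (a sketch; fd is an open
 * vnode-backed descriptor):
 *
 *	struct kevent kev;
 *	int kq = kqueue();
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * vfs_kqfilter() above is what routes such a registration to the
 * appropriate filterops.
 */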
6568 */ 6569 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6570 VI_LOCK(vp); 6571 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6572 VI_UNLOCK(vp); 6573 return (1); 6574 } 6575 6576 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6577 return (0); 6578 6579 VI_LOCK(vp); 6580 kn->kn_data = size - kn->kn_fp->f_offset; 6581 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6582 VI_UNLOCK(vp); 6583 return (res); 6584 } 6585 6586 /*ARGSUSED*/ 6587 static int 6588 filt_vfswrite(struct knote *kn, long hint) 6589 { 6590 struct vnode *vp = (struct vnode *)kn->kn_hook; 6591 6592 VI_LOCK(vp); 6593 6594 /* 6595 * filesystem is gone, so set the EOF flag and schedule 6596 * the knote for deletion. 6597 */ 6598 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6599 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6600 6601 kn->kn_data = 0; 6602 VI_UNLOCK(vp); 6603 return (1); 6604 } 6605 6606 static int 6607 filt_vfsvnode(struct knote *kn, long hint) 6608 { 6609 struct vnode *vp = (struct vnode *)kn->kn_hook; 6610 int res; 6611 6612 VI_LOCK(vp); 6613 if (kn->kn_sfflags & hint) 6614 kn->kn_fflags |= hint; 6615 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6616 kn->kn_flags |= EV_EOF; 6617 VI_UNLOCK(vp); 6618 return (1); 6619 } 6620 res = (kn->kn_fflags != 0); 6621 VI_UNLOCK(vp); 6622 return (res); 6623 } 6624 6625 int 6626 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6627 { 6628 int error; 6629 6630 if (dp->d_reclen > ap->a_uio->uio_resid) 6631 return (ENAMETOOLONG); 6632 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6633 if (error) { 6634 if (ap->a_ncookies != NULL) { 6635 if (ap->a_cookies != NULL) 6636 free(ap->a_cookies, M_TEMP); 6637 ap->a_cookies = NULL; 6638 *ap->a_ncookies = 0; 6639 } 6640 return (error); 6641 } 6642 if (ap->a_ncookies == NULL) 6643 return (0); 6644 6645 KASSERT(ap->a_cookies, 6646 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6647 6648 *ap->a_cookies = realloc(*ap->a_cookies, 6649 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6650 (*ap->a_cookies)[*ap->a_ncookies] = off; 6651 *ap->a_ncookies += 1; 6652 return (0); 6653 } 6654 6655 /* 6656 * The purpose of this routine is to remove granularity from accmode_t, 6657 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6658 * VADMIN and VAPPEND. 6659 * 6660 * If it returns 0, the caller is supposed to continue with the usual 6661 * access checks using 'accmode' as modified by this routine. If it 6662 * returns nonzero value, the caller is supposed to return that value 6663 * as errno. 6664 * 6665 * Note that after this routine runs, accmode may be zero. 6666 */ 6667 int 6668 vfs_unixify_accmode(accmode_t *accmode) 6669 { 6670 /* 6671 * There is no way to specify explicit "deny" rule using 6672 * file mode or POSIX.1e ACLs. 6673 */ 6674 if (*accmode & VEXPLICIT_DENY) { 6675 *accmode = 0; 6676 return (0); 6677 } 6678 6679 /* 6680 * None of these can be translated into usual access bits. 6681 * Also, the common case for NFSv4 ACLs is to not contain 6682 * either of these bits. Caller should check for VWRITE 6683 * on the containing directory instead. 6684 */ 6685 if (*accmode & (VDELETE_CHILD | VDELETE)) 6686 return (EPERM); 6687 6688 if (*accmode & VADMIN_PERMS) { 6689 *accmode &= ~VADMIN_PERMS; 6690 *accmode |= VADMIN; 6691 } 6692 6693 /* 6694 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6695 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
6696 */ 6697 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6698 6699 return (0); 6700 } 6701 6702 /* 6703 * Clear out a doomed vnode (if any) and replace it with a new one as long 6704 * as the fs is not being unmounted. Return the root vnode to the caller. 6705 */ 6706 static int __noinline 6707 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6708 { 6709 struct vnode *vp; 6710 int error; 6711 6712 restart: 6713 if (mp->mnt_rootvnode != NULL) { 6714 MNT_ILOCK(mp); 6715 vp = mp->mnt_rootvnode; 6716 if (vp != NULL) { 6717 if (!VN_IS_DOOMED(vp)) { 6718 vrefact(vp); 6719 MNT_IUNLOCK(mp); 6720 error = vn_lock(vp, flags); 6721 if (error == 0) { 6722 *vpp = vp; 6723 return (0); 6724 } 6725 vrele(vp); 6726 goto restart; 6727 } 6728 /* 6729 * Clear the old one. 6730 */ 6731 mp->mnt_rootvnode = NULL; 6732 } 6733 MNT_IUNLOCK(mp); 6734 if (vp != NULL) { 6735 vfs_op_barrier_wait(mp); 6736 vrele(vp); 6737 } 6738 } 6739 error = VFS_CACHEDROOT(mp, flags, vpp); 6740 if (error != 0) 6741 return (error); 6742 if (mp->mnt_vfs_ops == 0) { 6743 MNT_ILOCK(mp); 6744 if (mp->mnt_vfs_ops != 0) { 6745 MNT_IUNLOCK(mp); 6746 return (0); 6747 } 6748 if (mp->mnt_rootvnode == NULL) { 6749 vrefact(*vpp); 6750 mp->mnt_rootvnode = *vpp; 6751 } else { 6752 if (mp->mnt_rootvnode != *vpp) { 6753 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6754 panic("%s: mismatch between vnode returned " 6755 " by VFS_CACHEDROOT and the one cached " 6756 " (%p != %p)", 6757 __func__, *vpp, mp->mnt_rootvnode); 6758 } 6759 } 6760 } 6761 MNT_IUNLOCK(mp); 6762 } 6763 return (0); 6764 } 6765 6766 int 6767 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6768 { 6769 struct mount_pcpu *mpcpu; 6770 struct vnode *vp; 6771 int error; 6772 6773 if (!vfs_op_thread_enter(mp, mpcpu)) 6774 return (vfs_cache_root_fallback(mp, flags, vpp)); 6775 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6776 if (vp == NULL || VN_IS_DOOMED(vp)) { 6777 vfs_op_thread_exit(mp, mpcpu); 6778 return (vfs_cache_root_fallback(mp, flags, vpp)); 6779 } 6780 vrefact(vp); 6781 vfs_op_thread_exit(mp, mpcpu); 6782 error = vn_lock(vp, flags); 6783 if (error != 0) { 6784 vrele(vp); 6785 return (vfs_cache_root_fallback(mp, flags, vpp)); 6786 } 6787 *vpp = vp; 6788 return (0); 6789 } 6790 6791 struct vnode * 6792 vfs_cache_root_clear(struct mount *mp) 6793 { 6794 struct vnode *vp; 6795 6796 /* 6797 * ops > 0 guarantees there is nobody who can see this vnode 6798 */ 6799 MPASS(mp->mnt_vfs_ops > 0); 6800 vp = mp->mnt_rootvnode; 6801 if (vp != NULL) 6802 vn_seqc_write_begin(vp); 6803 mp->mnt_rootvnode = NULL; 6804 return (vp); 6805 } 6806 6807 void 6808 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6809 { 6810 6811 MPASS(mp->mnt_vfs_ops > 0); 6812 vrefact(vp); 6813 mp->mnt_rootvnode = vp; 6814 } 6815 6816 /* 6817 * These are helper functions for filesystems to traverse all 6818 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6819 * 6820 * This interface replaces MNT_VNODE_FOREACH. 6821 */ 6822 6823 struct vnode * 6824 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6825 { 6826 struct vnode *vp; 6827 6828 maybe_yield(); 6829 MNT_ILOCK(mp); 6830 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6831 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6832 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6833 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
struct vnode *
__mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	maybe_yield();
	MNT_ILOCK(mp);
	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		__mnt_vnode_markerfree_all(mvp, mp);
		/* MNT_IUNLOCK(mp); -- done in above function */
		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
		return (NULL);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

struct vnode *
__mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
{
	struct vnode *vp;

	*mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	MNT_REF(mp);

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;
		VI_LOCK(vp);
		if (VN_IS_DOOMED(vp)) {
			VI_UNLOCK(vp);
			continue;
		}
		break;
	}
	if (vp == NULL) {
		MNT_REL(mp);
		MNT_IUNLOCK(mp);
		vn_free_marker(*mvp);
		*mvp = NULL;
		return (NULL);
	}
	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	return (vp);
}

void
__mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL) {
		MNT_IUNLOCK(mp);
		return;
	}

	mtx_assert(MNT_MTX(mp), MA_OWNED);

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}

/*
 * These are helper functions for filesystems to traverse their
 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
 */
static void
mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));

	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
	vn_free_marker(*mvp);
	*mvp = NULL;
}
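/*
 * The conventional order takes the vnode interlock before the vnode list
 * lock, while the lazy iterator walks the list holding mnt_listmtx; hence
 * the VI_TRYLOCK() in mnt_vnode_next_lazy() and the relock dance below
 * when the try-lock fails.
 */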
6953 */ 6954 vhold(vp); 6955 mtx_unlock(&mp->mnt_listmtx); 6956 VI_LOCK(vp); 6957 if (VN_IS_DOOMED(vp)) { 6958 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6959 goto out_lost; 6960 } 6961 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6962 /* 6963 * There is nothing to do if we are the last user. 6964 */ 6965 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6966 goto out_lost; 6967 mtx_lock(&mp->mnt_listmtx); 6968 return (true); 6969 out_lost: 6970 vdropl(vp); 6971 maybe_yield(); 6972 mtx_lock(&mp->mnt_listmtx); 6973 return (false); 6974 } 6975 6976 static struct vnode * 6977 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6978 void *cbarg) 6979 { 6980 struct vnode *vp; 6981 6982 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6983 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6984 restart: 6985 vp = TAILQ_NEXT(*mvp, v_lazylist); 6986 while (vp != NULL) { 6987 if (vp->v_type == VMARKER) { 6988 vp = TAILQ_NEXT(vp, v_lazylist); 6989 continue; 6990 } 6991 /* 6992 * See if we want to process the vnode. Note we may encounter a 6993 * long string of vnodes we don't care about and hog the list 6994 * as a result. Check for it and requeue the marker. 6995 */ 6996 VNPASS(!VN_IS_DOOMED(vp), vp); 6997 if (!cb(vp, cbarg)) { 6998 if (!should_yield()) { 6999 vp = TAILQ_NEXT(vp, v_lazylist); 7000 continue; 7001 } 7002 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 7003 v_lazylist); 7004 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 7005 v_lazylist); 7006 mtx_unlock(&mp->mnt_listmtx); 7007 kern_yield(PRI_USER); 7008 mtx_lock(&mp->mnt_listmtx); 7009 goto restart; 7010 } 7011 /* 7012 * Try-lock because this is the wrong lock order. 7013 */ 7014 if (!VI_TRYLOCK(vp) && 7015 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 7016 goto restart; 7017 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 7018 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 7019 ("alien vnode on the lazy list %p %p", vp, mp)); 7020 VNPASS(vp->v_mount == mp, vp); 7021 VNPASS(!VN_IS_DOOMED(vp), vp); 7022 break; 7023 } 7024 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7025 7026 /* Check if we are done */ 7027 if (vp == NULL) { 7028 mtx_unlock(&mp->mnt_listmtx); 7029 mnt_vnode_markerfree_lazy(mvp, mp); 7030 return (NULL); 7031 } 7032 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7033 mtx_unlock(&mp->mnt_listmtx); 7034 ASSERT_VI_LOCKED(vp, "lazy iter"); 7035 return (vp); 7036 } 7037 7038 struct vnode * 7039 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7040 void *cbarg) 7041 { 7042 7043 maybe_yield(); 7044 mtx_lock(&mp->mnt_listmtx); 7045 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7046 } 7047 7048 struct vnode * 7049 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7050 void *cbarg) 7051 { 7052 struct vnode *vp; 7053 7054 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7055 return (NULL); 7056 7057 *mvp = vn_alloc_marker(mp); 7058 MNT_ILOCK(mp); 7059 MNT_REF(mp); 7060 MNT_IUNLOCK(mp); 7061 7062 mtx_lock(&mp->mnt_listmtx); 7063 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7064 if (vp == NULL) { 7065 mtx_unlock(&mp->mnt_listmtx); 7066 mnt_vnode_markerfree_lazy(mvp, mp); 7067 return (NULL); 7068 } 7069 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7070 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7071 } 7072 7073 void 7074 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7075 { 7076 7077 if (*mvp == NULL) 7078 return; 7079 7080 mtx_lock(&mp->mnt_listmtx); 7081 
void
__mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
{

	if (*mvp == NULL)
		return;

	mtx_lock(&mp->mnt_listmtx);
	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
	mtx_unlock(&mp->mnt_listmtx);
	mnt_vnode_markerfree_lazy(mvp, mp);
}

int
vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
{

	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
		cnp->cn_flags &= ~NOEXECCHECK;
		return (0);
	}

	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread));
}

/*
 * Do not use this variant unless you have means other than the hold count
 * to prevent the vnode from getting freed.
 */
void
vn_seqc_write_begin_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_holdcnt > 0, vp);
	VNPASS(vp->v_seqc_users >= 0, vp);
	vp->v_seqc_users++;
	if (vp->v_seqc_users == 1)
		seqc_sleepable_write_begin(&vp->v_seqc);
}

void
vn_seqc_write_begin(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_begin_locked(vp);
	VI_UNLOCK(vp);
}

void
vn_seqc_write_end_locked(struct vnode *vp)
{

	ASSERT_VI_LOCKED(vp, __func__);
	VNPASS(vp->v_seqc_users > 0, vp);
	vp->v_seqc_users--;
	if (vp->v_seqc_users == 0)
		seqc_sleepable_write_end(&vp->v_seqc);
}

void
vn_seqc_write_end(struct vnode *vp)
{

	VI_LOCK(vp);
	vn_seqc_write_end_locked(vp);
	VI_UNLOCK(vp);
}

/*
 * Special case handling for allocating and freeing vnodes.
 *
 * The counter remains unchanged on free so that a doomed vnode will
 * keep testing as in-modify for as long as it is accessible with SMR.
 */
static void
vn_seqc_init(struct vnode *vp)
{

	vp->v_seqc = 0;
	vp->v_seqc_users = 0;
}

static void
vn_seqc_write_end_free(struct vnode *vp)
{

	VNPASS(seqc_in_modify(vp->v_seqc), vp);
	VNPASS(vp->v_seqc_users == 1, vp);
}

void
vn_irflag_set_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & toset) == 0, vp,
	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
	    __func__, flags, toset));
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_set_cond_locked(struct vnode *vp, short toset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	atomic_store_short(&vp->v_irflag, flags | toset);
}

void
vn_irflag_set_cond(struct vnode *vp, short toset)
{

	VI_LOCK(vp);
	vn_irflag_set_cond_locked(vp, toset);
	VI_UNLOCK(vp);
}

void
vn_irflag_unset_locked(struct vnode *vp, short tounset)
{
	short flags;

	ASSERT_VI_LOCKED(vp, __func__);
	flags = vn_irflag_read(vp);
	VNASSERT((flags & tounset) == tounset, vp,
	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
	    __func__, flags, tounset));
	atomic_store_short(&vp->v_irflag, flags & ~tounset);
}

void
vn_irflag_unset(struct vnode *vp, short tounset)
{

	VI_LOCK(vp);
	vn_irflag_unset_locked(vp, tounset);
	VI_UNLOCK(vp);
}
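/*
 * Get the file size as an off_t.  The vnode must be locked; EFBIG is
 * returned for objects whose size does not fit.
 */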
int
vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred)
{
	struct vattr vattr;
	int error;

	ASSERT_VOP_LOCKED(vp, __func__);
	error = VOP_GETATTR(vp, &vattr, cred);
	if (__predict_true(error == 0)) {
		if (vattr.va_size <= OFF_MAX)
			*size = vattr.va_size;
		else
			error = EFBIG;
	}
	return (error);
}

int
vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred)
{
	int error;

	VOP_LOCK(vp, LK_SHARED);
	error = vn_getsize_locked(vp, size, cred);
	VOP_UNLOCK(vp);
	return (error);
}

#ifdef INVARIANTS
void
vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state)
{

	switch (vp->v_state) {
	case VSTATE_UNINITIALIZED:
		switch (state) {
		case VSTATE_CONSTRUCTED:
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_CONSTRUCTED:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DESTROYING:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DESTROYING:
		ASSERT_VOP_ELOCKED(vp, __func__);
		switch (state) {
		case VSTATE_DEAD:
			return;
		default:
			break;
		}
		break;
	case VSTATE_DEAD:
		switch (state) {
		case VSTATE_UNINITIALIZED:
			return;
		default:
			break;
		}
		break;
	}

	vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state);
	panic("invalid state transition %d -> %d\n", vp->v_state, state);
}
#endif