/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_watchdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/counter.h>
#include <sys/dirent.h>
#include <sys/event.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lockf.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/reboot.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/watchdog.h>

#include <machine/stdarg.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
#error DEBUG_VFS_LOCKS requires INVARIANTS and WITNESS
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

static void	delmntque(struct vnode *vp);
static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
		    int slpflag, int slptimeo);
static void	syncer_shutdown(void *arg, int howto);
static int	vtryrecycle(struct vnode *vp);
static void	v_init_counters(struct vnode *);
static void	vn_seqc_init(struct vnode *);
static void	vn_seqc_write_end_free(struct vnode *vp);
static void	vgonel(struct vnode *);
static bool	vhold_recycle_free(struct vnode *);
static void	vdropl_recycle(struct vnode *vp);
static void	vdrop_recycle(struct vnode *vp);
static void	vfs_knllock(void *arg);
static void	vfs_knlunlock(void *arg);
static void	vfs_knl_assert_lock(void *arg, int what);
static void	destroy_vpollinfo(struct vpollinfo *vi);
static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
		    daddr_t startlbn, daddr_t endlbn);
static void	vnlru_recalc(void);

static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration and statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode configuration");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, stats, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode statistics");
static SYSCTL_NODE(_vfs_vnode, OID_AUTO, vnlru, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "vnode recycling");

/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
 */
static u_long __exclusive_cache_line numvnodes;

SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence (legacy)");
SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, count, CTLFLAG_RD, &numvnodes, 0,
    "Number of vnodes in existence");

static counter_u64_t vnodes_created;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, created, CTLFLAG_RD, &vnodes_created,
    "Number of vnodes created by getnewvnode");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
__enum_uint8(vtype) iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
};
int vttoif_tab[10] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
};

/*
 * List of allocated vnodes in the system.
 */
static TAILQ_HEAD(freelst, vnode) vnode_list;
static struct vnode *vnode_list_free_marker;
static struct vnode *vnode_list_reclaim_marker;

/*
 * "Free" vnode target.  Free vnodes are rarely completely free, but are
 * just ones that are cheap to recycle.  Usually they are for files which
 * have been stat'd but not read; these usually have inode and namecache
 * data attached to them.  This target is the preferred minimum size of a
 * sub-cache consisting mostly of such files.  The system balances the size
 * of this sub-cache with its complement to try to prevent either from
 * thrashing while the other is relatively inactive.  The targets express
 * a preference for the best balance.
 *
 * "Above" this target there are 2 further targets (watermarks) related
 * to recycling of free vnodes.  In the best-operating case, the cache is
 * exactly full, the free list has size between vlowat and vhiwat above the
 * free target, and recycling from it and normal use maintains this state.
 * Sometimes the free list is below vlowat or even empty, but this state
 * is even better for immediate use provided the cache is not full.
 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
 * ones) to reach one of these states.  The watermarks are currently hard-
 * coded as 4% and 9% of the available space higher.  These and the default
 * of 25% for wantfreevnodes are too large if the memory size is large.
 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
 * whenever vnlru_proc() becomes active.
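 * (With the MAXVNODES_MAX cap of 8388608 defined below, 9% of the
 * remaining 75% works out to roughly 566231 vnodes.)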
 */
static long wantfreevnodes;
static long __exclusive_cache_line freevnodes;
static long freevnodes_old;

static counter_u64_t recycles_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
    "Number of vnodes recycled to meet vnode cache targets");

static counter_u64_t recycles_free_count;
SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets (legacy)");
SYSCTL_COUNTER_U64(_vfs_vnode_vnlru, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
    "Number of free vnodes recycled to meet vnode cache targets");

static counter_u64_t vnode_skipped_requeues;
SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, skipped_requeues, CTLFLAG_RD, &vnode_skipped_requeues,
    "Number of times LRU requeue was skipped due to lock contention");

static u_long deferred_inact;
SYSCTL_ULONG(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD,
    &deferred_inact, 0, "Number of times inactive processing was deferred");

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx __exclusive_cache_line vnode_list_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

static uma_zone_t buf_trie_zone;
static smr_t buf_trie_smr;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");

__read_frequently smr_t vfs_smr;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata for filesystems
 * mounted on block devices is delayed only about half the time that file
 * data is delayed.  Similarly, directory updates are more critical, so
 * they are delayed only about a third of the time that file data is
 * delayed.  Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process).  The syncer_delayno variable indicates the next queue
 * that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, bufobj);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	bo->bo_synclist
 *	sync_vnode_count
 *	syncer_delayno
 *	syncer_state
 *	syncer_workitem_pending
 *	syncer_worklist_len
 *	rushjob
 */
static struct mtx sync_mtx;
static struct cv sync_wakeup;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
    "Time to delay syncing files (in seconds)");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
    "Time to delay syncing directories (in seconds)");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
    "Time to delay syncing metadata (in seconds)");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
    "Number of times I/O speeded up (rush requests)");

#define	VDBATCH_SIZE 8
struct vdbatch {
	u_int index;
	struct mtx lock;
	struct vnode *tab[VDBATCH_SIZE];
};
DPCPU_DEFINE_STATIC(struct vdbatch, vd);

static void	vdbatch_dequeue(struct vnode *vp);

/*
 * When shutting down the syncer, run it at four times normal speed.
 */
#define SYNCER_SHUTDOWN_SPEEDUP		4
static int sync_vnode_count;
static int syncer_worklist_len;
static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
    syncer_state;

/* Target for maximum number of vnodes. */
u_long desiredvnodes;
static u_long gapvnodes;		/* gap between wanted and desired */
static u_long vhiwat;			/* enough extras after expansion */
static u_long vlowat;			/* minimal extras before expansion */
static bool vstir;			/* nonzero to stir non-free vnodes */
static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */

static u_long vnlru_read_freevnodes(void);

/*
 * Note that no attempt is made to sanitize these parameters.
 */
static int
sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = desiredvnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == desiredvnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	desiredvnodes = val;
	wantfreevnodes = desiredvnodes / 4;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	/*
	 * XXX There is no protection against multiple threads changing
	 * desiredvnodes at the same time.  Locking above only helps vnlru and
	 * getnewvnode.
	 */
	vfs_hash_changesize(desiredvnodes);
	cache_changesize(desiredvnodes);
	return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, limit,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
    "LU", "Target for maximum number of vnodes");

static int
sysctl_freevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long rfreevnodes;

	rfreevnodes = vnlru_read_freevnodes();
	return (sysctl_handle_long(oidp, &rfreevnodes, 0, req));
}

SYSCTL_PROC(_vfs, OID_AUTO, freevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_stats, OID_AUTO, free,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_freevnodes,
    "LU", "Number of \"free\" vnodes");

static int
sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
{
	u_long val;
	int error;

	val = wantfreevnodes;
	error = sysctl_handle_long(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == wantfreevnodes)
		return (0);
	mtx_lock(&vnode_list_mtx);
	wantfreevnodes = val;
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes (legacy)");
SYSCTL_PROC(_vfs_vnode_param, OID_AUTO, wantfree,
    CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
    "LU", "Target for minimum number of \"free\" vnodes");

static int vnlru_nowhere;
SYSCTL_INT(_vfs_vnode_vnlru, OID_AUTO, failed_runs, CTLFLAG_RD | CTLFLAG_STATS,
    &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");

static int
sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct vnode *vp;
	struct nameidata nd;
	char *buf;
	unsigned long ndflags;
	int error;

	if (req->newptr == NULL)
		return (EINVAL);
	if (req->newlen >= PATH_MAX)
		return (E2BIG);

	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
	error = SYSCTL_IN(req, buf, req->newlen);
	if (error != 0)
		goto out;

	buf[req->newlen] = '\0';

	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1;
	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf);
	if ((error = namei(&nd)) != 0)
		goto out;
	vp = nd.ni_vp;

	if (VN_IS_DOOMED(vp)) {
		/*
		 * This vnode is being recycled.  Return != 0 to let the caller
		 * know that the sysctl had no effect.  Return EAGAIN because a
		 * subsequent call will likely succeed (since namei will create
		 * a new vnode if necessary)
		 */
		error = EAGAIN;
		goto putvnode;
	}

	counter_u64_add(recycles_count, 1);
	vgone(vp);
putvnode:
	vput(vp);
	NDFREE_PNBUF(&nd);
out:
	free(buf, M_TEMP);
	return (error);
}

static int
sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	struct vnode *vp;
	struct file *fp;
	int error;
	int fd;

	if (req->newptr == NULL)
		return (EBADF);

	error = sysctl_handle_int(oidp, &fd, 0, req);
	if (error != 0)
		return (error);
	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
	if (error != 0)
		return (error);
	vp = fp->f_vnode;

	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0)
		goto drop;

	counter_u64_add(recycles_count, 1);
	vgone(vp);
	VOP_UNLOCK(vp);
drop:
	fdrop(fp, td);
	return (error);
}

SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
    sysctl_ftry_reclaim_vnode, "I",
    "Try to reclaim a vnode by its file descriptor");

/* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
#define	vnsz2log 8
#ifndef DEBUG_LOCKS
_Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
    sizeof(struct vnode) < 1UL << (vnsz2log + 1),
    "vnsz2log needs to be updated");
#endif

/*
 * Support for the bufobj clean & dirty pctrie.
 */
static void *
buf_trie_alloc(struct pctrie *ptree)
{
	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
}

static void
buf_trie_free(struct pctrie *ptree, void *node)
{
	uma_zfree_smr(buf_trie_zone, node);
}
PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
    buf_trie_smr);

/*
 * Initialize the vnode management data structures.
 *
 * Reevaluate the following cap on the number of vnodes after the physical
 * memory size exceeds 512GB.  In the limit, as the physical memory size
 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
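 *
 * At that 64:1 ratio, 512GB expressed in KB divided by 64 gives the 8M
 * vnode cap encoded below.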
 */
#ifndef	MAXVNODES_MAX
#define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
#endif

static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");

static struct vnode *
vn_alloc_marker(struct mount *mp)
{
	struct vnode *vp;

	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
	vp->v_type = VMARKER;
	vp->v_mount = mp;

	return (vp);
}

static void
vn_free_marker(struct vnode *vp)
{

	MPASS(vp->v_type == VMARKER);
	free(vp, M_VNODE_MARKER);
}

#ifdef KASAN
static int
vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
{
	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
	return (0);
}

static void
vnode_dtor(void *mem, int size, void *arg __unused)
{
	size_t end1, end2, off1, off2;

	_Static_assert(offsetof(struct vnode, v_vnodelist) <
	    offsetof(struct vnode, v_dbatchcpu),
	    "KASAN marks require updating");

	off1 = offsetof(struct vnode, v_vnodelist);
	off2 = offsetof(struct vnode, v_dbatchcpu);
	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);

	/*
	 * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
	 * after the vnode has been freed.  Try to get some KASAN coverage by
	 * marking everything except those two fields as invalid.  Because
	 * KASAN's tracking is not byte-granular, any preceding fields sharing
	 * the same 8-byte aligned word must also be marked valid.
	 */

	/* Handle the area from the start until v_vnodelist... */
	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);

	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
	if (off2 > off1)
		kasan_mark((void *)((char *)mem + off1), off2 - off1,
		    off2 - off1, KASAN_UMA_FREED);

	/* ... and finally the area from v_dbatchcpu to the end. */
	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
	    KASAN_UMA_FREED);
}
#endif /* KASAN */

/*
 * Initialize a vnode as it first enters the zone.
 */
static int
vnode_init(void *mem, int size, int flags)
{
	struct vnode *vp;

	vp = mem;
	bzero(vp, size);
	/*
	 * Setup locks.
	 */
	vp->v_vnlock = &vp->v_lock;
	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
	/*
	 * By default, don't allow shared locks unless filesystems opt-in.
	 */
	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
	    LK_NOSHARE | LK_IS_VNODE);
	/*
	 * Initialize bufobj.
	 */
	bufobj_init(&vp->v_bufobj, vp);
	/*
	 * Initialize namecache.
	 */
	cache_vnode_init(vp);
	/*
	 * Initialize rangelocks.
	 */
	rangelock_init(&vp->v_rl);

	vp->v_dbatchcpu = NOCPU;

	vp->v_state = VSTATE_DEAD;

	/*
	 * Check vhold_recycle_free for an explanation.
	 */
	vp->v_holdcnt = VHOLD_NO_SMR;
	vp->v_type = VNON;
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (0);
}

/*
 * Free a vnode when it is cleared from the zone.
 */
static void
vnode_fini(void *mem, int size)
{
	struct vnode *vp;
	struct bufobj *bo;

	vp = mem;
	vdbatch_dequeue(vp);
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	rangelock_destroy(&vp->v_rl);
	lockdestroy(vp->v_vnlock);
	mtx_destroy(&vp->v_interlock);
	bo = &vp->v_bufobj;
	rw_destroy(BO_LOCKPTR(bo));

	kasan_mark(mem, size, size, 0);
}

/*
 * Provide the size of NFS nclnode and NFS fh for calculation of the
 * vnode memory consumption.  The size is specified directly to
 * eliminate dependency on NFS-private header.
 *
 * Other filesystems may use bigger or smaller (like UFS and ZFS)
 * private inode data, but the NFS-based estimation is ample enough.
 * Still, we care about differences in the size between 64- and 32-bit
 * platforms.
 *
 * Namecache structure size is heuristically
 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
 */
#ifdef _LP64
#define	NFS_NCLNODE_SZ	(528 + 64)
#define	NC_SZ		148
#else
#define	NFS_NCLNODE_SZ	(360 + 32)
#define	NC_SZ		92
#endif

static void
vntblinit(void *dummy __unused)
{
	struct vdbatch *vd;
	uma_ctor ctor;
	uma_dtor dtor;
	int cpu, physvnodes, virtvnodes;

	/*
	 * Desiredvnodes is a function of the physical memory size and the
	 * kernel's heap size.  Generally speaking, it scales with the
	 * physical memory size.  The ratio of desiredvnodes to the physical
	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
	 * Thereafter, the marginal ratio of desiredvnodes to the physical
	 * memory size is 1:64.  However, desiredvnodes is limited by the
	 * kernel's heap size.  The memory required by desiredvnodes vnodes
	 * and vm objects must not exceed 1/10th of the kernel's heap size.
	 */
	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
	desiredvnodes = min(physvnodes, virtvnodes);
	if (desiredvnodes > MAXVNODES_MAX) {
		if (bootverbose)
			printf("Reducing kern.maxvnodes %lu -> %lu\n",
			    desiredvnodes, MAXVNODES_MAX);
		desiredvnodes = MAXVNODES_MAX;
	}
	wantfreevnodes = desiredvnodes / 4;
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_list);
	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
	/*
	 * The lock is taken to appease WITNESS.
	 */
	mtx_lock(&vnode_list_mtx);
	vnlru_recalc();
	mtx_unlock(&vnode_list_mtx);
	vnode_list_free_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);

#ifdef KASAN
	ctor = vnode_ctor;
	dtor = vnode_dtor;
#else
	ctor = NULL;
	dtor = NULL;
#endif
	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
	uma_zone_set_smr(vnode_zone, vfs_smr);

	/*
	 * Preallocate enough nodes to support one-per buf so that
	 * we can not fail an insert.  reassignbuf() callers can not
	 * tolerate the insertion failure.
	 */
	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
	uma_prealloc(buf_trie_zone, nbuf);

	vnodes_created = counter_u64_alloc(M_WAITOK);
	recycles_count = counter_u64_alloc(M_WAITOK);
	recycles_free_count = counter_u64_alloc(M_WAITOK);
	vnode_skipped_requeues = counter_u64_alloc(M_WAITOK);

	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
	cv_init(&sync_wakeup, "syncer");

	CPU_FOREACH(cpu) {
		vd = DPCPU_ID_PTR((cpu), vd);
		bzero(vd, sizeof(*vd));
		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
	}
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);

/*
 * Mark a mount point as busy.  Used to synchronize access and to delay
 * unmounting.  Eventually, mountlist_mtx is not released on failure.
 *
 * vfs_busy() is a custom lock, it can block the caller.
 * vfs_busy() only sleeps if the unmount is active on the mount point.
 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
 * vnode belonging to mp.
 *
 * Lookup uses vfs_busy() to traverse mount points.
 * root fs			var fs
 * / vnode lock		A	/ vnode lock (/var)		D
 * /var vnode lock	B	/log vnode lock(/var/log)	E
 * vfs_busy lock	C	vfs_busy lock			F
 *
 * Within each file system, the lock order is C->A->B and F->D->E.
 *
 * When traversing across mounts, the system follows that lock order:
 *
 *	C->A->B
 *	      |
 *	      +->F->D->E
 *
 * The lookup() process for namei("/var") illustrates the process:
 *  1. VOP_LOOKUP() obtains B while A is held
 *  2. vfs_busy() obtains a shared lock on F while A and B are held
 *  3. vput() releases lock on B
 *  4. vput() releases lock on A
 *  5. VFS_ROOT() obtains lock on D while shared lock on F is held
 *  6. vfs_unbusy() releases shared lock on F
 *  7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
 *     Attempt to lock A (instead of vp_crossmp) while D is held would
 *     violate the global order, causing deadlocks.
 *
 * dounmount() locks B while F is drained.  Note that for stacked
 * filesystems, D and B in the example above may be the same lock,
 * which introduces a potential lock order reversal deadlock between
 * dounmount() and step 5 above.  These filesystems may avoid the LOR
 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
 * remain held until after step 5.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	struct mount_pcpu *mpcpu;

	MPASS((flags & ~MBF_MASK) == 0);
	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		return (0);
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REF(mp);
	/*
	 * If the mount point is currently being unmounted, sleep until the
	 * mount point fate is decided.  If the thread doing the unmounting fails,
	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
	 * that this mount point has survived the unmount attempt and vfs_busy
	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
	 * about to be really destroyed.  vfs_busy needs to release its
	 * reference on the mount point in this case and return with ENOENT,
	 * telling the caller the mount it tried to busy is no longer valid.
	 */
	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		KASSERT(TAILQ_EMPTY(&mp->mnt_uppers),
		    ("%s: non-empty upper mount list with pending unmount",
		    __func__));
		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
			MNT_REL(mp);
			MNT_IUNLOCK(mp);
			CTR1(KTR_VFS, "%s: failed busying before sleeping",
			    __func__);
			return (ENOENT);
		}
		if (flags & MBF_MNTLSTLOCK)
			mtx_unlock(&mountlist_mtx);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
		if (flags & MBF_MNTLSTLOCK)
			mtx_lock(&mountlist_mtx);
		MNT_ILOCK(mp);
	}
	if (flags & MBF_MNTLSTLOCK)
		mtx_unlock(&mountlist_mtx);
	mp->mnt_lockref++;
	MNT_IUNLOCK(mp);
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	struct mount_pcpu *mpcpu;
	int c;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

	if (vfs_op_thread_enter(mp, mpcpu)) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
		vfs_op_thread_exit(mp, mpcpu);
		return;
	}

	MNT_ILOCK(mp);
	vfs_assert_mount_counters(mp);
	MNT_REL(mp);
	c = --mp->mnt_lockref;
	if (mp->mnt_vfs_ops == 0) {
		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
		MNT_IUNLOCK(mp);
		return;
	}
	if (c < 0)
		vfs_dump_mount_counters(mp);
	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
		mp->mnt_kern_flag &= ~MNTK_DRAINING;
		wakeup(&mp->mnt_lockref);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			vfs_ref(mp);
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	return ((struct mount *) 0);
}

/*
 * Lookup a mount point by filesystem identifier, busying it before
 * returning.
 *
 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
 * cache for popular filesystem identifiers.  The cache is lockless, using
 * the fact that struct mount's are never freed.  In the worst case we may
 * get a pointer to an unmounted or even a different filesystem, so we have
 * to check what we got, and go the slow way if so.
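 *
 * The cache index is an xor-fold of the two fsid words masked with
 * FSID_CACHE_SIZE - 1; a stale or colliding entry simply falls through
 * to the locked mountlist scan below.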
 */
struct mount *
vfs_busyfs(fsid_t *fsid)
{
#define	FSID_CACHE_SIZE	256
	typedef struct mount * volatile vmp_t;
	static vmp_t cache[FSID_CACHE_SIZE];
	struct mount *mp;
	int error;
	uint32_t hash;

	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = cache[hash];
	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
		goto slow;
	if (vfs_busy(mp, 0) != 0) {
		cache[hash] = NULL;
		goto slow;
	}
	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
		return (mp);
	else
		vfs_unbusy(mp);

slow:
	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
			error = vfs_busy(mp, MBF_MNTLSTLOCK);
			if (error) {
				cache[hash] = NULL;
				mtx_unlock(&mountlist_mtx);
				return (NULL);
			}
			cache[hash] = mp;
			return (mp);
		}
	}
	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Check if a user can access privileged mount options.
 */
int
vfs_suser(struct mount *mp, struct thread *td)
{
	int error;

	if (jailed(td->td_ucred)) {
		/*
		 * If the jail of the calling thread lacks permission for
		 * this type of file system, deny immediately.
		 */
		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
			return (EPERM);

		/*
		 * If the file system was mounted outside the jail of the
		 * calling thread, deny immediately.
		 */
		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
			return (EPERM);
	}

	/*
	 * If the file system supports delegated administration, we don't
	 * check for the PRIV_VFS_MOUNT_OWNER privilege - it will be better
	 * verified by the file system itself.
	 * If this is not the user that did the original mount, we check for
	 * the PRIV_VFS_MOUNT_OWNER privilege.
	 */
	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
			return (error);
	}
	return (0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
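 *
 * val[0] is assembled with makedev(255, ...): the low byte of the
 * filesystem type number occupies the top byte of the minor number and
 * mntid_base supplies the remaining bits, while val[1] holds the type
 * number itself.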
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static uint16_t mntid_base;
	struct mount *nmp;
	fsid_t tfsid;
	int mtype;

	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
			break;
		vfs_rel(nmp);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}

/*
 * Knob to control the precision of file timestamps:
 *
 * 0 = seconds only; nanoseconds zeroed.
 * 1 = seconds and nanoseconds, accurate within 1/HZ.
 * 2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_USEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "File timestamp precision (0: seconds, "
    "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
    "3+: sec + ns (max. precision))");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * Try to reduce the total number of vnodes.
 *
 * This routine (and its user) are buggy in at least the following ways:
 * - all parameters were picked years ago when RAM sizes were significantly
 *   smaller
 * - it can pick vnodes based on pages used by the vm object, but filesystems
 *   like ZFS don't use it, making the pick broken
 * - since ZFS has its own aging policy it gets partially combated by this one
 * - a dedicated method should be provided for filesystems to let them decide
 *   whether the vnode should be recycled
 *
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 *
 * @param reclaim_nc_src Only reclaim directories with outgoing namecache
 *                       entries if this argument is true
 * @param trigger        Only reclaim vnodes with fewer than this many resident
 *                       pages.
 * @param target         How many vnodes to reclaim.
 * @return               The number of vnodes that were reclaimed.
 */
static int
vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
{
	struct vnode *vp, *mvp;
	struct mount *mp;
	struct vm_object *object;
	u_long done;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);

	retried = false;
	done = 0;

	mvp = vnode_list_reclaim_marker;
restart:
	vp = mvp;
	while (done < target) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL))
			break;

		if (__predict_false(vp->v_type == VMARKER))
			continue;

		/*
		 * If it's been deconstructed already, it's still
		 * referenced, or it exceeds the trigger, skip it.
		 * Also skip free vnodes.  We are trying to make space
		 * to expand the free list, not reduce it.
		 */
		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
			goto next_iter;

		if (vp->v_type == VBAD || vp->v_type == VNON)
			goto next_iter;

		object = atomic_load_ptr(&vp->v_object);
		if (object == NULL || object->resident_page_count > trigger) {
			goto next_iter;
		}

		/*
		 * Handle races against vnode allocation.  Filesystems lock the
		 * vnode some time after it gets returned from getnewvnode,
		 * despite type and hold count being manipulated earlier.
		 * Resorting to checking v_mount restores guarantees present
		 * before the global list was reworked to contain all vnodes.
		 */
		if (!VI_TRYLOCK(vp))
			goto next_iter;
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		if (vp->v_mount == NULL) {
			VI_UNLOCK(vp);
			goto next_iter;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);

		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
			vdrop_recycle(vp);
			goto next_iter_unlocked;
		}
		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
			vdrop_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}

		VI_LOCK(vp);
		if (vp->v_usecount > 0 ||
		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
		    (vp->v_object != NULL && vp->v_object->handle == vp &&
		    vp->v_object->resident_page_count > trigger)) {
			VOP_UNLOCK(vp);
			vdropl_recycle(vp);
			vn_finished_write(mp);
			goto next_iter_unlocked;
		}
		counter_u64_add(recycles_count, 1);
		vgonel(vp);
		VOP_UNLOCK(vp);
		vdropl_recycle(vp);
		vn_finished_write(mp);
		done++;
next_iter_unlocked:
		maybe_yield();
		mtx_lock(&vnode_list_mtx);
		goto restart;
next_iter:
		MPASS(vp->v_type != VMARKER);
		if (!should_yield())
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		kern_yield(PRI_USER);
		mtx_lock(&vnode_list_mtx);
		goto restart;
	}
	if (done == 0 && !retried) {
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
		retried = true;
		goto restart;
	}
	return (done);
}

static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free, 0,
    "limit on vnode free requests per call to the vnlru_free routine");

/*
 * Attempt to reduce the free list by the requested amount.
 */
static int
vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
{
	struct vnode *vp;
	struct mount *mp;
	int ocount;
	bool retried;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (count > max_vnlru_free)
		count = max_vnlru_free;
	if (count == 0) {
		mtx_unlock(&vnode_list_mtx);
		return (0);
	}
	ocount = count;
	retried = false;
	vp = mvp;
	for (;;) {
		vp = TAILQ_NEXT(vp, v_vnodelist);
		if (__predict_false(vp == NULL)) {
			/*
			 * The free vnode marker can be past eligible vnodes:
			 * 1. if vdbatch_process trylock failed
			 * 2. if vtryrecycle failed
			 *
			 * If so, start the scan from scratch.
			 */
			if (!retried && vnlru_read_freevnodes() > 0) {
				TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
				TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
				vp = mvp;
				retried = true;
				continue;
			}

			/*
			 * Give up
			 */
			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
			mtx_unlock(&vnode_list_mtx);
			break;
		}
		if (__predict_false(vp->v_type == VMARKER))
			continue;
		if (vp->v_holdcnt > 0)
			continue;
		/*
		 * Don't recycle if our vnode is from a different type
		 * of mount point.  Note that mp is type-safe, the
		 * check does not reach unmapped address even if
		 * vnode is reclaimed.
		 */
		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
		    mp->mnt_op != mnt_op) {
			continue;
		}
		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
			continue;
		}
		if (!vhold_recycle_free(vp))
			continue;
		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * FIXME: ignores the return value, meaning it may be that
		 * nothing got recycled but it claims otherwise to the caller.
		 *
		 * Originally the value started being ignored in 2005 with
		 * 114a1006a8204aa156e1f9ad6476cdff89cada7f .
		 *
		 * Respecting the value can run into significant stalls if most
		 * vnodes belong to one file system and it has writes
		 * suspended.  In the presence of many threads and millions of
		 * vnodes they keep contending on the vnode_list_mtx lock only
		 * to find vnodes they can't recycle.
		 *
		 * The solution would be to pre-check if the vnode is likely to
		 * be recycle-able, but it needs to happen with the
		 * vnode_list_mtx lock held.  This runs into a problem where
		 * VOP_GETWRITEMOUNT (currently needed to find out about if
		 * writes are frozen) can take locks which LOR against it.
		 *
		 * Check nullfs for one example (null_getwritemount).
		 */
		vtryrecycle(vp);
		count--;
		if (count == 0) {
			break;
		}
		mtx_lock(&vnode_list_mtx);
		vp = mvp;
	}
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ocount - count);
}

/*
 * XXX: returns without vnode_list_mtx locked!
 */
static int
vnlru_free_locked(int count)
{
	int ret;

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	ret = vnlru_free_impl(count, NULL, vnode_list_free_marker);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (ret);
}

static int
vnlru_free(int count)
{

	mtx_lock(&vnode_list_mtx);
	return (vnlru_free_locked(count));
}

void
vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
{

	MPASS(mnt_op != NULL);
	MPASS(mvp != NULL);
	VNPASS(mvp->v_type == VMARKER, mvp);
	mtx_lock(&vnode_list_mtx);
	vnlru_free_impl(count, mnt_op, mvp);
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
}

struct vnode *
vnlru_alloc_marker(void)
{
	struct vnode *mvp;

	mvp = vn_alloc_marker(NULL);
	mtx_lock(&vnode_list_mtx);
	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	return (mvp);
}

void
vnlru_free_marker(struct vnode *mvp)
{
	mtx_lock(&vnode_list_mtx);
	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
	mtx_unlock(&vnode_list_mtx);
	vn_free_marker(mvp);
}

static void
vnlru_recalc(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
	vlowat = vhiwat / 2;
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;
static u_long vnlruproc_kicks;

SYSCTL_ULONG(_vfs_vnode_vnlru, OID_AUTO, kicks, CTLFLAG_RD, &vnlruproc_kicks, 0,
    "Number of times vnlru got woken up due to vnode shortage");

/*
 * The main freevnodes counter is only updated when a counter local to CPU
 * diverges from 0 by more than VNLRU_FREEVNODES_SLOP.  CPUs are conditionally
 * walked to compute a more accurate total.
 *
 * Note: the actual value at any given moment can still exceed slop, but it
 * should not be by significant margin in practice.
 */
#define	VNLRU_FREEVNODES_SLOP 126

static void __noinline
vfs_freevnodes_rollup(int8_t *lfreevnodes)
{

	atomic_add_long(&freevnodes, *lfreevnodes);
	*lfreevnodes = 0;
	critical_exit();
}

static __inline void
vfs_freevnodes_inc(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)++;
	if (__predict_false(*lfreevnodes == VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static __inline void
vfs_freevnodes_dec(void)
{
	int8_t *lfreevnodes;

	critical_enter();
	lfreevnodes = PCPU_PTR(vfs_freevnodes);
	(*lfreevnodes)--;
	if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP))
		vfs_freevnodes_rollup(lfreevnodes);
	else
		critical_exit();
}

static u_long
vnlru_read_freevnodes(void)
{
	long slop, rfreevnodes, rfreevnodes_old;
	int cpu;

	rfreevnodes = atomic_load_long(&freevnodes);
	rfreevnodes_old = atomic_load_long(&freevnodes_old);

	if (rfreevnodes > rfreevnodes_old)
		slop = rfreevnodes - rfreevnodes_old;
	else
		slop = rfreevnodes_old - rfreevnodes;
	if (slop < VNLRU_FREEVNODES_SLOP)
		return (rfreevnodes >= 0 ? rfreevnodes : 0);
	CPU_FOREACH(cpu) {
		rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes;
	}
	atomic_store_long(&freevnodes_old, rfreevnodes);
	return (freevnodes_old >= 0 ? freevnodes_old : 0);
}

static bool
vnlru_under(u_long rnumvnodes, u_long limit)
{
	u_long rfreevnodes, space;

	if (__predict_false(rnumvnodes > desiredvnodes))
		return (true);

	space = desiredvnodes - rnumvnodes;
	if (space < limit) {
		rfreevnodes = vnlru_read_freevnodes();
		if (rfreevnodes > wantfreevnodes)
			space += rfreevnodes - wantfreevnodes;
	}
	return (space < limit);
}

static void
vnlru_kick_locked(void)
{

	mtx_assert(&vnode_list_mtx, MA_OWNED);
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;
		vnlruproc_kicks++;
		wakeup(vnlruproc);
	}
}

static void
vnlru_kick_cond(void)
{

	if (vnlru_read_freevnodes() > wantfreevnodes)
		return;

	if (vnlruproc_sig)
		return;
	mtx_lock(&vnode_list_mtx);
	vnlru_kick_locked();
	mtx_unlock(&vnode_list_mtx);
}

static void
vnlru_proc_sleep(void)
{

	if (vnlruproc_sig) {
		vnlruproc_sig = 0;
		wakeup(&vnlruproc_sig);
	}
	msleep(vnlruproc, &vnode_list_mtx, PVFS|PDROP, "vlruwt", hz);
}

/*
 * A lighter version of the machinery below.
 *
 * Tries to reach goals only by recycling free vnodes and does not invoke
 * uma_reclaim(UMA_RECLAIM_DRAIN).
 *
 * This works around pathological behavior in vnlru in the presence of tons
 * of free vnodes, but without having to rewrite the machinery at this time.
 * Said behavior boils down to continuously trying to reclaim all kinds of
 * vnodes (cycling through all levels of "force") when the count is
 * transiently above limit.  This happens a lot when all vnodes are used up
 * and vn_alloc speculatively increments the counter.
 *
 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
 * 1 million files in total and 20 find(1) processes stating them in parallel
 * (one per each tree).
 *
 * On a kernel with only stock machinery this needs anywhere between 60 and 120
 * seconds to execute (time varies *wildly* between runs).  With the workaround
 * it consistently stays around 20 seconds.
 *
 * That is to say the entire thing needs a fundamental redesign (most notably
 * to accommodate faster recycling), the above only tries to get it out of
 * the way.
 *
 * Return values are:
 * -1 -- fallback to regular vnlru loop
 *  0 -- do nothing, go to sleep
 * >0 -- recycle this many vnodes
 */
static long
vnlru_proc_light_pick(void)
{
	u_long rnumvnodes, rfreevnodes;

	if (vstir || vnlruproc_sig == 1)
		return (-1);

	rnumvnodes = atomic_load_long(&numvnodes);
	rfreevnodes = vnlru_read_freevnodes();

	/*
	 * vnode limit might have changed and now we may be at a significant
	 * excess.  Bail if we can't sort it out with free vnodes.
	 */
	if (rnumvnodes > desiredvnodes) {
		if (rnumvnodes - rfreevnodes >= desiredvnodes ||
		    rfreevnodes <= wantfreevnodes) {
			return (-1);
		}

		return (rnumvnodes - desiredvnodes);
	}

	/*
	 * Don't try to reach wantfreevnodes target if there are too few vnodes
	 * to begin with.
	 */
	if (rnumvnodes < wantfreevnodes) {
		return (0);
	}

	if (rfreevnodes < wantfreevnodes) {
		return (-1);
	}

	return (0);
}

static bool
vnlru_proc_light(void)
{
	long freecount;

	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);

	if (freecount != 0) {
		vnlru_free(freecount);
	}

	mtx_lock(&vnode_list_mtx);
	vnlru_proc_sleep();
	mtx_assert(&vnode_list_mtx, MA_NOTOWNED);
	return (true);
}

static void
vnlru_proc(void)
{
	u_long rnumvnodes, rfreevnodes, target;
	unsigned long onumvnodes;
	int done, force, trigger, usevnodes;
	bool reclaim_nc_src, want_reread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
	    SHUTDOWN_PRI_FIRST);

	force = 0;
	want_reread = false;
	for (;;) {
		kproc_suspend_check(vnlruproc);

		if (force == 0 && vnlru_proc_light())
			continue;

		mtx_lock(&vnode_list_mtx);
		rnumvnodes = atomic_load_long(&numvnodes);

		if (want_reread) {
			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
			want_reread = false;
		}

		/*
		 * If numvnodes is too large (due to desiredvnodes being
		 * adjusted using its sysctl, or emergency growth), first
		 * try to reduce it by discarding from the free list.
		 */
		if (rnumvnodes > desiredvnodes) {
			vnlru_free_locked(rnumvnodes - desiredvnodes);
			mtx_lock(&vnode_list_mtx);
			rnumvnodes = atomic_load_long(&numvnodes);
		}
		/*
		 * Sleep if the vnode cache is in a good state.  This is
		 * when it is not over-full and has space for about a 4%
		 * or 9% expansion (by growing its size or inexcessively
		 * reducing its free list).  Otherwise, try to reclaim
		 * space for a 10% expansion.
		 */
		if (vstir && force == 0) {
			force = 1;
			vstir = false;
		}
		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
			vnlru_proc_sleep();
			continue;
		}
		rfreevnodes = vnlru_read_freevnodes();

		onumvnodes = rnumvnodes;
		/*
		 * Calculate parameters for recycling.  These are the same
		 * throughout the loop to give some semblance of fairness.
		 * The trigger point is to avoid recycling vnodes with lots
		 * of resident pages.  We aren't trying to free memory; we
		 * are trying to recycle or at least free vnodes.
		 */
		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		if (usevnodes <= 0)
			usevnodes = 1;
		/*
		 * The trigger value is chosen to give a conservatively
		 * large value to ensure that it alone doesn't prevent
		 * making progress.  The value can easily be so large that
		 * it is effectively infinite in some congested and
		 * misconfigured cases, and this is necessary.  Normally
		 * it is about 8 to 100 (pages), which is quite large.
		 */
		trigger = vm_cnt.v_page_count * 2 / usevnodes;
		if (force < 2)
			trigger = vsmalltrigger;
		reclaim_nc_src = force >= 3;
		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
		target = target / 10 + 1;
		done = vlrureclaim(reclaim_nc_src, trigger, target);
		mtx_unlock(&vnode_list_mtx);
		/*
		 * Total number of vnodes can transiently go slightly above the
		 * limit (see vn_alloc_hard), no need to call uma_reclaim if
		 * this happens.
		 */
		if (onumvnodes + 1000 > desiredvnodes && numvnodes <= desiredvnodes)
			uma_reclaim(UMA_RECLAIM_DRAIN);
		if (done == 0) {
			if (force == 0 || force == 1) {
				force = 2;
				continue;
			}
			if (force == 2) {
				force = 3;
				continue;
			}
			want_reread = true;
			force = 0;
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		} else {
			want_reread = true;
			kern_yield(PRI_USER);
		}
	}
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
    &vnlru_kp);

/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Try to recycle a freed vnode.  We abort if anyone picks up a reference
 * before we actually vgone().  This function must be called with the vnode
 * held to prevent the vnode from being returned to the free list midway
 * through vgone().
 */
static int
vtryrecycle(struct vnode *vp)
{
	struct mount *vnmp;

	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
	VNPASS(vp->v_holdcnt > 0, vp);
	/*
	 * This vnode may be found and locked via some other list, if so we
	 * can't recycle it yet.
1844 */ 1845 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) { 1846 CTR2(KTR_VFS, 1847 "%s: impossible to recycle, vp %p lock is already held", 1848 __func__, vp); 1849 vdrop_recycle(vp); 1850 return (EWOULDBLOCK); 1851 } 1852 /* 1853 * Don't recycle if its filesystem is being suspended. 1854 */ 1855 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) { 1856 VOP_UNLOCK(vp); 1857 CTR2(KTR_VFS, 1858 "%s: impossible to recycle, cannot start the write for %p", 1859 __func__, vp); 1860 vdrop_recycle(vp); 1861 return (EBUSY); 1862 } 1863 /* 1864 * If we got this far, we need to acquire the interlock and see if 1865 * anyone picked up this vnode from another list. If not, we will 1866 * mark it with DOOMED via vgonel() so that anyone who does find it 1867 * will skip over it. 1868 */ 1869 VI_LOCK(vp); 1870 if (vp->v_usecount) { 1871 VOP_UNLOCK(vp); 1872 vdropl_recycle(vp); 1873 vn_finished_write(vnmp); 1874 CTR2(KTR_VFS, 1875 "%s: impossible to recycle, %p is already referenced", 1876 __func__, vp); 1877 return (EBUSY); 1878 } 1879 if (!VN_IS_DOOMED(vp)) { 1880 counter_u64_add(recycles_free_count, 1); 1881 vgonel(vp); 1882 } 1883 VOP_UNLOCK(vp); 1884 vdropl_recycle(vp); 1885 vn_finished_write(vnmp); 1886 return (0); 1887 } 1888 1889 /* 1890 * Allocate a new vnode. 1891 * 1892 * The operation never returns an error. Returning an error was disabled 1893 * in r145385 (dated 2005) with the following comment: 1894 * 1895 * XXX Not all VFS_VGET/ffs_vget callers check returns. 1896 * 1897 * Given the age of this commit (almost 15 years at the time of writing this 1898 * comment) restoring the ability to fail requires a significant audit of 1899 * all codepaths. 1900 * 1901 * The routine can try to free a vnode or stall for up to 1 second waiting for 1902 * vnlru to clear things up, but ultimately always performs a M_WAITOK allocation. 1903 */ 1904 static u_long vn_alloc_cyclecount; 1905 static u_long vn_alloc_sleeps; 1906 1907 SYSCTL_ULONG(_vfs_vnode_stats, OID_AUTO, alloc_sleeps, CTLFLAG_RD, &vn_alloc_sleeps, 0, 1908 "Number of times vnode allocation blocked waiting on vnlru"); 1909 1910 static struct vnode * __noinline 1911 vn_alloc_hard(struct mount *mp) 1912 { 1913 u_long rnumvnodes, rfreevnodes; 1914 1915 mtx_lock(&vnode_list_mtx); 1916 rnumvnodes = atomic_load_long(&numvnodes); 1917 if (rnumvnodes + 1 < desiredvnodes) { 1918 vn_alloc_cyclecount = 0; 1919 mtx_unlock(&vnode_list_mtx); 1920 goto alloc; 1921 } 1922 1923 if (vn_alloc_cyclecount != 0) { 1924 rfreevnodes = vnlru_read_freevnodes(); 1925 if (rfreevnodes < wantfreevnodes) { 1926 if (vn_alloc_cyclecount++ >= rfreevnodes) { 1927 vn_alloc_cyclecount = 0; 1928 vstir = true; 1929 } 1930 } else { 1931 vn_alloc_cyclecount = 0; 1932 } 1933 } 1934 1935 /* 1936 * Grow the vnode cache if it will not be above its target max 1937 * after growing. Otherwise, if the free list is nonempty, try 1938 * to reclaim 1 item from it before growing the cache (possibly 1939 * above its target max if the reclamation failed or is delayed). 1940 * Otherwise, wait for some space. In all cases, schedule 1941 * vnlru_proc() if we are getting short of space. The watermarks 1942 * should be chosen so that we never wait or even reclaim from 1943 * the free list to below its target minimum. 1944 */ 1945 if (vnlru_free_locked(1) > 0) 1946 goto alloc; 1947 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1948 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 1949 /* 1950 * Wait for space for a new vnode. 
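 *
 * The sleep below is bounded to hz ticks (about one second), which is
 * where the "stall for up to 1 second" remark in the comment above
 * vn_alloc_hard() comes from; if vnlru still has not made room by then,
 * the M_WAITOK allocation proceeds anyway and the count transiently
 * exceeds the limit.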
1951 */ 1952 mtx_lock(&vnode_list_mtx); 1953 vnlru_kick_locked(); 1954 vn_alloc_sleeps++; 1955 msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz); 1956 if (atomic_load_long(&numvnodes) + 1 > desiredvnodes && 1957 vnlru_read_freevnodes() > 1) 1958 vnlru_free_locked(1); 1959 else 1960 mtx_unlock(&vnode_list_mtx); 1961 } 1962 alloc: 1963 mtx_assert(&vnode_list_mtx, MA_NOTOWNED); 1964 atomic_add_long(&numvnodes, 1); 1965 vnlru_kick_cond(); 1966 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1967 } 1968 1969 static struct vnode * 1970 vn_alloc(struct mount *mp) 1971 { 1972 u_long rnumvnodes; 1973 1974 if (__predict_false(vn_alloc_cyclecount != 0)) 1975 return (vn_alloc_hard(mp)); 1976 rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1; 1977 if (__predict_false(vnlru_under(rnumvnodes, vlowat))) { 1978 atomic_subtract_long(&numvnodes, 1); 1979 return (vn_alloc_hard(mp)); 1980 } 1981 1982 return (uma_zalloc_smr(vnode_zone, M_WAITOK)); 1983 } 1984 1985 static void 1986 vn_free(struct vnode *vp) 1987 { 1988 1989 atomic_subtract_long(&numvnodes, 1); 1990 uma_zfree_smr(vnode_zone, vp); 1991 } 1992 1993 /* 1994 * Return the next vnode from the free list. 1995 */ 1996 int 1997 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, 1998 struct vnode **vpp) 1999 { 2000 struct vnode *vp; 2001 struct thread *td; 2002 struct lock_object *lo; 2003 2004 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag); 2005 2006 KASSERT(vops->registered, 2007 ("%s: not registered vector op %p\n", __func__, vops)); 2008 cache_validate_vop_vector(mp, vops); 2009 2010 td = curthread; 2011 if (td->td_vp_reserved != NULL) { 2012 vp = td->td_vp_reserved; 2013 td->td_vp_reserved = NULL; 2014 } else { 2015 vp = vn_alloc(mp); 2016 } 2017 counter_u64_add(vnodes_created, 1); 2018 2019 vn_set_state(vp, VSTATE_UNINITIALIZED); 2020 2021 /* 2022 * Locks are given the generic name "vnode" when created. 2023 * Follow the historic practice of using the filesystem 2024 * name when they allocated, e.g., "zfs", "ufs", "nfs, etc. 2025 * 2026 * Locks live in a witness group keyed on their name. Thus, 2027 * when a lock is renamed, it must also move from the witness 2028 * group of its old name to the witness group of its new name. 2029 * 2030 * The change only needs to be made when the vnode moves 2031 * from one filesystem type to another. We ensure that each 2032 * filesystem use a single static name pointer for its tag so 2033 * that we can compare pointers rather than doing a strcmp(). 2034 */ 2035 lo = &vp->v_vnlock->lock_object; 2036 #ifdef WITNESS 2037 if (lo->lo_name != tag) { 2038 #endif 2039 lo->lo_name = tag; 2040 #ifdef WITNESS 2041 WITNESS_DESTROY(lo); 2042 WITNESS_INIT(lo, tag); 2043 } 2044 #endif 2045 /* 2046 * By default, don't allow shared locks unless filesystems opt-in. 2047 */ 2048 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; 2049 /* 2050 * Finalize various vnode identity bits. 
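 *
 * The asserts below sanity-check that the previous user of this
 * zone-cached memory left no stale state behind (see freevnode() for the
 * cleanup side of the contract).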
2051 */ 2052 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); 2053 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); 2054 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); 2055 vp->v_type = VNON; 2056 vp->v_op = vops; 2057 vp->v_irflag = 0; 2058 v_init_counters(vp); 2059 vn_seqc_init(vp); 2060 vp->v_bufobj.bo_ops = &buf_ops_bio; 2061 #ifdef DIAGNOSTIC 2062 if (mp == NULL && vops != &dead_vnodeops) 2063 printf("NULL mp in getnewvnode(9), tag %s\n", tag); 2064 #endif 2065 #ifdef MAC 2066 mac_vnode_init(vp); 2067 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) 2068 mac_vnode_associate_singlelabel(mp, vp); 2069 #endif 2070 if (mp != NULL) { 2071 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; 2072 } 2073 2074 /* 2075 * For the filesystems which do not use vfs_hash_insert(), 2076 * still initialize v_hash to have vfs_hash_index() useful. 2077 * E.g., nullfs uses vfs_hash_index() on the lower vnode for 2078 * its own hashing. 2079 */ 2080 vp->v_hash = (uintptr_t)vp >> vnsz2log; 2081 2082 *vpp = vp; 2083 return (0); 2084 } 2085 2086 void 2087 getnewvnode_reserve(void) 2088 { 2089 struct thread *td; 2090 2091 td = curthread; 2092 MPASS(td->td_vp_reserved == NULL); 2093 td->td_vp_reserved = vn_alloc(NULL); 2094 } 2095 2096 void 2097 getnewvnode_drop_reserve(void) 2098 { 2099 struct thread *td; 2100 2101 td = curthread; 2102 if (td->td_vp_reserved != NULL) { 2103 vn_free(td->td_vp_reserved); 2104 td->td_vp_reserved = NULL; 2105 } 2106 } 2107 2108 static void __noinline 2109 freevnode(struct vnode *vp) 2110 { 2111 struct bufobj *bo; 2112 2113 /* 2114 * The vnode has been marked for destruction, so free it. 2115 * 2116 * The vnode will be returned to the zone where it will 2117 * normally remain until it is needed for another vnode. We 2118 * need to cleanup (or verify that the cleanup has already 2119 * been done) any residual data left from its current use 2120 * so as not to contaminate the freshly allocated vnode. 2121 */ 2122 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); 2123 /* 2124 * Paired with vgone. 2125 */ 2126 vn_seqc_write_end_free(vp); 2127 2128 bo = &vp->v_bufobj; 2129 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); 2130 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); 2131 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); 2132 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); 2133 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); 2134 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); 2135 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, 2136 ("clean blk trie not empty")); 2137 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); 2138 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, 2139 ("dirty blk trie not empty")); 2140 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, 2141 ("Dangling rangelock waiters")); 2142 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, 2143 ("Leaked inactivation")); 2144 VI_UNLOCK(vp); 2145 cache_assert_no_entries(vp); 2146 2147 #ifdef MAC 2148 mac_vnode_destroy(vp); 2149 #endif 2150 if (vp->v_pollinfo != NULL) { 2151 /* 2152 * Use LK_NOWAIT to shut up witness about the lock. We may get 2153 * here while having another vnode locked when trying to 2154 * satisfy a lookup and needing to recycle. 
2155 */ 2156 VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT); 2157 destroy_vpollinfo(vp->v_pollinfo); 2158 VOP_UNLOCK(vp); 2159 vp->v_pollinfo = NULL; 2160 } 2161 vp->v_mountedhere = NULL; 2162 vp->v_unpcb = NULL; 2163 vp->v_rdev = NULL; 2164 vp->v_fifoinfo = NULL; 2165 vp->v_iflag = 0; 2166 vp->v_vflag = 0; 2167 bo->bo_flag = 0; 2168 vn_free(vp); 2169 } 2170 2171 /* 2172 * Delete from old mount point vnode list, if on one. 2173 */ 2174 static void 2175 delmntque(struct vnode *vp) 2176 { 2177 struct mount *mp; 2178 2179 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 2180 2181 mp = vp->v_mount; 2182 MNT_ILOCK(mp); 2183 VI_LOCK(vp); 2184 vp->v_mount = NULL; 2185 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, 2186 ("bad mount point vnode list size")); 2187 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2188 mp->mnt_nvnodelistsize--; 2189 MNT_REL(mp); 2190 MNT_IUNLOCK(mp); 2191 /* 2192 * The caller expects the interlock to be still held. 2193 */ 2194 ASSERT_VI_LOCKED(vp, __func__); 2195 } 2196 2197 static int 2198 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) 2199 { 2200 2201 KASSERT(vp->v_mount == NULL, 2202 ("insmntque: vnode already on per mount vnode list")); 2203 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)")); 2204 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { 2205 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); 2206 } else { 2207 KASSERT(!dtr, 2208 ("%s: can't have MNTK_UNLOCKED_INSMNTQUE and cleanup", 2209 __func__)); 2210 } 2211 2212 /* 2213 * We acquire the vnode interlock early to ensure that the 2214 * vnode cannot be recycled by another process releasing a 2215 * holdcnt on it before we get it on both the vnode list 2216 * and the active vnode list. The mount mutex protects only 2217 * manipulation of the vnode list and the vnode freelist 2218 * mutex protects only manipulation of the active vnode list. 2219 * Hence the need to hold the vnode interlock throughout. 2220 */ 2221 MNT_ILOCK(mp); 2222 VI_LOCK(vp); 2223 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && 2224 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || 2225 mp->mnt_nvnodelistsize == 0)) && 2226 (vp->v_vflag & VV_FORCEINSMQ) == 0) { 2227 VI_UNLOCK(vp); 2228 MNT_IUNLOCK(mp); 2229 if (dtr) { 2230 vp->v_data = NULL; 2231 vp->v_op = &dead_vnodeops; 2232 vgone(vp); 2233 vput(vp); 2234 } 2235 return (EBUSY); 2236 } 2237 vp->v_mount = mp; 2238 MNT_REF(mp); 2239 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); 2240 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, 2241 ("neg mount point vnode list size")); 2242 mp->mnt_nvnodelistsize++; 2243 VI_UNLOCK(vp); 2244 MNT_IUNLOCK(mp); 2245 return (0); 2246 } 2247 2248 /* 2249 * Insert into list of vnodes for the new mount point, if available. 2250 * insmntque() reclaims the vnode on insertion failure, insmntque1() 2251 * leaves handling of the vnode to the caller. 2252 */ 2253 int 2254 insmntque(struct vnode *vp, struct mount *mp) 2255 { 2256 return (insmntque1_int(vp, mp, true)); 2257 } 2258 2259 int 2260 insmntque1(struct vnode *vp, struct mount *mp) 2261 { 2262 return (insmntque1_int(vp, mp, false)); 2263 } 2264 2265 /* 2266 * Flush out and invalidate all buffers associated with a bufobj 2267 * Called with the underlying object locked. 
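 *
 * Illustrative sketch of a common calling pattern (via the vinvalbuf()
 * wrapper defined below), e.g. when a vnode is being reclaimed: first try
 * to flush dirty buffers, then fall back to throwing everything away:
 *
 *	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
 *		(void)vinvalbuf(vp, 0, 0, 0);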
2268 */ 2269 int 2270 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo) 2271 { 2272 int error; 2273 2274 BO_LOCK(bo); 2275 if (flags & V_SAVE) { 2276 error = bufobj_wwait(bo, slpflag, slptimeo); 2277 if (error) { 2278 BO_UNLOCK(bo); 2279 return (error); 2280 } 2281 if (bo->bo_dirty.bv_cnt > 0) { 2282 BO_UNLOCK(bo); 2283 do { 2284 error = BO_SYNC(bo, MNT_WAIT); 2285 } while (error == ERELOOKUP); 2286 if (error != 0) 2287 return (error); 2288 BO_LOCK(bo); 2289 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { 2290 BO_UNLOCK(bo); 2291 return (EBUSY); 2292 } 2293 } 2294 } 2295 /* 2296 * If you alter this loop please notice that interlock is dropped and 2297 * reacquired in flushbuflist. Special care is needed to ensure that 2298 * no race conditions occur from this. 2299 */ 2300 do { 2301 error = flushbuflist(&bo->bo_clean, 2302 flags, bo, slpflag, slptimeo); 2303 if (error == 0 && !(flags & V_CLEANONLY)) 2304 error = flushbuflist(&bo->bo_dirty, 2305 flags, bo, slpflag, slptimeo); 2306 if (error != 0 && error != EAGAIN) { 2307 BO_UNLOCK(bo); 2308 return (error); 2309 } 2310 } while (error != 0); 2311 2312 /* 2313 * Wait for I/O to complete. XXX needs cleaning up. The vnode can 2314 * have write I/O in-progress but if there is a VM object then the 2315 * VM object can also have read-I/O in-progress. 2316 */ 2317 do { 2318 bufobj_wwait(bo, 0, 0); 2319 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { 2320 BO_UNLOCK(bo); 2321 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); 2322 BO_LOCK(bo); 2323 } 2324 } while (bo->bo_numoutput > 0); 2325 BO_UNLOCK(bo); 2326 2327 /* 2328 * Destroy the copy in the VM cache, too. 2329 */ 2330 if (bo->bo_object != NULL && 2331 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) { 2332 VM_OBJECT_WLOCK(bo->bo_object); 2333 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? 2334 OBJPR_CLEANONLY : 0); 2335 VM_OBJECT_WUNLOCK(bo->bo_object); 2336 } 2337 2338 #ifdef INVARIANTS 2339 BO_LOCK(bo); 2340 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO | 2341 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || 2342 bo->bo_clean.bv_cnt > 0)) 2343 panic("vinvalbuf: flush failed"); 2344 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 && 2345 bo->bo_dirty.bv_cnt > 0) 2346 panic("vinvalbuf: flush dirty failed"); 2347 BO_UNLOCK(bo); 2348 #endif 2349 return (0); 2350 } 2351 2352 /* 2353 * Flush out and invalidate all buffers associated with a vnode. 2354 * Called with the underlying object locked. 2355 */ 2356 int 2357 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) 2358 { 2359 2360 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags); 2361 ASSERT_VOP_LOCKED(vp, "vinvalbuf"); 2362 if (vp->v_object != NULL && vp->v_object->handle != vp) 2363 return (0); 2364 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); 2365 } 2366 2367 /* 2368 * Flush out buffers on the specified list. 2369 * 2370 */ 2371 static int 2372 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag, 2373 int slptimeo) 2374 { 2375 struct buf *bp, *nbp; 2376 int retval, error; 2377 daddr_t lblkno; 2378 b_xflags_t xflags; 2379 2380 ASSERT_BO_WLOCKED(bo); 2381 2382 retval = 0; 2383 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { 2384 /* 2385 * If we are flushing both V_NORMAL and V_ALT buffers then 2386 * do not skip any buffers. If we are flushing only V_NORMAL 2387 * buffers then skip buffers marked as BX_ALTDATA. 
If we are 2388 * flushing only V_ALT buffers then skip buffers not marked 2389 * as BX_ALTDATA. 2390 */ 2391 if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) && 2392 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || 2393 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { 2394 continue; 2395 } 2396 if (nbp != NULL) { 2397 lblkno = nbp->b_lblkno; 2398 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); 2399 } 2400 retval = EAGAIN; 2401 error = BUF_TIMELOCK(bp, 2402 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo), 2403 "flushbuf", slpflag, slptimeo); 2404 if (error) { 2405 BO_LOCK(bo); 2406 return (error != ENOLCK ? error : EAGAIN); 2407 } 2408 KASSERT(bp->b_bufobj == bo, 2409 ("bp %p wrong b_bufobj %p should be %p", 2410 bp, bp->b_bufobj, bo)); 2411 /* 2412 * XXX Since there are no node locks for NFS, I 2413 * believe there is a slight chance that a delayed 2414 * write will occur while sleeping just above, so 2415 * check for it. 2416 */ 2417 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && 2418 (flags & V_SAVE)) { 2419 bremfree(bp); 2420 bp->b_flags |= B_ASYNC; 2421 bwrite(bp); 2422 BO_LOCK(bo); 2423 return (EAGAIN); /* XXX: why not loop ? */ 2424 } 2425 bremfree(bp); 2426 bp->b_flags |= (B_INVAL | B_RELBUF); 2427 bp->b_flags &= ~B_ASYNC; 2428 brelse(bp); 2429 BO_LOCK(bo); 2430 if (nbp == NULL) 2431 break; 2432 nbp = gbincore(bo, lblkno); 2433 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) 2434 != xflags) 2435 break; /* nbp invalid */ 2436 } 2437 return (retval); 2438 } 2439 2440 int 2441 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn) 2442 { 2443 struct buf *bp; 2444 int error; 2445 daddr_t lblkno; 2446 2447 ASSERT_BO_LOCKED(bo); 2448 2449 for (lblkno = startn;;) { 2450 again: 2451 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno); 2452 if (bp == NULL || bp->b_lblkno >= endn || 2453 bp->b_lblkno < startn) 2454 break; 2455 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 2456 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0); 2457 if (error != 0) { 2458 BO_RLOCK(bo); 2459 if (error == ENOLCK) 2460 goto again; 2461 return (error); 2462 } 2463 KASSERT(bp->b_bufobj == bo, 2464 ("bp %p wrong b_bufobj %p should be %p", 2465 bp, bp->b_bufobj, bo)); 2466 lblkno = bp->b_lblkno + 1; 2467 if ((bp->b_flags & B_MANAGED) == 0) 2468 bremfree(bp); 2469 bp->b_flags |= B_RELBUF; 2470 /* 2471 * In the VMIO case, use the B_NOREUSE flag to hint that the 2472 * pages backing each buffer in the range are unlikely to be 2473 * reused. Dirty buffers will have the hint applied once 2474 * they've been written. 2475 */ 2476 if ((bp->b_flags & B_VMIO) != 0) 2477 bp->b_flags |= B_NOREUSE; 2478 brelse(bp); 2479 BO_RLOCK(bo); 2480 } 2481 return (0); 2482 } 2483 2484 /* 2485 * Truncate a file's buffer and pages to a specified length. This 2486 * is in lieu of the old vinvalbuf mechanism, which performed unneeded 2487 * sync activity. 2488 */ 2489 int 2490 vtruncbuf(struct vnode *vp, off_t length, int blksize) 2491 { 2492 struct buf *bp, *nbp; 2493 struct bufobj *bo; 2494 daddr_t startlbn; 2495 2496 CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__, 2497 vp, blksize, (uintmax_t)length); 2498 2499 /* 2500 * Round up to the *next* lbn. 
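 *
 * For example (illustrative numbers): truncating to length 3000 with a
 * 4096 byte block size yields startlbn = howmany(3000, 4096) = 1, so
 * block 0, which still holds valid data, is preserved while blocks 1 and
 * up are invalidated below.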
2501 */ 2502 startlbn = howmany(length, blksize); 2503 2504 ASSERT_VOP_LOCKED(vp, "vtruncbuf"); 2505 2506 bo = &vp->v_bufobj; 2507 restart_unlocked: 2508 BO_LOCK(bo); 2509 2510 while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN) 2511 ; 2512 2513 if (length > 0) { 2514 restartsync: 2515 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2516 if (bp->b_lblkno > 0) 2517 continue; 2518 /* 2519 * Since we hold the vnode lock this should only 2520 * fail if we're racing with the buf daemon. 2521 */ 2522 if (BUF_LOCK(bp, 2523 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2524 BO_LOCKPTR(bo)) == ENOLCK) 2525 goto restart_unlocked; 2526 2527 VNASSERT((bp->b_flags & B_DELWRI), vp, 2528 ("buf(%p) on dirty queue without DELWRI", bp)); 2529 2530 bremfree(bp); 2531 bawrite(bp); 2532 BO_LOCK(bo); 2533 goto restartsync; 2534 } 2535 } 2536 2537 bufobj_wwait(bo, 0, 0); 2538 BO_UNLOCK(bo); 2539 vnode_pager_setsize(vp, length); 2540 2541 return (0); 2542 } 2543 2544 /* 2545 * Invalidate the cached pages of a file's buffer within the range of block 2546 * numbers [startlbn, endlbn). 2547 */ 2548 void 2549 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, 2550 int blksize) 2551 { 2552 struct bufobj *bo; 2553 off_t start, end; 2554 2555 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range"); 2556 2557 start = blksize * startlbn; 2558 end = blksize * endlbn; 2559 2560 bo = &vp->v_bufobj; 2561 BO_LOCK(bo); 2562 MPASS(blksize == bo->bo_bsize); 2563 2564 while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN) 2565 ; 2566 2567 BO_UNLOCK(bo); 2568 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); 2569 } 2570 2571 static int 2572 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, 2573 daddr_t startlbn, daddr_t endlbn) 2574 { 2575 struct buf *bp, *nbp; 2576 bool anyfreed; 2577 2578 ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked"); 2579 ASSERT_BO_LOCKED(bo); 2580 2581 do { 2582 anyfreed = false; 2583 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) { 2584 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2585 continue; 2586 if (BUF_LOCK(bp, 2587 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2588 BO_LOCKPTR(bo)) == ENOLCK) { 2589 BO_LOCK(bo); 2590 return (EAGAIN); 2591 } 2592 2593 bremfree(bp); 2594 bp->b_flags |= B_INVAL | B_RELBUF; 2595 bp->b_flags &= ~B_ASYNC; 2596 brelse(bp); 2597 anyfreed = true; 2598 2599 BO_LOCK(bo); 2600 if (nbp != NULL && 2601 (((nbp->b_xflags & BX_VNCLEAN) == 0) || 2602 nbp->b_vp != vp || 2603 (nbp->b_flags & B_DELWRI) != 0)) 2604 return (EAGAIN); 2605 } 2606 2607 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 2608 if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn) 2609 continue; 2610 if (BUF_LOCK(bp, 2611 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 2612 BO_LOCKPTR(bo)) == ENOLCK) { 2613 BO_LOCK(bo); 2614 return (EAGAIN); 2615 } 2616 bremfree(bp); 2617 bp->b_flags |= B_INVAL | B_RELBUF; 2618 bp->b_flags &= ~B_ASYNC; 2619 brelse(bp); 2620 anyfreed = true; 2621 2622 BO_LOCK(bo); 2623 if (nbp != NULL && 2624 (((nbp->b_xflags & BX_VNDIRTY) == 0) || 2625 (nbp->b_vp != vp) || 2626 (nbp->b_flags & B_DELWRI) == 0)) 2627 return (EAGAIN); 2628 } 2629 } while (anyfreed); 2630 return (0); 2631 } 2632 2633 static void 2634 buf_vlist_remove(struct buf *bp) 2635 { 2636 struct bufv *bv; 2637 b_xflags_t flags; 2638 2639 flags = bp->b_xflags; 2640 2641 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); 2642 ASSERT_BO_WLOCKED(bp->b_bufobj); 2643 KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 && 
2644 (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN), 2645 ("%s: buffer %p has invalid queue state", __func__, bp)); 2646 2647 if ((flags & BX_VNDIRTY) != 0) 2648 bv = &bp->b_bufobj->bo_dirty; 2649 else 2650 bv = &bp->b_bufobj->bo_clean; 2651 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); 2652 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); 2653 bv->bv_cnt--; 2654 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); 2655 } 2656 2657 /* 2658 * Add the buffer to the sorted clean or dirty block list. 2659 * 2660 * NOTE: xflags is passed as a constant, optimizing this inline function! 2661 */ 2662 static void 2663 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags) 2664 { 2665 struct bufv *bv; 2666 struct buf *n; 2667 int error; 2668 2669 ASSERT_BO_WLOCKED(bo); 2670 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, 2671 ("buf_vlist_add: bo %p does not allow bufs", bo)); 2672 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, 2673 ("dead bo %p", bo)); 2674 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, 2675 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); 2676 bp->b_xflags |= xflags; 2677 if (xflags & BX_VNDIRTY) 2678 bv = &bo->bo_dirty; 2679 else 2680 bv = &bo->bo_clean; 2681 2682 /* 2683 * Keep the list ordered. Optimize empty list insertion. Assume 2684 * we tend to grow at the tail so lookup_le should usually be cheaper 2685 * than _ge. 2686 */ 2687 if (bv->bv_cnt == 0 || 2688 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno) 2689 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs); 2690 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL) 2691 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); 2692 else 2693 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); 2694 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp); 2695 if (error) 2696 panic("buf_vlist_add: Preallocated nodes insufficient."); 2697 bv->bv_cnt++; 2698 } 2699 2700 /* 2701 * Look up a buffer using the buffer tries. 2702 */ 2703 struct buf * 2704 gbincore(struct bufobj *bo, daddr_t lblkno) 2705 { 2706 struct buf *bp; 2707 2708 ASSERT_BO_LOCKED(bo); 2709 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); 2710 if (bp != NULL) 2711 return (bp); 2712 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); 2713 } 2714 2715 /* 2716 * Look up a buf using the buffer tries, without the bufobj lock. This relies 2717 * on SMR for safe lookup, and bufs being in a no-free zone to provide type 2718 * stability of the result. Like other lockless lookups, the found buf may 2719 * already be invalid by the time this function returns. 2720 */ 2721 struct buf * 2722 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno) 2723 { 2724 struct buf *bp; 2725 2726 ASSERT_BO_UNLOCKED(bo); 2727 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); 2728 if (bp != NULL) 2729 return (bp); 2730 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); 2731 } 2732 2733 /* 2734 * Associate a buffer with a vnode. 2735 */ 2736 void 2737 bgetvp(struct vnode *vp, struct buf *bp) 2738 { 2739 struct bufobj *bo; 2740 2741 bo = &vp->v_bufobj; 2742 ASSERT_BO_WLOCKED(bo); 2743 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); 2744 2745 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); 2746 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, 2747 ("bgetvp: bp already attached! %p", bp)); 2748 2749 vhold(vp); 2750 bp->b_vp = vp; 2751 bp->b_bufobj = bo; 2752 /* 2753 * Insert onto list for new vnode. 
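 *
 * A freshly associated buffer always starts out on the clean list; it is
 * only moved to the dirty list by reassignbuf() once B_DELWRI has been
 * set on it.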
2754 */ 2755 buf_vlist_add(bp, bo, BX_VNCLEAN); 2756 } 2757 2758 /* 2759 * Disassociate a buffer from a vnode. 2760 */ 2761 void 2762 brelvp(struct buf *bp) 2763 { 2764 struct bufobj *bo; 2765 struct vnode *vp; 2766 2767 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); 2768 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); 2769 2770 /* 2771 * Delete from old vnode list, if on one. 2772 */ 2773 vp = bp->b_vp; /* XXX */ 2774 bo = bp->b_bufobj; 2775 BO_LOCK(bo); 2776 buf_vlist_remove(bp); 2777 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 2778 bo->bo_flag &= ~BO_ONWORKLST; 2779 mtx_lock(&sync_mtx); 2780 LIST_REMOVE(bo, bo_synclist); 2781 syncer_worklist_len--; 2782 mtx_unlock(&sync_mtx); 2783 } 2784 bp->b_vp = NULL; 2785 bp->b_bufobj = NULL; 2786 BO_UNLOCK(bo); 2787 vdrop(vp); 2788 } 2789 2790 /* 2791 * Add an item to the syncer work queue. 2792 */ 2793 static void 2794 vn_syncer_add_to_worklist(struct bufobj *bo, int delay) 2795 { 2796 int slot; 2797 2798 ASSERT_BO_WLOCKED(bo); 2799 2800 mtx_lock(&sync_mtx); 2801 if (bo->bo_flag & BO_ONWORKLST) 2802 LIST_REMOVE(bo, bo_synclist); 2803 else { 2804 bo->bo_flag |= BO_ONWORKLST; 2805 syncer_worklist_len++; 2806 } 2807 2808 if (delay > syncer_maxdelay - 2) 2809 delay = syncer_maxdelay - 2; 2810 slot = (syncer_delayno + delay) & syncer_mask; 2811 2812 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist); 2813 mtx_unlock(&sync_mtx); 2814 } 2815 2816 static int 2817 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS) 2818 { 2819 int error, len; 2820 2821 mtx_lock(&sync_mtx); 2822 len = syncer_worklist_len - sync_vnode_count; 2823 mtx_unlock(&sync_mtx); 2824 error = SYSCTL_OUT(req, &len, sizeof(len)); 2825 return (error); 2826 } 2827 2828 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, 2829 CTLTYPE_INT | CTLFLAG_MPSAFE| CTLFLAG_RD, NULL, 0, 2830 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length"); 2831 2832 static struct proc *updateproc; 2833 static void sched_sync(void); 2834 static struct kproc_desc up_kp = { 2835 "syncer", 2836 sched_sync, 2837 &updateproc 2838 }; 2839 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp); 2840 2841 static int 2842 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td) 2843 { 2844 struct vnode *vp; 2845 struct mount *mp; 2846 2847 *bo = LIST_FIRST(slp); 2848 if (*bo == NULL) 2849 return (0); 2850 vp = bo2vnode(*bo); 2851 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0) 2852 return (1); 2853 /* 2854 * We use vhold in case the vnode does not 2855 * successfully sync. vhold prevents the vnode from 2856 * going away when we unlock the sync_mtx so that 2857 * we can acquire the vnode interlock. 2858 */ 2859 vholdl(vp); 2860 mtx_unlock(&sync_mtx); 2861 VI_UNLOCK(vp); 2862 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) { 2863 vdrop(vp); 2864 mtx_lock(&sync_mtx); 2865 return (*bo == LIST_FIRST(slp)); 2866 } 2867 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || 2868 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, 2869 ("suspended mp syncing vp %p", vp)); 2870 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 2871 (void) VOP_FSYNC(vp, MNT_LAZY, td); 2872 VOP_UNLOCK(vp); 2873 vn_finished_write(mp); 2874 BO_LOCK(*bo); 2875 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { 2876 /* 2877 * Put us back on the worklist. The worklist 2878 * routine will remove us from our current 2879 * position and then add us back in at a later 2880 * position. 
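 *
 * With the default syncdelay of 30 seconds this re-queues the bufobj
 * roughly 30 slots ahead of the slot currently being drained, i.e. the
 * vnode will be revisited about half a minute from now.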
2881 */ 2882 vn_syncer_add_to_worklist(*bo, syncdelay); 2883 } 2884 BO_UNLOCK(*bo); 2885 vdrop(vp); 2886 mtx_lock(&sync_mtx); 2887 return (0); 2888 } 2889 2890 static int first_printf = 1; 2891 2892 /* 2893 * System filesystem synchronizer daemon. 2894 */ 2895 static void 2896 sched_sync(void) 2897 { 2898 struct synclist *next, *slp; 2899 struct bufobj *bo; 2900 long starttime; 2901 struct thread *td = curthread; 2902 int last_work_seen; 2903 int net_worklist_len; 2904 int syncer_final_iter; 2905 int error; 2906 2907 last_work_seen = 0; 2908 syncer_final_iter = 0; 2909 syncer_state = SYNCER_RUNNING; 2910 starttime = time_uptime; 2911 td->td_pflags |= TDP_NORUNNINGBUF; 2912 2913 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, 2914 SHUTDOWN_PRI_LAST); 2915 2916 mtx_lock(&sync_mtx); 2917 for (;;) { 2918 if (syncer_state == SYNCER_FINAL_DELAY && 2919 syncer_final_iter == 0) { 2920 mtx_unlock(&sync_mtx); 2921 kproc_suspend_check(td->td_proc); 2922 mtx_lock(&sync_mtx); 2923 } 2924 net_worklist_len = syncer_worklist_len - sync_vnode_count; 2925 if (syncer_state != SYNCER_RUNNING && 2926 starttime != time_uptime) { 2927 if (first_printf) { 2928 printf("\nSyncing disks, vnodes remaining... "); 2929 first_printf = 0; 2930 } 2931 printf("%d ", net_worklist_len); 2932 } 2933 starttime = time_uptime; 2934 2935 /* 2936 * Push files whose dirty time has expired. Be careful 2937 * of interrupt race on slp queue. 2938 * 2939 * Skip over empty worklist slots when shutting down. 2940 */ 2941 do { 2942 slp = &syncer_workitem_pending[syncer_delayno]; 2943 syncer_delayno += 1; 2944 if (syncer_delayno == syncer_maxdelay) 2945 syncer_delayno = 0; 2946 next = &syncer_workitem_pending[syncer_delayno]; 2947 /* 2948 * If the worklist has wrapped since the 2949 * it was emptied of all but syncer vnodes, 2950 * switch to the FINAL_DELAY state and run 2951 * for one more second. 2952 */ 2953 if (syncer_state == SYNCER_SHUTTING_DOWN && 2954 net_worklist_len == 0 && 2955 last_work_seen == syncer_delayno) { 2956 syncer_state = SYNCER_FINAL_DELAY; 2957 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP; 2958 } 2959 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) && 2960 syncer_worklist_len > 0); 2961 2962 /* 2963 * Keep track of the last time there was anything 2964 * on the worklist other than syncer vnodes. 2965 * Return to the SHUTTING_DOWN state if any 2966 * new work appears. 2967 */ 2968 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING) 2969 last_work_seen = syncer_delayno; 2970 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY) 2971 syncer_state = SYNCER_SHUTTING_DOWN; 2972 while (!LIST_EMPTY(slp)) { 2973 error = sync_vnode(slp, &bo, td); 2974 if (error == 1) { 2975 LIST_REMOVE(bo, bo_synclist); 2976 LIST_INSERT_HEAD(next, bo, bo_synclist); 2977 continue; 2978 } 2979 2980 if (first_printf == 0) { 2981 /* 2982 * Drop the sync mutex, because some watchdog 2983 * drivers need to sleep while patting 2984 */ 2985 mtx_unlock(&sync_mtx); 2986 wdog_kern_pat(WD_LASTVAL); 2987 mtx_lock(&sync_mtx); 2988 } 2989 } 2990 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0) 2991 syncer_final_iter--; 2992 /* 2993 * The variable rushjob allows the kernel to speed up the 2994 * processing of the filesystem syncer process. A rushjob 2995 * value of N tells the filesystem syncer to process the next 2996 * N seconds worth of work on its queue ASAP. 
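 * As an example, three speedup_syncer() calls within one second leave
 * rushjob at 3, so the loop below drains the next three slots back to
 * back before falling back to its usual one-slot-per-second pace.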
Currently rushjob 2997 * is used by the soft update code to speed up the filesystem 2998 * syncer process when the incore state is getting so far 2999 * ahead of the disk that the kernel memory pool is being 3000 * threatened with exhaustion. 3001 */ 3002 if (rushjob > 0) { 3003 rushjob -= 1; 3004 continue; 3005 } 3006 /* 3007 * Just sleep for a short period of time between 3008 * iterations when shutting down to allow some I/O 3009 * to happen. 3010 * 3011 * If it has taken us less than a second to process the 3012 * current work, then wait. Otherwise start right over 3013 * again. We can still lose time if any single round 3014 * takes more than two seconds, but it does not really 3015 * matter as we are just trying to generally pace the 3016 * filesystem activity. 3017 */ 3018 if (syncer_state != SYNCER_RUNNING || 3019 time_uptime == starttime) { 3020 thread_lock(td); 3021 sched_prio(td, PPAUSE); 3022 thread_unlock(td); 3023 } 3024 if (syncer_state != SYNCER_RUNNING) 3025 cv_timedwait(&sync_wakeup, &sync_mtx, 3026 hz / SYNCER_SHUTDOWN_SPEEDUP); 3027 else if (time_uptime == starttime) 3028 cv_timedwait(&sync_wakeup, &sync_mtx, hz); 3029 } 3030 } 3031 3032 /* 3033 * Request the syncer daemon to speed up its work. 3034 * We never push it to speed up more than half of its 3035 * normal turn time, otherwise it could take over the cpu. 3036 */ 3037 int 3038 speedup_syncer(void) 3039 { 3040 int ret = 0; 3041 3042 mtx_lock(&sync_mtx); 3043 if (rushjob < syncdelay / 2) { 3044 rushjob += 1; 3045 stat_rush_requests += 1; 3046 ret = 1; 3047 } 3048 mtx_unlock(&sync_mtx); 3049 cv_broadcast(&sync_wakeup); 3050 return (ret); 3051 } 3052 3053 /* 3054 * Tell the syncer to speed up its work and run though its work 3055 * list several times, then tell it to shut down. 3056 */ 3057 static void 3058 syncer_shutdown(void *arg, int howto) 3059 { 3060 3061 if (howto & RB_NOSYNC) 3062 return; 3063 mtx_lock(&sync_mtx); 3064 syncer_state = SYNCER_SHUTTING_DOWN; 3065 rushjob = 0; 3066 mtx_unlock(&sync_mtx); 3067 cv_broadcast(&sync_wakeup); 3068 kproc_shutdown(arg, howto); 3069 } 3070 3071 void 3072 syncer_suspend(void) 3073 { 3074 3075 syncer_shutdown(updateproc, 0); 3076 } 3077 3078 void 3079 syncer_resume(void) 3080 { 3081 3082 mtx_lock(&sync_mtx); 3083 first_printf = 1; 3084 syncer_state = SYNCER_RUNNING; 3085 mtx_unlock(&sync_mtx); 3086 cv_broadcast(&sync_wakeup); 3087 kproc_resume(updateproc); 3088 } 3089 3090 /* 3091 * Move the buffer between the clean and dirty lists of its vnode. 3092 */ 3093 void 3094 reassignbuf(struct buf *bp) 3095 { 3096 struct vnode *vp; 3097 struct bufobj *bo; 3098 int delay; 3099 #ifdef INVARIANTS 3100 struct bufv *bv; 3101 #endif 3102 3103 vp = bp->b_vp; 3104 bo = bp->b_bufobj; 3105 3106 KASSERT((bp->b_flags & B_PAGING) == 0, 3107 ("%s: cannot reassign paging buffer %p", __func__, bp)); 3108 3109 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X", 3110 bp, bp->b_vp, bp->b_flags); 3111 3112 BO_LOCK(bo); 3113 buf_vlist_remove(bp); 3114 3115 /* 3116 * If dirty, put on list of dirty buffers; otherwise insert onto list 3117 * of clean buffers. 
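 *
 * The per-type delays used below (dirdelay, metadelay, filedelay) merely
 * stagger when the syncer will first visit the vnode: for instance a
 * dirty directory is queued dirdelay seconds out, while a regular file
 * waits filedelay seconds.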
3118 */ 3119 if (bp->b_flags & B_DELWRI) { 3120 if ((bo->bo_flag & BO_ONWORKLST) == 0) { 3121 switch (vp->v_type) { 3122 case VDIR: 3123 delay = dirdelay; 3124 break; 3125 case VCHR: 3126 delay = metadelay; 3127 break; 3128 default: 3129 delay = filedelay; 3130 } 3131 vn_syncer_add_to_worklist(bo, delay); 3132 } 3133 buf_vlist_add(bp, bo, BX_VNDIRTY); 3134 } else { 3135 buf_vlist_add(bp, bo, BX_VNCLEAN); 3136 3137 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { 3138 mtx_lock(&sync_mtx); 3139 LIST_REMOVE(bo, bo_synclist); 3140 syncer_worklist_len--; 3141 mtx_unlock(&sync_mtx); 3142 bo->bo_flag &= ~BO_ONWORKLST; 3143 } 3144 } 3145 #ifdef INVARIANTS 3146 bv = &bo->bo_clean; 3147 bp = TAILQ_FIRST(&bv->bv_hd); 3148 KASSERT(bp == NULL || bp->b_bufobj == bo, 3149 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3150 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3151 KASSERT(bp == NULL || bp->b_bufobj == bo, 3152 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3153 bv = &bo->bo_dirty; 3154 bp = TAILQ_FIRST(&bv->bv_hd); 3155 KASSERT(bp == NULL || bp->b_bufobj == bo, 3156 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3157 bp = TAILQ_LAST(&bv->bv_hd, buflists); 3158 KASSERT(bp == NULL || bp->b_bufobj == bo, 3159 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); 3160 #endif 3161 BO_UNLOCK(bo); 3162 } 3163 3164 static void 3165 v_init_counters(struct vnode *vp) 3166 { 3167 3168 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, 3169 vp, ("%s called for an initialized vnode", __FUNCTION__)); 3170 ASSERT_VI_UNLOCKED(vp, __FUNCTION__); 3171 3172 refcount_init(&vp->v_holdcnt, 1); 3173 refcount_init(&vp->v_usecount, 1); 3174 } 3175 3176 /* 3177 * Grab a particular vnode from the free list, increment its 3178 * reference count and lock it. VIRF_DOOMED is set if the vnode 3179 * is being destroyed. Only callers who specify LK_RETRY will 3180 * see doomed vnodes. If inactive processing was delayed in 3181 * vput try to do it here. 3182 * 3183 * usecount is manipulated using atomics without holding any locks. 3184 * 3185 * holdcnt can be manipulated using atomics without holding any locks, 3186 * except when transitioning 1<->0, in which case the interlock is held. 3187 * 3188 * Consumers which don't guarantee liveness of the vnode can use SMR to 3189 * try to get a reference. Note this operation can fail since the vnode 3190 * may be awaiting getting freed by the time they get to it. 
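 *
 * Minimal usage sketch for the SMR path (illustrative only; real
 * consumers such as the name cache have more elaborate retry logic):
 *
 *	vfs_smr_enter();
 *	vp = lookup result obtained without locks;
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		restart the lookup;
 *	error = vget_finish(vp, LK_SHARED, vs);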
3191 */ 3192 enum vgetstate 3193 vget_prep_smr(struct vnode *vp) 3194 { 3195 enum vgetstate vs; 3196 3197 VFS_SMR_ASSERT_ENTERED(); 3198 3199 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3200 vs = VGET_USECOUNT; 3201 } else { 3202 if (vhold_smr(vp)) 3203 vs = VGET_HOLDCNT; 3204 else 3205 vs = VGET_NONE; 3206 } 3207 return (vs); 3208 } 3209 3210 enum vgetstate 3211 vget_prep(struct vnode *vp) 3212 { 3213 enum vgetstate vs; 3214 3215 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { 3216 vs = VGET_USECOUNT; 3217 } else { 3218 vhold(vp); 3219 vs = VGET_HOLDCNT; 3220 } 3221 return (vs); 3222 } 3223 3224 void 3225 vget_abort(struct vnode *vp, enum vgetstate vs) 3226 { 3227 3228 switch (vs) { 3229 case VGET_USECOUNT: 3230 vrele(vp); 3231 break; 3232 case VGET_HOLDCNT: 3233 vdrop(vp); 3234 break; 3235 default: 3236 __assert_unreachable(); 3237 } 3238 } 3239 3240 int 3241 vget(struct vnode *vp, int flags) 3242 { 3243 enum vgetstate vs; 3244 3245 vs = vget_prep(vp); 3246 return (vget_finish(vp, flags, vs)); 3247 } 3248 3249 int 3250 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) 3251 { 3252 int error; 3253 3254 if ((flags & LK_INTERLOCK) != 0) 3255 ASSERT_VI_LOCKED(vp, __func__); 3256 else 3257 ASSERT_VI_UNLOCKED(vp, __func__); 3258 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3259 VNPASS(vp->v_holdcnt > 0, vp); 3260 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3261 3262 error = vn_lock(vp, flags); 3263 if (__predict_false(error != 0)) { 3264 vget_abort(vp, vs); 3265 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, 3266 vp); 3267 return (error); 3268 } 3269 3270 vget_finish_ref(vp, vs); 3271 return (0); 3272 } 3273 3274 void 3275 vget_finish_ref(struct vnode *vp, enum vgetstate vs) 3276 { 3277 int old; 3278 3279 VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp); 3280 VNPASS(vp->v_holdcnt > 0, vp); 3281 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); 3282 3283 if (vs == VGET_USECOUNT) 3284 return; 3285 3286 /* 3287 * We hold the vnode. If the usecount is 0 it will be utilized to keep 3288 * the vnode around. Otherwise someone else lended their hold count and 3289 * we have to drop ours. 3290 */ 3291 old = atomic_fetchadd_int(&vp->v_usecount, 1); 3292 VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old)); 3293 if (old != 0) { 3294 #ifdef INVARIANTS 3295 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); 3296 VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old)); 3297 #else 3298 refcount_release(&vp->v_holdcnt); 3299 #endif 3300 } 3301 } 3302 3303 void 3304 vref(struct vnode *vp) 3305 { 3306 enum vgetstate vs; 3307 3308 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3309 vs = vget_prep(vp); 3310 vget_finish_ref(vp, vs); 3311 } 3312 3313 void 3314 vrefact(struct vnode *vp) 3315 { 3316 3317 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3318 #ifdef INVARIANTS 3319 int old = atomic_fetchadd_int(&vp->v_usecount, 1); 3320 VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old)); 3321 #else 3322 refcount_acquire(&vp->v_usecount); 3323 #endif 3324 } 3325 3326 void 3327 vlazy(struct vnode *vp) 3328 { 3329 struct mount *mp; 3330 3331 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); 3332 3333 if ((vp->v_mflag & VMP_LAZYLIST) != 0) 3334 return; 3335 /* 3336 * We may get here for inactive routines after the vnode got doomed. 
3337 */ 3338 if (VN_IS_DOOMED(vp)) 3339 return; 3340 mp = vp->v_mount; 3341 mtx_lock(&mp->mnt_listmtx); 3342 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { 3343 vp->v_mflag |= VMP_LAZYLIST; 3344 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3345 mp->mnt_lazyvnodelistsize++; 3346 } 3347 mtx_unlock(&mp->mnt_listmtx); 3348 } 3349 3350 static void 3351 vunlazy(struct vnode *vp) 3352 { 3353 struct mount *mp; 3354 3355 ASSERT_VI_LOCKED(vp, __func__); 3356 VNPASS(!VN_IS_DOOMED(vp), vp); 3357 3358 mp = vp->v_mount; 3359 mtx_lock(&mp->mnt_listmtx); 3360 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3361 /* 3362 * Don't remove the vnode from the lazy list if another thread 3363 * has increased the hold count. It may have re-enqueued the 3364 * vnode to the lazy list and is now responsible for its 3365 * removal. 3366 */ 3367 if (vp->v_holdcnt == 0) { 3368 vp->v_mflag &= ~VMP_LAZYLIST; 3369 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3370 mp->mnt_lazyvnodelistsize--; 3371 } 3372 mtx_unlock(&mp->mnt_listmtx); 3373 } 3374 3375 /* 3376 * This routine is only meant to be called from vgonel prior to dooming 3377 * the vnode. 3378 */ 3379 static void 3380 vunlazy_gone(struct vnode *vp) 3381 { 3382 struct mount *mp; 3383 3384 ASSERT_VOP_ELOCKED(vp, __func__); 3385 ASSERT_VI_LOCKED(vp, __func__); 3386 VNPASS(!VN_IS_DOOMED(vp), vp); 3387 3388 if (vp->v_mflag & VMP_LAZYLIST) { 3389 mp = vp->v_mount; 3390 mtx_lock(&mp->mnt_listmtx); 3391 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 3392 vp->v_mflag &= ~VMP_LAZYLIST; 3393 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); 3394 mp->mnt_lazyvnodelistsize--; 3395 mtx_unlock(&mp->mnt_listmtx); 3396 } 3397 } 3398 3399 static void 3400 vdefer_inactive(struct vnode *vp) 3401 { 3402 3403 ASSERT_VI_LOCKED(vp, __func__); 3404 VNPASS(vp->v_holdcnt > 0, vp); 3405 if (VN_IS_DOOMED(vp)) { 3406 vdropl(vp); 3407 return; 3408 } 3409 if (vp->v_iflag & VI_DEFINACT) { 3410 VNPASS(vp->v_holdcnt > 1, vp); 3411 vdropl(vp); 3412 return; 3413 } 3414 if (vp->v_usecount > 0) { 3415 vp->v_iflag &= ~VI_OWEINACT; 3416 vdropl(vp); 3417 return; 3418 } 3419 vlazy(vp); 3420 vp->v_iflag |= VI_DEFINACT; 3421 VI_UNLOCK(vp); 3422 atomic_add_long(&deferred_inact, 1); 3423 } 3424 3425 static void 3426 vdefer_inactive_unlocked(struct vnode *vp) 3427 { 3428 3429 VI_LOCK(vp); 3430 if ((vp->v_iflag & VI_OWEINACT) == 0) { 3431 vdropl(vp); 3432 return; 3433 } 3434 vdefer_inactive(vp); 3435 } 3436 3437 enum vput_op { VRELE, VPUT, VUNREF }; 3438 3439 /* 3440 * Handle ->v_usecount transitioning to 0. 3441 * 3442 * By releasing the last usecount we take ownership of the hold count which 3443 * provides liveness of the vnode, meaning we have to vdrop. 3444 * 3445 * For all vnodes we may need to perform inactive processing. It requires an 3446 * exclusive lock on the vnode, while it is legal to call here with only a 3447 * shared lock (or no locks). If locking the vnode in an expected manner fails, 3448 * inactive processing gets deferred to the syncer. 3449 * 3450 * XXX Some filesystems pass in an exclusively locked vnode and strongly depend 3451 * on the lock being held all the way until VOP_INACTIVE. This in particular 3452 * happens with UFS which adds half-constructed vnodes to the hash, where they 3453 * can be found by other code. 
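 *
 * Note the retry loop around vinactive() below: VOP_INACTIVE may return
 * ERELOOKUP, in which case the vnode is re-locked and inactive processing
 * is attempted again, but only when this routine is the one responsible
 * for unlocking the vnode.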
3454 */ 3455 static void 3456 vput_final(struct vnode *vp, enum vput_op func) 3457 { 3458 int error; 3459 bool want_unlock; 3460 3461 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3462 VNPASS(vp->v_holdcnt > 0, vp); 3463 3464 VI_LOCK(vp); 3465 3466 /* 3467 * By the time we got here someone else might have transitioned 3468 * the count back to > 0. 3469 */ 3470 if (vp->v_usecount > 0) 3471 goto out; 3472 3473 /* 3474 * If the vnode is doomed vgone already performed inactive processing 3475 * (if needed). 3476 */ 3477 if (VN_IS_DOOMED(vp)) 3478 goto out; 3479 3480 if (__predict_true(VOP_NEED_INACTIVE(vp) == 0)) 3481 goto out; 3482 3483 if (vp->v_iflag & VI_DOINGINACT) 3484 goto out; 3485 3486 /* 3487 * Locking operations here will drop the interlock and possibly the 3488 * vnode lock, opening a window where the vnode can get doomed all the 3489 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to 3490 * perform inactive. 3491 */ 3492 vp->v_iflag |= VI_OWEINACT; 3493 want_unlock = false; 3494 error = 0; 3495 switch (func) { 3496 case VRELE: 3497 switch (VOP_ISLOCKED(vp)) { 3498 case LK_EXCLUSIVE: 3499 break; 3500 case LK_EXCLOTHER: 3501 case 0: 3502 want_unlock = true; 3503 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK); 3504 VI_LOCK(vp); 3505 break; 3506 default: 3507 /* 3508 * The lock has at least one sharer, but we have no way 3509 * to conclude whether this is us. Play it safe and 3510 * defer processing. 3511 */ 3512 error = EAGAIN; 3513 break; 3514 } 3515 break; 3516 case VPUT: 3517 want_unlock = true; 3518 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3519 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | 3520 LK_NOWAIT); 3521 VI_LOCK(vp); 3522 } 3523 break; 3524 case VUNREF: 3525 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { 3526 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); 3527 VI_LOCK(vp); 3528 } 3529 break; 3530 } 3531 if (error == 0) { 3532 if (func == VUNREF) { 3533 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, 3534 ("recursive vunref")); 3535 vp->v_vflag |= VV_UNREF; 3536 } 3537 for (;;) { 3538 error = vinactive(vp); 3539 if (want_unlock) 3540 VOP_UNLOCK(vp); 3541 if (error != ERELOOKUP || !want_unlock) 3542 break; 3543 VOP_LOCK(vp, LK_EXCLUSIVE); 3544 } 3545 if (func == VUNREF) 3546 vp->v_vflag &= ~VV_UNREF; 3547 vdropl(vp); 3548 } else { 3549 vdefer_inactive(vp); 3550 } 3551 return; 3552 out: 3553 if (func == VPUT) 3554 VOP_UNLOCK(vp); 3555 vdropl(vp); 3556 } 3557 3558 /* 3559 * Decrement ->v_usecount for a vnode. 3560 * 3561 * Releasing the last use count requires additional processing, see vput_final 3562 * above for details. 3563 * 3564 * Comment above each variant denotes lock state on entry and exit. 
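 *
 * For instance (illustrative sketch), a caller which obtained a locked,
 * referenced vnode finishes with vput():
 *
 *	error = VFS_ROOT(mp, LK_SHARED, &vp);
 *	...
 *	vput(vp);
 *
 * whereas code holding only an unlocked reference ends with vrele(vp).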
3565 */ 3566 3567 /* 3568 * in: any 3569 * out: same as passed in 3570 */ 3571 void 3572 vrele(struct vnode *vp) 3573 { 3574 3575 ASSERT_VI_UNLOCKED(vp, __func__); 3576 if (!refcount_release(&vp->v_usecount)) 3577 return; 3578 vput_final(vp, VRELE); 3579 } 3580 3581 /* 3582 * in: locked 3583 * out: unlocked 3584 */ 3585 void 3586 vput(struct vnode *vp) 3587 { 3588 3589 ASSERT_VOP_LOCKED(vp, __func__); 3590 ASSERT_VI_UNLOCKED(vp, __func__); 3591 if (!refcount_release(&vp->v_usecount)) { 3592 VOP_UNLOCK(vp); 3593 return; 3594 } 3595 vput_final(vp, VPUT); 3596 } 3597 3598 /* 3599 * in: locked 3600 * out: locked 3601 */ 3602 void 3603 vunref(struct vnode *vp) 3604 { 3605 3606 ASSERT_VOP_LOCKED(vp, __func__); 3607 ASSERT_VI_UNLOCKED(vp, __func__); 3608 if (!refcount_release(&vp->v_usecount)) 3609 return; 3610 vput_final(vp, VUNREF); 3611 } 3612 3613 void 3614 vhold(struct vnode *vp) 3615 { 3616 int old; 3617 3618 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3619 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3620 VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3621 ("%s: wrong hold count %d", __func__, old)); 3622 if (old == 0) 3623 vfs_freevnodes_dec(); 3624 } 3625 3626 void 3627 vholdnz(struct vnode *vp) 3628 { 3629 3630 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3631 #ifdef INVARIANTS 3632 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3633 VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp, 3634 ("%s: wrong hold count %d", __func__, old)); 3635 #else 3636 atomic_add_int(&vp->v_holdcnt, 1); 3637 #endif 3638 } 3639 3640 /* 3641 * Grab a hold count unless the vnode is freed. 3642 * 3643 * Only use this routine if vfs smr is the only protection you have against 3644 * freeing the vnode. 3645 * 3646 * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag 3647 * is not set. After the flag is set the vnode becomes immutable to anyone but 3648 * the thread which managed to set the flag. 3649 * 3650 * It may be tempting to replace the loop with: 3651 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1); 3652 * if (count & VHOLD_NO_SMR) { 3653 * backpedal and error out; 3654 * } 3655 * 3656 * However, while this is more performant, it hinders debugging by eliminating 3657 * the previously mentioned invariant. 3658 */ 3659 bool 3660 vhold_smr(struct vnode *vp) 3661 { 3662 int count; 3663 3664 VFS_SMR_ASSERT_ENTERED(); 3665 3666 count = atomic_load_int(&vp->v_holdcnt); 3667 for (;;) { 3668 if (count & VHOLD_NO_SMR) { 3669 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3670 ("non-zero hold count with flags %d\n", count)); 3671 return (false); 3672 } 3673 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3674 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3675 if (count == 0) 3676 vfs_freevnodes_dec(); 3677 return (true); 3678 } 3679 } 3680 } 3681 3682 /* 3683 * Hold a free vnode for recycling. 3684 * 3685 * Note: vnode_init references this comment. 3686 * 3687 * Attempts to recycle only need the global vnode list lock and have no use for 3688 * SMR. 3689 * 3690 * However, vnodes get inserted into the global list before they get fully 3691 * initialized and stay there until UMA decides to free the memory. This in 3692 * particular means the target can be found before it becomes usable and after 3693 * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to 3694 * VHOLD_NO_SMR. 3695 * 3696 * Note: the vnode may gain more references after we transition the count 0->1. 
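 *
 * Because of that the caller gets no exclusivity guarantee; vtryrecycle()
 * re-checks v_usecount under the vnode interlock and backs off if the
 * vnode picked up a user in the meantime.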
3697 */ 3698 static bool 3699 vhold_recycle_free(struct vnode *vp) 3700 { 3701 int count; 3702 3703 mtx_assert(&vnode_list_mtx, MA_OWNED); 3704 3705 count = atomic_load_int(&vp->v_holdcnt); 3706 for (;;) { 3707 if (count & VHOLD_NO_SMR) { 3708 VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp, 3709 ("non-zero hold count with flags %d\n", count)); 3710 return (false); 3711 } 3712 VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count)); 3713 if (count > 0) { 3714 return (false); 3715 } 3716 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { 3717 vfs_freevnodes_dec(); 3718 return (true); 3719 } 3720 } 3721 } 3722 3723 static void __noinline 3724 vdbatch_process(struct vdbatch *vd) 3725 { 3726 struct vnode *vp; 3727 int i; 3728 3729 mtx_assert(&vd->lock, MA_OWNED); 3730 MPASS(curthread->td_pinned > 0); 3731 MPASS(vd->index == VDBATCH_SIZE); 3732 3733 /* 3734 * Attempt to requeue the passed batch, but give up easily. 3735 * 3736 * Despite batching the mechanism is prone to transient *significant* 3737 * lock contention, where vnode_list_mtx becomes the primary bottleneck 3738 * if multiple CPUs get here (one real-world example is highly parallel 3739 * do-nothing make , which will stat *tons* of vnodes). Since it is 3740 * quasi-LRU (read: not that great even if fully honoured) just dodge 3741 * the problem. Parties which don't like it are welcome to implement 3742 * something better. 3743 */ 3744 critical_enter(); 3745 if (mtx_trylock(&vnode_list_mtx)) { 3746 for (i = 0; i < VDBATCH_SIZE; i++) { 3747 vp = vd->tab[i]; 3748 vd->tab[i] = NULL; 3749 TAILQ_REMOVE(&vnode_list, vp, v_vnodelist); 3750 TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist); 3751 MPASS(vp->v_dbatchcpu != NOCPU); 3752 vp->v_dbatchcpu = NOCPU; 3753 } 3754 mtx_unlock(&vnode_list_mtx); 3755 } else { 3756 counter_u64_add(vnode_skipped_requeues, 1); 3757 3758 for (i = 0; i < VDBATCH_SIZE; i++) { 3759 vp = vd->tab[i]; 3760 vd->tab[i] = NULL; 3761 MPASS(vp->v_dbatchcpu != NOCPU); 3762 vp->v_dbatchcpu = NOCPU; 3763 } 3764 } 3765 vd->index = 0; 3766 critical_exit(); 3767 } 3768 3769 static void 3770 vdbatch_enqueue(struct vnode *vp) 3771 { 3772 struct vdbatch *vd; 3773 3774 ASSERT_VI_LOCKED(vp, __func__); 3775 VNPASS(!VN_IS_DOOMED(vp), vp); 3776 3777 if (vp->v_dbatchcpu != NOCPU) { 3778 VI_UNLOCK(vp); 3779 return; 3780 } 3781 3782 sched_pin(); 3783 vd = DPCPU_PTR(vd); 3784 mtx_lock(&vd->lock); 3785 MPASS(vd->index < VDBATCH_SIZE); 3786 MPASS(vd->tab[vd->index] == NULL); 3787 /* 3788 * A hack: we depend on being pinned so that we know what to put in 3789 * ->v_dbatchcpu. 3790 */ 3791 vp->v_dbatchcpu = curcpu; 3792 vd->tab[vd->index] = vp; 3793 vd->index++; 3794 VI_UNLOCK(vp); 3795 if (vd->index == VDBATCH_SIZE) 3796 vdbatch_process(vd); 3797 mtx_unlock(&vd->lock); 3798 sched_unpin(); 3799 } 3800 3801 /* 3802 * This routine must only be called for vnodes which are about to be 3803 * deallocated. Supporting dequeue for arbitrary vndoes would require 3804 * validating that the locked batch matches. 
3805 */ 3806 static void 3807 vdbatch_dequeue(struct vnode *vp) 3808 { 3809 struct vdbatch *vd; 3810 int i; 3811 short cpu; 3812 3813 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); 3814 3815 cpu = vp->v_dbatchcpu; 3816 if (cpu == NOCPU) 3817 return; 3818 3819 vd = DPCPU_ID_PTR(cpu, vd); 3820 mtx_lock(&vd->lock); 3821 for (i = 0; i < vd->index; i++) { 3822 if (vd->tab[i] != vp) 3823 continue; 3824 vp->v_dbatchcpu = NOCPU; 3825 vd->index--; 3826 vd->tab[i] = vd->tab[vd->index]; 3827 vd->tab[vd->index] = NULL; 3828 break; 3829 } 3830 mtx_unlock(&vd->lock); 3831 /* 3832 * Either we dequeued the vnode above or the target CPU beat us to it. 3833 */ 3834 MPASS(vp->v_dbatchcpu == NOCPU); 3835 } 3836 3837 /* 3838 * Drop the hold count of the vnode. If this is the last reference to 3839 * the vnode we place it on the free list unless it has been vgone'd 3840 * (marked VIRF_DOOMED) in which case we will free it. 3841 * 3842 * Because the vnode vm object keeps a hold reference on the vnode if 3843 * there is at least one resident non-cached page, the vnode cannot 3844 * leave the active list without the page cleanup done. 3845 */ 3846 static void __noinline 3847 vdropl_final(struct vnode *vp) 3848 { 3849 3850 ASSERT_VI_LOCKED(vp, __func__); 3851 VNPASS(VN_IS_DOOMED(vp), vp); 3852 /* 3853 * Set the VHOLD_NO_SMR flag. 3854 * 3855 * We may be racing against vhold_smr. If they win we can just pretend 3856 * we never got this far, they will vdrop later. 3857 */ 3858 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { 3859 vfs_freevnodes_inc(); 3860 VI_UNLOCK(vp); 3861 /* 3862 * We lost the aforementioned race. Any subsequent access is 3863 * invalid as they might have managed to vdropl on their own. 3864 */ 3865 return; 3866 } 3867 /* 3868 * Don't bump freevnodes as this one is going away. 3869 */ 3870 freevnode(vp); 3871 } 3872 3873 void 3874 vdrop(struct vnode *vp) 3875 { 3876 3877 ASSERT_VI_UNLOCKED(vp, __func__); 3878 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3879 if (refcount_release_if_not_last(&vp->v_holdcnt)) 3880 return; 3881 VI_LOCK(vp); 3882 vdropl(vp); 3883 } 3884 3885 static void __always_inline 3886 vdropl_impl(struct vnode *vp, bool enqueue) 3887 { 3888 3889 ASSERT_VI_LOCKED(vp, __func__); 3890 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3891 if (!refcount_release(&vp->v_holdcnt)) { 3892 VI_UNLOCK(vp); 3893 return; 3894 } 3895 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); 3896 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 3897 if (VN_IS_DOOMED(vp)) { 3898 vdropl_final(vp); 3899 return; 3900 } 3901 3902 vfs_freevnodes_inc(); 3903 if (vp->v_mflag & VMP_LAZYLIST) { 3904 vunlazy(vp); 3905 } 3906 3907 if (!enqueue) { 3908 VI_UNLOCK(vp); 3909 return; 3910 } 3911 3912 /* 3913 * Also unlocks the interlock. We can't assert on it as we 3914 * released our hold and by now the vnode might have been 3915 * freed. 3916 */ 3917 vdbatch_enqueue(vp); 3918 } 3919 3920 void 3921 vdropl(struct vnode *vp) 3922 { 3923 3924 vdropl_impl(vp, true); 3925 } 3926 3927 /* 3928 * vdrop a vnode when recycling 3929 * 3930 * This is a special case routine only to be used when recycling, differs from 3931 * regular vdrop by not requeieing the vnode on LRU. 3932 * 3933 * Consider a case where vtryrecycle continuously fails with all vnodes (due to 3934 * e.g., frozen writes on the filesystem), filling the batch and causing it to 3935 * be requeued. Then vnlru will end up revisiting the same vnodes. This is a 3936 * loop which can last for as long as writes are frozen. 
3937 */ 3938 static void 3939 vdropl_recycle(struct vnode *vp) 3940 { 3941 3942 vdropl_impl(vp, false); 3943 } 3944 3945 static void 3946 vdrop_recycle(struct vnode *vp) 3947 { 3948 3949 VI_LOCK(vp); 3950 vdropl_recycle(vp); 3951 } 3952 3953 /* 3954 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT 3955 * flags. DOINGINACT prevents us from recursing in calls to vinactive. 3956 */ 3957 static int 3958 vinactivef(struct vnode *vp) 3959 { 3960 struct vm_object *obj; 3961 int error; 3962 3963 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3964 ASSERT_VI_LOCKED(vp, "vinactive"); 3965 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); 3966 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 3967 vp->v_iflag |= VI_DOINGINACT; 3968 vp->v_iflag &= ~VI_OWEINACT; 3969 VI_UNLOCK(vp); 3970 /* 3971 * Before moving off the active list, we must be sure that any 3972 * modified pages are converted into the vnode's dirty 3973 * buffers, since these will no longer be checked once the 3974 * vnode is on the inactive list. 3975 * 3976 * The write-out of the dirty pages is asynchronous. At the 3977 * point that VOP_INACTIVE() is called, there could still be 3978 * pending I/O and dirty pages in the object. 3979 */ 3980 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && 3981 vm_object_mightbedirty(obj)) { 3982 VM_OBJECT_WLOCK(obj); 3983 vm_object_page_clean(obj, 0, 0, 0); 3984 VM_OBJECT_WUNLOCK(obj); 3985 } 3986 error = VOP_INACTIVE(vp); 3987 VI_LOCK(vp); 3988 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); 3989 vp->v_iflag &= ~VI_DOINGINACT; 3990 return (error); 3991 } 3992 3993 int 3994 vinactive(struct vnode *vp) 3995 { 3996 3997 ASSERT_VOP_ELOCKED(vp, "vinactive"); 3998 ASSERT_VI_LOCKED(vp, "vinactive"); 3999 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4000 4001 if ((vp->v_iflag & VI_OWEINACT) == 0) 4002 return (0); 4003 if (vp->v_iflag & VI_DOINGINACT) 4004 return (0); 4005 if (vp->v_usecount > 0) { 4006 vp->v_iflag &= ~VI_OWEINACT; 4007 return (0); 4008 } 4009 return (vinactivef(vp)); 4010 } 4011 4012 /* 4013 * Remove any vnodes in the vnode table belonging to mount point mp. 4014 * 4015 * If FORCECLOSE is not specified, there should not be any active ones, 4016 * return error if any are found (nb: this is a user error, not a 4017 * system error). If FORCECLOSE is specified, detach any active vnodes 4018 * that are found. 4019 * 4020 * If WRITECLOSE is set, only flush out regular file vnodes open for 4021 * writing. 4022 * 4023 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped. 4024 * 4025 * `rootrefs' specifies the base reference count for the root vnode 4026 * of this filesystem. The root vnode is considered busy if its 4027 * v_usecount exceeds this value. On a successful return, vflush(, td) 4028 * will call vrele() on the root vnode exactly rootrefs times. 4029 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must 4030 * be zero. 4031 */ 4032 #ifdef DIAGNOSTIC 4033 static int busyprt = 0; /* print out busy vnodes */ 4034 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes"); 4035 #endif 4036 4037 int 4038 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td) 4039 { 4040 struct vnode *vp, *mvp, *rootvp = NULL; 4041 struct vattr vattr; 4042 int busy = 0, error; 4043 4044 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp, 4045 rootrefs, flags); 4046 if (rootrefs > 0) { 4047 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0, 4048 ("vflush: bad args")); 4049 /* 4050 * Get the filesystem root vnode. 
We can vput() it
4051 * immediately, since with rootrefs > 0, it won't go away.
4052 */
4053 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
4054 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
4055 __func__, error);
4056 return (error);
4057 }
4058 vput(rootvp);
4059 }
4060 loop:
4061 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
4062 vholdl(vp);
4063 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
4064 if (error) {
4065 vdrop(vp);
4066 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4067 goto loop;
4068 }
4069 /*
4070 * Skip over vnodes marked VV_SYSTEM.
4071 */
4072 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
4073 VOP_UNLOCK(vp);
4074 vdrop(vp);
4075 continue;
4076 }
4077 /*
4078 * If WRITECLOSE is set, flush out unlinked but still open
4079 * files (even if open only for reading) and regular file
4080 * vnodes open for writing.
4081 */
4082 if (flags & WRITECLOSE) {
4083 if (vp->v_object != NULL) {
4084 VM_OBJECT_WLOCK(vp->v_object);
4085 vm_object_page_clean(vp->v_object, 0, 0, 0);
4086 VM_OBJECT_WUNLOCK(vp->v_object);
4087 }
4088 do {
4089 error = VOP_FSYNC(vp, MNT_WAIT, td);
4090 } while (error == ERELOOKUP);
4091 if (error != 0) {
4092 VOP_UNLOCK(vp);
4093 vdrop(vp);
4094 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
4095 return (error);
4096 }
4097 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
4098 VI_LOCK(vp);
4099
4100 if ((vp->v_type == VNON ||
4101 (error == 0 && vattr.va_nlink > 0)) &&
4102 (vp->v_writecount <= 0 || vp->v_type != VREG)) {
4103 VOP_UNLOCK(vp);
4104 vdropl(vp);
4105 continue;
4106 }
4107 } else
4108 VI_LOCK(vp);
4109 /*
4110 * With v_usecount == 0, all we need to do is clear out the
4111 * vnode data structures and we are done.
4112 *
4113 * If FORCECLOSE is set, forcibly close the vnode.
4114 */
4115 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
4116 vgonel(vp);
4117 } else {
4118 busy++;
4119 #ifdef DIAGNOSTIC
4120 if (busyprt)
4121 vn_printf(vp, "vflush: busy vnode ");
4122 #endif
4123 }
4124 VOP_UNLOCK(vp);
4125 vdropl(vp);
4126 }
4127 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
4128 /*
4129 * If just the root vnode is busy, and if its refcount
4130 * is equal to `rootrefs', then go ahead and kill it.
4131 */
4132 VI_LOCK(rootvp);
4133 KASSERT(busy > 0, ("vflush: not busy"));
4134 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
4135 ("vflush: usecount %d < rootrefs %d",
4136 rootvp->v_usecount, rootrefs));
4137 if (busy == 1 && rootvp->v_usecount == rootrefs) {
4138 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
4139 vgone(rootvp);
4140 VOP_UNLOCK(rootvp);
4141 busy = 0;
4142 } else
4143 VI_UNLOCK(rootvp);
4144 }
4145 if (busy) {
4146 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
4147 busy);
4148 return (EBUSY);
4149 }
4150 for (; rootrefs > 0; rootrefs--)
4151 vrele(rootvp);
4152 return (0);
4153 }
4154
4155 /*
4156 * Recycle an unused vnode to the front of the free list.
4157 */
4158 int
4159 vrecycle(struct vnode *vp)
4160 {
4161 int recycled;
4162
4163 VI_LOCK(vp);
4164 recycled = vrecyclel(vp);
4165 VI_UNLOCK(vp);
4166 return (recycled);
4167 }
4168
4169 /*
4170 * vrecycle, with the vp interlock held.
4171 */
4172 int
4173 vrecyclel(struct vnode *vp)
4174 {
4175 int recycled;
4176
4177 ASSERT_VOP_ELOCKED(vp, __func__);
4178 ASSERT_VI_LOCKED(vp, __func__);
4179 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4180 recycled = 0;
4181 if (vp->v_usecount == 0) {
4182 recycled = 1;
4183 vgonel(vp);
4184 }
4185 return (recycled);
4186 }
4187
4188 /*
4189 * Eliminate all activity associated with a vnode
4190 * in preparation for reuse.
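 *
 * A short usage sketch, mirroring the error path in
 * vfs_allocate_syncvnode() later in this file; the caller holds the
 * vnode lock exclusively and owns a reference:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vgone(vp);
 *	vput(vp);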
4191 */ 4192 void 4193 vgone(struct vnode *vp) 4194 { 4195 VI_LOCK(vp); 4196 vgonel(vp); 4197 VI_UNLOCK(vp); 4198 } 4199 4200 /* 4201 * Notify upper mounts about reclaimed or unlinked vnode. 4202 */ 4203 void 4204 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) 4205 { 4206 struct mount *mp; 4207 struct mount_upper_node *ump; 4208 4209 mp = atomic_load_ptr(&vp->v_mount); 4210 if (mp == NULL) 4211 return; 4212 if (TAILQ_EMPTY(&mp->mnt_notify)) 4213 return; 4214 4215 MNT_ILOCK(mp); 4216 mp->mnt_upper_pending++; 4217 KASSERT(mp->mnt_upper_pending > 0, 4218 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); 4219 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { 4220 MNT_IUNLOCK(mp); 4221 switch (event) { 4222 case VFS_NOTIFY_UPPER_RECLAIM: 4223 VFS_RECLAIM_LOWERVP(ump->mp, vp); 4224 break; 4225 case VFS_NOTIFY_UPPER_UNLINK: 4226 VFS_UNLINK_LOWERVP(ump->mp, vp); 4227 break; 4228 } 4229 MNT_ILOCK(mp); 4230 } 4231 mp->mnt_upper_pending--; 4232 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && 4233 mp->mnt_upper_pending == 0) { 4234 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; 4235 wakeup(&mp->mnt_uppers); 4236 } 4237 MNT_IUNLOCK(mp); 4238 } 4239 4240 /* 4241 * vgone, with the vp interlock held. 4242 */ 4243 static void 4244 vgonel(struct vnode *vp) 4245 { 4246 struct thread *td; 4247 struct mount *mp; 4248 vm_object_t object; 4249 bool active, doinginact, oweinact; 4250 4251 ASSERT_VOP_ELOCKED(vp, "vgonel"); 4252 ASSERT_VI_LOCKED(vp, "vgonel"); 4253 VNASSERT(vp->v_holdcnt, vp, 4254 ("vgonel: vp %p has no reference.", vp)); 4255 CTR2(KTR_VFS, "%s: vp %p", __func__, vp); 4256 td = curthread; 4257 4258 /* 4259 * Don't vgonel if we're already doomed. 4260 */ 4261 if (VN_IS_DOOMED(vp)) { 4262 VNPASS(vn_get_state(vp) == VSTATE_DESTROYING || \ 4263 vn_get_state(vp) == VSTATE_DEAD, vp); 4264 return; 4265 } 4266 /* 4267 * Paired with freevnode. 4268 */ 4269 vn_seqc_write_begin_locked(vp); 4270 vunlazy_gone(vp); 4271 vn_irflag_set_locked(vp, VIRF_DOOMED); 4272 vn_set_state(vp, VSTATE_DESTROYING); 4273 4274 /* 4275 * Check to see if the vnode is in use. If so, we have to 4276 * call VOP_CLOSE() and VOP_INACTIVE(). 4277 * 4278 * It could be that VOP_INACTIVE() requested reclamation, in 4279 * which case we should avoid recursion, so check 4280 * VI_DOINGINACT. This is not precise but good enough. 4281 */ 4282 active = vp->v_usecount > 0; 4283 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4284 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; 4285 4286 /* 4287 * If we need to do inactive VI_OWEINACT will be set. 4288 */ 4289 if (vp->v_iflag & VI_DEFINACT) { 4290 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); 4291 vp->v_iflag &= ~VI_DEFINACT; 4292 vdropl(vp); 4293 } else { 4294 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); 4295 VI_UNLOCK(vp); 4296 } 4297 cache_purge_vgone(vp); 4298 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM); 4299 4300 /* 4301 * If purging an active vnode, it must be closed and 4302 * deactivated before being reclaimed. 4303 */ 4304 if (active) 4305 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td); 4306 if (!doinginact) { 4307 do { 4308 if (oweinact || active) { 4309 VI_LOCK(vp); 4310 vinactivef(vp); 4311 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; 4312 VI_UNLOCK(vp); 4313 } 4314 } while (oweinact); 4315 } 4316 if (vp->v_type == VSOCK) 4317 vfs_unp_reclaim(vp); 4318 4319 /* 4320 * Clean out any buffers associated with the vnode. 4321 * If the flush fails, just toss the buffers. 
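	 *
	 * The first vinvalbuf() call below uses V_SAVE to try to write dirty
	 * buffers out; if that fails, the loop retries with no flags and
	 * simply discards them. The secondary write count is only taken when
	 * dirty buffers are present and is released after VOP_RECLAIM() below.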
4322 */ 4323 mp = NULL; 4324 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) 4325 (void) vn_start_secondary_write(vp, &mp, V_WAIT); 4326 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) { 4327 while (vinvalbuf(vp, 0, 0, 0) != 0) 4328 ; 4329 } 4330 4331 BO_LOCK(&vp->v_bufobj); 4332 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && 4333 vp->v_bufobj.bo_dirty.bv_cnt == 0 && 4334 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && 4335 vp->v_bufobj.bo_clean.bv_cnt == 0, 4336 ("vp %p bufobj not invalidated", vp)); 4337 4338 /* 4339 * For VMIO bufobj, BO_DEAD is set later, or in 4340 * vm_object_terminate() after the object's page queue is 4341 * flushed. 4342 */ 4343 object = vp->v_bufobj.bo_object; 4344 if (object == NULL) 4345 vp->v_bufobj.bo_flag |= BO_DEAD; 4346 BO_UNLOCK(&vp->v_bufobj); 4347 4348 /* 4349 * Handle the VM part. Tmpfs handles v_object on its own (the 4350 * OBJT_VNODE check). Nullfs or other bypassing filesystems 4351 * should not touch the object borrowed from the lower vnode 4352 * (the handle check). 4353 */ 4354 if (object != NULL && object->type == OBJT_VNODE && 4355 object->handle == vp) 4356 vnode_destroy_vobject(vp); 4357 4358 /* 4359 * Reclaim the vnode. 4360 */ 4361 if (VOP_RECLAIM(vp)) 4362 panic("vgone: cannot reclaim"); 4363 if (mp != NULL) 4364 vn_finished_secondary_write(mp); 4365 VNASSERT(vp->v_object == NULL, vp, 4366 ("vop_reclaim left v_object vp=%p", vp)); 4367 /* 4368 * Clear the advisory locks and wake up waiting threads. 4369 */ 4370 if (vp->v_lockf != NULL) { 4371 (void)VOP_ADVLOCKPURGE(vp); 4372 vp->v_lockf = NULL; 4373 } 4374 /* 4375 * Delete from old mount point vnode list. 4376 */ 4377 if (vp->v_mount == NULL) { 4378 VI_LOCK(vp); 4379 } else { 4380 delmntque(vp); 4381 ASSERT_VI_LOCKED(vp, "vgonel 2"); 4382 } 4383 /* 4384 * Done with purge, reset to the standard lock and invalidate 4385 * the vnode. 4386 */ 4387 vp->v_vnlock = &vp->v_lock; 4388 vp->v_op = &dead_vnodeops; 4389 vp->v_type = VBAD; 4390 vn_set_state(vp, VSTATE_DEAD); 4391 } 4392 4393 /* 4394 * Print out a description of a vnode. 4395 */ 4396 static const char *const vtypename[] = { 4397 [VNON] = "VNON", 4398 [VREG] = "VREG", 4399 [VDIR] = "VDIR", 4400 [VBLK] = "VBLK", 4401 [VCHR] = "VCHR", 4402 [VLNK] = "VLNK", 4403 [VSOCK] = "VSOCK", 4404 [VFIFO] = "VFIFO", 4405 [VBAD] = "VBAD", 4406 [VMARKER] = "VMARKER", 4407 }; 4408 _Static_assert(nitems(vtypename) == VLASTTYPE + 1, 4409 "vnode type name not added to vtypename"); 4410 4411 static const char *const vstatename[] = { 4412 [VSTATE_UNINITIALIZED] = "VSTATE_UNINITIALIZED", 4413 [VSTATE_CONSTRUCTED] = "VSTATE_CONSTRUCTED", 4414 [VSTATE_DESTROYING] = "VSTATE_DESTROYING", 4415 [VSTATE_DEAD] = "VSTATE_DEAD", 4416 }; 4417 _Static_assert(nitems(vstatename) == VLASTSTATE + 1, 4418 "vnode state name not added to vstatename"); 4419 4420 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0, 4421 "new hold count flag not added to vn_printf"); 4422 4423 void 4424 vn_printf(struct vnode *vp, const char *fmt, ...) 
4425 { 4426 va_list ap; 4427 char buf[256], buf2[16]; 4428 u_long flags; 4429 u_int holdcnt; 4430 short irflag; 4431 4432 va_start(ap, fmt); 4433 vprintf(fmt, ap); 4434 va_end(ap); 4435 printf("%p: ", (void *)vp); 4436 printf("type %s state %s op %p\n", vtypename[vp->v_type], 4437 vstatename[vp->v_state], vp->v_op); 4438 holdcnt = atomic_load_int(&vp->v_holdcnt); 4439 printf(" usecount %d, writecount %d, refcount %d seqc users %d", 4440 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, 4441 vp->v_seqc_users); 4442 switch (vp->v_type) { 4443 case VDIR: 4444 printf(" mountedhere %p\n", vp->v_mountedhere); 4445 break; 4446 case VCHR: 4447 printf(" rdev %p\n", vp->v_rdev); 4448 break; 4449 case VSOCK: 4450 printf(" socket %p\n", vp->v_unpcb); 4451 break; 4452 case VFIFO: 4453 printf(" fifoinfo %p\n", vp->v_fifoinfo); 4454 break; 4455 default: 4456 printf("\n"); 4457 break; 4458 } 4459 buf[0] = '\0'; 4460 buf[1] = '\0'; 4461 if (holdcnt & VHOLD_NO_SMR) 4462 strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf)); 4463 printf(" hold count flags (%s)\n", buf + 1); 4464 4465 buf[0] = '\0'; 4466 buf[1] = '\0'; 4467 irflag = vn_irflag_read(vp); 4468 if (irflag & VIRF_DOOMED) 4469 strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); 4470 if (irflag & VIRF_PGREAD) 4471 strlcat(buf, "|VIRF_PGREAD", sizeof(buf)); 4472 if (irflag & VIRF_MOUNTPOINT) 4473 strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf)); 4474 if (irflag & VIRF_TEXT_REF) 4475 strlcat(buf, "|VIRF_TEXT_REF", sizeof(buf)); 4476 flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT | VIRF_TEXT_REF); 4477 if (flags != 0) { 4478 snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); 4479 strlcat(buf, buf2, sizeof(buf)); 4480 } 4481 if (vp->v_vflag & VV_ROOT) 4482 strlcat(buf, "|VV_ROOT", sizeof(buf)); 4483 if (vp->v_vflag & VV_ISTTY) 4484 strlcat(buf, "|VV_ISTTY", sizeof(buf)); 4485 if (vp->v_vflag & VV_NOSYNC) 4486 strlcat(buf, "|VV_NOSYNC", sizeof(buf)); 4487 if (vp->v_vflag & VV_ETERNALDEV) 4488 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf)); 4489 if (vp->v_vflag & VV_CACHEDLABEL) 4490 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf)); 4491 if (vp->v_vflag & VV_VMSIZEVNLOCK) 4492 strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf)); 4493 if (vp->v_vflag & VV_COPYONWRITE) 4494 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf)); 4495 if (vp->v_vflag & VV_SYSTEM) 4496 strlcat(buf, "|VV_SYSTEM", sizeof(buf)); 4497 if (vp->v_vflag & VV_PROCDEP) 4498 strlcat(buf, "|VV_PROCDEP", sizeof(buf)); 4499 if (vp->v_vflag & VV_DELETED) 4500 strlcat(buf, "|VV_DELETED", sizeof(buf)); 4501 if (vp->v_vflag & VV_MD) 4502 strlcat(buf, "|VV_MD", sizeof(buf)); 4503 if (vp->v_vflag & VV_FORCEINSMQ) 4504 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf)); 4505 if (vp->v_vflag & VV_READLINK) 4506 strlcat(buf, "|VV_READLINK", sizeof(buf)); 4507 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | 4508 VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM | 4509 VV_PROCDEP | VV_DELETED | VV_MD | VV_FORCEINSMQ | VV_READLINK); 4510 if (flags != 0) { 4511 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags); 4512 strlcat(buf, buf2, sizeof(buf)); 4513 } 4514 if (vp->v_iflag & VI_MOUNT) 4515 strlcat(buf, "|VI_MOUNT", sizeof(buf)); 4516 if (vp->v_iflag & VI_DOINGINACT) 4517 strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); 4518 if (vp->v_iflag & VI_OWEINACT) 4519 strlcat(buf, "|VI_OWEINACT", sizeof(buf)); 4520 if (vp->v_iflag & VI_DEFINACT) 4521 strlcat(buf, "|VI_DEFINACT", sizeof(buf)); 4522 if (vp->v_iflag & VI_FOPENING) 4523 strlcat(buf, "|VI_FOPENING", sizeof(buf)); 4524 flags = 
vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | 4525 VI_OWEINACT | VI_DEFINACT | VI_FOPENING); 4526 if (flags != 0) { 4527 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); 4528 strlcat(buf, buf2, sizeof(buf)); 4529 } 4530 if (vp->v_mflag & VMP_LAZYLIST) 4531 strlcat(buf, "|VMP_LAZYLIST", sizeof(buf)); 4532 flags = vp->v_mflag & ~(VMP_LAZYLIST); 4533 if (flags != 0) { 4534 snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags); 4535 strlcat(buf, buf2, sizeof(buf)); 4536 } 4537 printf(" flags (%s)", buf + 1); 4538 if (mtx_owned(VI_MTX(vp))) 4539 printf(" VI_LOCKed"); 4540 printf("\n"); 4541 if (vp->v_object != NULL) 4542 printf(" v_object %p ref %d pages %d " 4543 "cleanbuf %d dirtybuf %d\n", 4544 vp->v_object, vp->v_object->ref_count, 4545 vp->v_object->resident_page_count, 4546 vp->v_bufobj.bo_clean.bv_cnt, 4547 vp->v_bufobj.bo_dirty.bv_cnt); 4548 printf(" "); 4549 lockmgr_printinfo(vp->v_vnlock); 4550 if (vp->v_data != NULL) 4551 VOP_PRINT(vp); 4552 } 4553 4554 #ifdef DDB 4555 /* 4556 * List all of the locked vnodes in the system. 4557 * Called when debugging the kernel. 4558 */ 4559 DB_SHOW_COMMAND_FLAGS(lockedvnods, lockedvnodes, DB_CMD_MEMSAFE) 4560 { 4561 struct mount *mp; 4562 struct vnode *vp; 4563 4564 /* 4565 * Note: because this is DDB, we can't obey the locking semantics 4566 * for these structures, which means we could catch an inconsistent 4567 * state and dereference a nasty pointer. Not much to be done 4568 * about that. 4569 */ 4570 db_printf("Locked vnodes\n"); 4571 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4572 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4573 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) 4574 vn_printf(vp, "vnode "); 4575 } 4576 } 4577 } 4578 4579 /* 4580 * Show details about the given vnode. 4581 */ 4582 DB_SHOW_COMMAND(vnode, db_show_vnode) 4583 { 4584 struct vnode *vp; 4585 4586 if (!have_addr) 4587 return; 4588 vp = (struct vnode *)addr; 4589 vn_printf(vp, "vnode "); 4590 } 4591 4592 /* 4593 * Show details about the given mount point. 4594 */ 4595 DB_SHOW_COMMAND(mount, db_show_mount) 4596 { 4597 struct mount *mp; 4598 struct vfsopt *opt; 4599 struct statfs *sp; 4600 struct vnode *vp; 4601 char buf[512]; 4602 uint64_t mflags; 4603 u_int flags; 4604 4605 if (!have_addr) { 4606 /* No address given, print short info about all mount points. 
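		 * Example DDB session (addresses and filesystem names here
		 * are made up for illustration):
		 *
		 *	db> show mount
		 *	0xfffff80002a8c000 /dev/ada0p2 on / (ufs)
		 *	db> show mount 0xfffff80002a8c000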
*/ 4607 TAILQ_FOREACH(mp, &mountlist, mnt_list) { 4608 db_printf("%p %s on %s (%s)\n", mp, 4609 mp->mnt_stat.f_mntfromname, 4610 mp->mnt_stat.f_mntonname, 4611 mp->mnt_stat.f_fstypename); 4612 if (db_pager_quit) 4613 break; 4614 } 4615 db_printf("\nMore info: show mount <addr>\n"); 4616 return; 4617 } 4618 4619 mp = (struct mount *)addr; 4620 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, 4621 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); 4622 4623 buf[0] = '\0'; 4624 mflags = mp->mnt_flag; 4625 #define MNT_FLAG(flag) do { \ 4626 if (mflags & (flag)) { \ 4627 if (buf[0] != '\0') \ 4628 strlcat(buf, ", ", sizeof(buf)); \ 4629 strlcat(buf, (#flag) + 4, sizeof(buf)); \ 4630 mflags &= ~(flag); \ 4631 } \ 4632 } while (0) 4633 MNT_FLAG(MNT_RDONLY); 4634 MNT_FLAG(MNT_SYNCHRONOUS); 4635 MNT_FLAG(MNT_NOEXEC); 4636 MNT_FLAG(MNT_NOSUID); 4637 MNT_FLAG(MNT_NFS4ACLS); 4638 MNT_FLAG(MNT_UNION); 4639 MNT_FLAG(MNT_ASYNC); 4640 MNT_FLAG(MNT_SUIDDIR); 4641 MNT_FLAG(MNT_SOFTDEP); 4642 MNT_FLAG(MNT_NOSYMFOLLOW); 4643 MNT_FLAG(MNT_GJOURNAL); 4644 MNT_FLAG(MNT_MULTILABEL); 4645 MNT_FLAG(MNT_ACLS); 4646 MNT_FLAG(MNT_NOATIME); 4647 MNT_FLAG(MNT_NOCLUSTERR); 4648 MNT_FLAG(MNT_NOCLUSTERW); 4649 MNT_FLAG(MNT_SUJ); 4650 MNT_FLAG(MNT_EXRDONLY); 4651 MNT_FLAG(MNT_EXPORTED); 4652 MNT_FLAG(MNT_DEFEXPORTED); 4653 MNT_FLAG(MNT_EXPORTANON); 4654 MNT_FLAG(MNT_EXKERB); 4655 MNT_FLAG(MNT_EXPUBLIC); 4656 MNT_FLAG(MNT_LOCAL); 4657 MNT_FLAG(MNT_QUOTA); 4658 MNT_FLAG(MNT_ROOTFS); 4659 MNT_FLAG(MNT_USER); 4660 MNT_FLAG(MNT_IGNORE); 4661 MNT_FLAG(MNT_UPDATE); 4662 MNT_FLAG(MNT_DELEXPORT); 4663 MNT_FLAG(MNT_RELOAD); 4664 MNT_FLAG(MNT_FORCE); 4665 MNT_FLAG(MNT_SNAPSHOT); 4666 MNT_FLAG(MNT_BYFSID); 4667 #undef MNT_FLAG 4668 if (mflags != 0) { 4669 if (buf[0] != '\0') 4670 strlcat(buf, ", ", sizeof(buf)); 4671 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4672 "0x%016jx", mflags); 4673 } 4674 db_printf(" mnt_flag = %s\n", buf); 4675 4676 buf[0] = '\0'; 4677 flags = mp->mnt_kern_flag; 4678 #define MNT_KERN_FLAG(flag) do { \ 4679 if (flags & (flag)) { \ 4680 if (buf[0] != '\0') \ 4681 strlcat(buf, ", ", sizeof(buf)); \ 4682 strlcat(buf, (#flag) + 5, sizeof(buf)); \ 4683 flags &= ~(flag); \ 4684 } \ 4685 } while (0) 4686 MNT_KERN_FLAG(MNTK_UNMOUNTF); 4687 MNT_KERN_FLAG(MNTK_ASYNC); 4688 MNT_KERN_FLAG(MNTK_SOFTDEP); 4689 MNT_KERN_FLAG(MNTK_NOMSYNC); 4690 MNT_KERN_FLAG(MNTK_DRAINING); 4691 MNT_KERN_FLAG(MNTK_REFEXPIRE); 4692 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED); 4693 MNT_KERN_FLAG(MNTK_SHARED_WRITES); 4694 MNT_KERN_FLAG(MNTK_NO_IOPF); 4695 MNT_KERN_FLAG(MNTK_RECURSE); 4696 MNT_KERN_FLAG(MNTK_UPPER_WAITER); 4697 MNT_KERN_FLAG(MNTK_UNLOCKED_INSMNTQUE); 4698 MNT_KERN_FLAG(MNTK_USES_BCACHE); 4699 MNT_KERN_FLAG(MNTK_VMSETSIZE_BUG); 4700 MNT_KERN_FLAG(MNTK_FPLOOKUP); 4701 MNT_KERN_FLAG(MNTK_TASKQUEUE_WAITER); 4702 MNT_KERN_FLAG(MNTK_NOASYNC); 4703 MNT_KERN_FLAG(MNTK_UNMOUNT); 4704 MNT_KERN_FLAG(MNTK_MWAIT); 4705 MNT_KERN_FLAG(MNTK_SUSPEND); 4706 MNT_KERN_FLAG(MNTK_SUSPEND2); 4707 MNT_KERN_FLAG(MNTK_SUSPENDED); 4708 MNT_KERN_FLAG(MNTK_NULL_NOCACHE); 4709 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED); 4710 #undef MNT_KERN_FLAG 4711 if (flags != 0) { 4712 if (buf[0] != '\0') 4713 strlcat(buf, ", ", sizeof(buf)); 4714 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), 4715 "0x%08x", flags); 4716 } 4717 db_printf(" mnt_kern_flag = %s\n", buf); 4718 4719 db_printf(" mnt_opt = "); 4720 opt = TAILQ_FIRST(mp->mnt_opt); 4721 if (opt != NULL) { 4722 db_printf("%s", opt->name); 4723 opt = TAILQ_NEXT(opt, link); 4724 while (opt != 
NULL) { 4725 db_printf(", %s", opt->name); 4726 opt = TAILQ_NEXT(opt, link); 4727 } 4728 } 4729 db_printf("\n"); 4730 4731 sp = &mp->mnt_stat; 4732 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx " 4733 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju " 4734 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju " 4735 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n", 4736 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, 4737 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, 4738 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, 4739 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, 4740 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, 4741 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, 4742 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, 4743 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); 4744 4745 db_printf(" mnt_cred = { uid=%u ruid=%u", 4746 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); 4747 if (jailed(mp->mnt_cred)) 4748 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); 4749 db_printf(" }\n"); 4750 db_printf(" mnt_ref = %d (with %d in the struct)\n", 4751 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); 4752 db_printf(" mnt_gen = %d\n", mp->mnt_gen); 4753 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); 4754 db_printf(" mnt_lazyvnodelistsize = %d\n", 4755 mp->mnt_lazyvnodelistsize); 4756 db_printf(" mnt_writeopcount = %d (with %d in the struct)\n", 4757 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); 4758 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); 4759 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); 4760 db_printf(" mnt_lockref = %d (with %d in the struct)\n", 4761 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); 4762 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); 4763 db_printf(" mnt_secondary_accwrites = %d\n", 4764 mp->mnt_secondary_accwrites); 4765 db_printf(" mnt_gjprovider = %s\n", 4766 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); 4767 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); 4768 4769 db_printf("\n\nList of active vnodes\n"); 4770 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4771 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { 4772 vn_printf(vp, "vnode "); 4773 if (db_pager_quit) 4774 break; 4775 } 4776 } 4777 db_printf("\n\nList of inactive vnodes\n"); 4778 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 4779 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { 4780 vn_printf(vp, "vnode "); 4781 if (db_pager_quit) 4782 break; 4783 } 4784 } 4785 } 4786 #endif /* DDB */ 4787 4788 /* 4789 * Fill in a struct xvfsconf based on a struct vfsconf. 4790 */ 4791 static int 4792 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp) 4793 { 4794 struct xvfsconf xvfsp; 4795 4796 bzero(&xvfsp, sizeof(xvfsp)); 4797 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4798 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4799 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4800 xvfsp.vfc_flags = vfsp->vfc_flags; 4801 /* 4802 * These are unused in userland, we keep them 4803 * to not break binary compatibility. 
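	 *
	 * For reference, a userland consumer typically reads the whole array
	 * via the vfs.conflist sysctl defined below (illustrative sketch,
	 * error handling omitted):
	 *
	 *	size_t len;
	 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
	 *	struct xvfsconf *xv = malloc(len);
	 *	sysctlbyname("vfs.conflist", xv, &len, NULL, 0);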
4804 */ 4805 xvfsp.vfc_vfsops = NULL; 4806 xvfsp.vfc_next = NULL; 4807 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4808 } 4809 4810 #ifdef COMPAT_FREEBSD32 4811 struct xvfsconf32 { 4812 uint32_t vfc_vfsops; 4813 char vfc_name[MFSNAMELEN]; 4814 int32_t vfc_typenum; 4815 int32_t vfc_refcount; 4816 int32_t vfc_flags; 4817 uint32_t vfc_next; 4818 }; 4819 4820 static int 4821 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp) 4822 { 4823 struct xvfsconf32 xvfsp; 4824 4825 bzero(&xvfsp, sizeof(xvfsp)); 4826 strcpy(xvfsp.vfc_name, vfsp->vfc_name); 4827 xvfsp.vfc_typenum = vfsp->vfc_typenum; 4828 xvfsp.vfc_refcount = vfsp->vfc_refcount; 4829 xvfsp.vfc_flags = vfsp->vfc_flags; 4830 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp))); 4831 } 4832 #endif 4833 4834 /* 4835 * Top level filesystem related information gathering. 4836 */ 4837 static int 4838 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS) 4839 { 4840 struct vfsconf *vfsp; 4841 int error; 4842 4843 error = 0; 4844 vfsconf_slock(); 4845 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4846 #ifdef COMPAT_FREEBSD32 4847 if (req->flags & SCTL_MASK32) 4848 error = vfsconf2x32(req, vfsp); 4849 else 4850 #endif 4851 error = vfsconf2x(req, vfsp); 4852 if (error) 4853 break; 4854 } 4855 vfsconf_sunlock(); 4856 return (error); 4857 } 4858 4859 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD | 4860 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist, 4861 "S,xvfsconf", "List of all configured filesystems"); 4862 4863 #ifndef BURN_BRIDGES 4864 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS); 4865 4866 static int 4867 vfs_sysctl(SYSCTL_HANDLER_ARGS) 4868 { 4869 int *name = (int *)arg1 - 1; /* XXX */ 4870 u_int namelen = arg2 + 1; /* XXX */ 4871 struct vfsconf *vfsp; 4872 4873 log(LOG_WARNING, "userland calling deprecated sysctl, " 4874 "please rebuild world\n"); 4875 4876 #if 1 || defined(COMPAT_PRELITE2) 4877 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. 
*/ 4878 if (namelen == 1) 4879 return (sysctl_ovfs_conf(oidp, arg1, arg2, req)); 4880 #endif 4881 4882 switch (name[1]) { 4883 case VFS_MAXTYPENUM: 4884 if (namelen != 2) 4885 return (ENOTDIR); 4886 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int))); 4887 case VFS_CONF: 4888 if (namelen != 3) 4889 return (ENOTDIR); /* overloaded */ 4890 vfsconf_slock(); 4891 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4892 if (vfsp->vfc_typenum == name[2]) 4893 break; 4894 } 4895 vfsconf_sunlock(); 4896 if (vfsp == NULL) 4897 return (EOPNOTSUPP); 4898 #ifdef COMPAT_FREEBSD32 4899 if (req->flags & SCTL_MASK32) 4900 return (vfsconf2x32(req, vfsp)); 4901 else 4902 #endif 4903 return (vfsconf2x(req, vfsp)); 4904 } 4905 return (EOPNOTSUPP); 4906 } 4907 4908 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP | 4909 CTLFLAG_MPSAFE, vfs_sysctl, 4910 "Generic filesystem"); 4911 4912 #if 1 || defined(COMPAT_PRELITE2) 4913 4914 static int 4915 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS) 4916 { 4917 int error; 4918 struct vfsconf *vfsp; 4919 struct ovfsconf ovfs; 4920 4921 vfsconf_slock(); 4922 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) { 4923 bzero(&ovfs, sizeof(ovfs)); 4924 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ 4925 strcpy(ovfs.vfc_name, vfsp->vfc_name); 4926 ovfs.vfc_index = vfsp->vfc_typenum; 4927 ovfs.vfc_refcount = vfsp->vfc_refcount; 4928 ovfs.vfc_flags = vfsp->vfc_flags; 4929 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs); 4930 if (error != 0) { 4931 vfsconf_sunlock(); 4932 return (error); 4933 } 4934 } 4935 vfsconf_sunlock(); 4936 return (0); 4937 } 4938 4939 #endif /* 1 || COMPAT_PRELITE2 */ 4940 #endif /* !BURN_BRIDGES */ 4941 4942 static void 4943 unmount_or_warn(struct mount *mp) 4944 { 4945 int error; 4946 4947 error = dounmount(mp, MNT_FORCE, curthread); 4948 if (error != 0) { 4949 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); 4950 if (error == EBUSY) 4951 printf("BUSY)\n"); 4952 else 4953 printf("%d)\n", error); 4954 } 4955 } 4956 4957 /* 4958 * Unmount all filesystems. The list is traversed in reverse order 4959 * of mounting to avoid dependencies. 4960 */ 4961 void 4962 vfs_unmountall(void) 4963 { 4964 struct mount *mp, *tmp; 4965 4966 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__); 4967 4968 /* 4969 * Since this only runs when rebooting, it is not interlocked. 4970 */ 4971 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) { 4972 vfs_ref(mp); 4973 4974 /* 4975 * Forcibly unmounting "/dev" before "/" would prevent clean 4976 * unmount of the latter. 
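		 * Skip it in this loop; rootdevmp, if any, is unmounted
		 * separately once everything else has been processed.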
4977 */ 4978 if (mp == rootdevmp) 4979 continue; 4980 4981 unmount_or_warn(mp); 4982 } 4983 4984 if (rootdevmp != NULL) 4985 unmount_or_warn(rootdevmp); 4986 } 4987 4988 static void 4989 vfs_deferred_inactive(struct vnode *vp, int lkflags) 4990 { 4991 4992 ASSERT_VI_LOCKED(vp, __func__); 4993 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); 4994 if ((vp->v_iflag & VI_OWEINACT) == 0) { 4995 vdropl(vp); 4996 return; 4997 } 4998 if (vn_lock(vp, lkflags) == 0) { 4999 VI_LOCK(vp); 5000 vinactive(vp); 5001 VOP_UNLOCK(vp); 5002 vdropl(vp); 5003 return; 5004 } 5005 vdefer_inactive_unlocked(vp); 5006 } 5007 5008 static int 5009 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) 5010 { 5011 5012 return (vp->v_iflag & VI_DEFINACT); 5013 } 5014 5015 static void __noinline 5016 vfs_periodic_inactive(struct mount *mp, int flags) 5017 { 5018 struct vnode *vp, *mvp; 5019 int lkflags; 5020 5021 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5022 if (flags != MNT_WAIT) 5023 lkflags |= LK_NOWAIT; 5024 5025 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) { 5026 if ((vp->v_iflag & VI_DEFINACT) == 0) { 5027 VI_UNLOCK(vp); 5028 continue; 5029 } 5030 vp->v_iflag &= ~VI_DEFINACT; 5031 vfs_deferred_inactive(vp, lkflags); 5032 } 5033 } 5034 5035 static inline bool 5036 vfs_want_msync(struct vnode *vp) 5037 { 5038 struct vm_object *obj; 5039 5040 /* 5041 * This test may be performed without any locks held. 5042 * We rely on vm_object's type stability. 5043 */ 5044 if (vp->v_vflag & VV_NOSYNC) 5045 return (false); 5046 obj = vp->v_object; 5047 return (obj != NULL && vm_object_mightbedirty(obj)); 5048 } 5049 5050 static int 5051 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) 5052 { 5053 5054 if (vp->v_vflag & VV_NOSYNC) 5055 return (false); 5056 if (vp->v_iflag & VI_DEFINACT) 5057 return (true); 5058 return (vfs_want_msync(vp)); 5059 } 5060 5061 static void __noinline 5062 vfs_periodic_msync_inactive(struct mount *mp, int flags) 5063 { 5064 struct vnode *vp, *mvp; 5065 struct vm_object *obj; 5066 int lkflags, objflags; 5067 bool seen_defer; 5068 5069 lkflags = LK_EXCLUSIVE | LK_INTERLOCK; 5070 if (flags != MNT_WAIT) { 5071 lkflags |= LK_NOWAIT; 5072 objflags = OBJPC_NOSYNC; 5073 } else { 5074 objflags = OBJPC_SYNC; 5075 } 5076 5077 MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) { 5078 seen_defer = false; 5079 if (vp->v_iflag & VI_DEFINACT) { 5080 vp->v_iflag &= ~VI_DEFINACT; 5081 seen_defer = true; 5082 } 5083 if (!vfs_want_msync(vp)) { 5084 if (seen_defer) 5085 vfs_deferred_inactive(vp, lkflags); 5086 else 5087 VI_UNLOCK(vp); 5088 continue; 5089 } 5090 if (vget(vp, lkflags) == 0) { 5091 obj = vp->v_object; 5092 if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) { 5093 VM_OBJECT_WLOCK(obj); 5094 vm_object_page_clean(obj, 0, 0, objflags); 5095 VM_OBJECT_WUNLOCK(obj); 5096 } 5097 vput(vp); 5098 if (seen_defer) 5099 vdrop(vp); 5100 } else { 5101 if (seen_defer) 5102 vdefer_inactive_unlocked(vp); 5103 } 5104 } 5105 } 5106 5107 void 5108 vfs_periodic(struct mount *mp, int flags) 5109 { 5110 5111 CTR2(KTR_VFS, "%s: mp %p", __func__, mp); 5112 5113 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) 5114 vfs_periodic_inactive(mp, flags); 5115 else 5116 vfs_periodic_msync_inactive(mp, flags); 5117 } 5118 5119 static void 5120 destroy_vpollinfo_free(struct vpollinfo *vi) 5121 { 5122 5123 knlist_destroy(&vi->vpi_selinfo.si_note); 5124 mtx_destroy(&vi->vpi_lock); 5125 free(vi, M_VNODEPOLL); 5126 } 5127 5128 static void 5129 destroy_vpollinfo(struct vpollinfo 
*vi)
5130 {
5131
5132 knlist_clear(&vi->vpi_selinfo.si_note, 1);
5133 seldrain(&vi->vpi_selinfo);
5134 destroy_vpollinfo_free(vi);
5135 }
5136
5137 /*
5138 * Initialize per-vnode helper structure to hold poll-related state.
5139 */
5140 void
5141 v_addpollinfo(struct vnode *vp)
5142 {
5143 struct vpollinfo *vi;
5144
5145 if (vp->v_pollinfo != NULL)
5146 return;
5147 vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
5148 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
5149 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
5150 vfs_knlunlock, vfs_knl_assert_lock);
5151 VI_LOCK(vp);
5152 if (vp->v_pollinfo != NULL) {
5153 VI_UNLOCK(vp);
5154 destroy_vpollinfo_free(vi);
5155 return;
5156 }
5157 vp->v_pollinfo = vi;
5158 VI_UNLOCK(vp);
5159 }
5160
5161 /*
5162 * Record a process's interest in events which might happen to
5163 * a vnode. Because poll uses the historic select-style interface
5164 * internally, this routine serves as both the ``check for any
5165 * pending events'' and the ``record my interest in future events''
5166 * functions. (These are done together, while the lock is held,
5167 * to avoid race conditions.)
5168 */
5169 int
5170 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
5171 {
5172
5173 v_addpollinfo(vp);
5174 mtx_lock(&vp->v_pollinfo->vpi_lock);
5175 if (vp->v_pollinfo->vpi_revents & events) {
5176 /*
5177 * This leaves events we are not interested
5178 * in available for the other process which
5179 * presumably had requested them
5180 * (otherwise they would never have been
5181 * recorded).
5182 */
5183 events &= vp->v_pollinfo->vpi_revents;
5184 vp->v_pollinfo->vpi_revents &= ~events;
5185
5186 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5187 return (events);
5188 }
5189 vp->v_pollinfo->vpi_events |= events;
5190 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5191 mtx_unlock(&vp->v_pollinfo->vpi_lock);
5192 return (0);
5193 }
5194
5195 /*
5196 * Routine to create and manage a filesystem syncer vnode.
5197 */
5198 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5199 static int sync_fsync(struct vop_fsync_args *);
5200 static int sync_inactive(struct vop_inactive_args *);
5201 static int sync_reclaim(struct vop_reclaim_args *);
5202
5203 static struct vop_vector sync_vnodeops = {
5204 .vop_bypass = VOP_EOPNOTSUPP,
5205 .vop_close = sync_close,
5206 .vop_fsync = sync_fsync,
5207 .vop_getwritemount = vop_stdgetwritemount,
5208 .vop_inactive = sync_inactive,
5209 .vop_need_inactive = vop_stdneed_inactive,
5210 .vop_reclaim = sync_reclaim,
5211 .vop_lock1 = vop_stdlock,
5212 .vop_unlock = vop_stdunlock,
5213 .vop_islocked = vop_stdislocked,
5214 .vop_fplookup_vexec = VOP_EAGAIN,
5215 .vop_fplookup_symlink = VOP_EAGAIN,
5216 };
5217 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5218
5219 /*
5220 * Create a new filesystem syncer vnode for the specified mount point.
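 *
 * Typical pairing (illustrative): a mount that can be written to gets a
 * syncer vnode via vfs_allocate_syncvnode(mp), and the vnode is released
 * with vfs_deallocate_syncvnode(mp) when the mount goes away or is
 * downgraded to read-only.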
5221 */
5222 void
5223 vfs_allocate_syncvnode(struct mount *mp)
5224 {
5225 struct vnode *vp;
5226 struct bufobj *bo;
5227 static long start, incr, next;
5228 int error;
5229
5230 /* Allocate a new vnode */
5231 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5232 if (error != 0)
5233 panic("vfs_allocate_syncvnode: getnewvnode() failed");
5234 vp->v_type = VNON;
5235 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5236 vp->v_vflag |= VV_FORCEINSMQ;
5237 error = insmntque1(vp, mp);
5238 if (error != 0)
5239 panic("vfs_allocate_syncvnode: insmntque() failed");
5240 vp->v_vflag &= ~VV_FORCEINSMQ;
5241 vn_set_state(vp, VSTATE_CONSTRUCTED);
5242 VOP_UNLOCK(vp);
5243 /*
5244 * Place the vnode onto the syncer worklist. We attempt to
5245 * scatter them about on the list so that they will go off
5246 * at evenly distributed times even if all the filesystems
5247 * are mounted at once.
5248 */
5249 next += incr;
5250 if (next == 0 || next > syncer_maxdelay) {
5251 start /= 2;
5252 incr /= 2;
5253 if (start == 0) {
5254 start = syncer_maxdelay / 2;
5255 incr = syncer_maxdelay;
5256 }
5257 next = start;
5258 }
5259 bo = &vp->v_bufobj;
5260 BO_LOCK(bo);
5261 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5262 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5263 mtx_lock(&sync_mtx);
5264 sync_vnode_count++;
5265 if (mp->mnt_syncer == NULL) {
5266 mp->mnt_syncer = vp;
5267 vp = NULL;
5268 }
5269 mtx_unlock(&sync_mtx);
5270 BO_UNLOCK(bo);
5271 if (vp != NULL) {
5272 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5273 vgone(vp);
5274 vput(vp);
5275 }
5276 }
5277
5278 void
5279 vfs_deallocate_syncvnode(struct mount *mp)
5280 {
5281 struct vnode *vp;
5282
5283 mtx_lock(&sync_mtx);
5284 vp = mp->mnt_syncer;
5285 if (vp != NULL)
5286 mp->mnt_syncer = NULL;
5287 mtx_unlock(&sync_mtx);
5288 if (vp != NULL)
5289 vrele(vp);
5290 }
5291
5292 /*
5293 * Do a lazy sync of the filesystem.
5294 */
5295 static int
5296 sync_fsync(struct vop_fsync_args *ap)
5297 {
5298 struct vnode *syncvp = ap->a_vp;
5299 struct mount *mp = syncvp->v_mount;
5300 int error, save;
5301 struct bufobj *bo;
5302
5303 /*
5304 * We only need to do something if this is a lazy evaluation.
5305 */
5306 if (ap->a_waitfor != MNT_LAZY)
5307 return (0);
5308
5309 /*
5310 * Move ourselves to the back of the sync list.
5311 */
5312 bo = &syncvp->v_bufobj;
5313 BO_LOCK(bo);
5314 vn_syncer_add_to_worklist(bo, syncdelay);
5315 BO_UNLOCK(bo);
5316
5317 /*
5318 * Walk the list of vnodes pushing all that are dirty and
5319 * not already on the sync list.
5320 */
5321 if (vfs_busy(mp, MBF_NOWAIT) != 0)
5322 return (0);
5323 VOP_UNLOCK(syncvp);
5324 save = curthread_pflags_set(TDP_SYNCIO);
5325 /*
5326 * The filesystem at hand may be idle with free vnodes stored in the
5327 * batch. Return them instead of letting them stay there indefinitely.
5328 */
5329 vfs_periodic(mp, MNT_NOWAIT);
5330 error = VFS_SYNC(mp, MNT_LAZY);
5331 curthread_pflags_restore(save);
5332 vn_lock(syncvp, LK_EXCLUSIVE | LK_RETRY);
5333 vfs_unbusy(mp);
5334 return (error);
5335 }
5336
5337 /*
5338 * The syncer vnode is no longer referenced.
5339 */
5340 static int
5341 sync_inactive(struct vop_inactive_args *ap)
5342 {
5343
5344 vgone(ap->a_vp);
5345 return (0);
5346 }
5347
5348 /*
5349 * The syncer vnode is no longer needed and is being decommissioned.
5350 *
5351 * Modifications to the worklist must be protected by sync_mtx.
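 *
 * Note the lock order used below: the bufobj lock is acquired first and
 * sync_mtx second, the same order used when the vnode was put on the
 * worklist above.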
5352 */
5353 static int
5354 sync_reclaim(struct vop_reclaim_args *ap)
5355 {
5356 struct vnode *vp = ap->a_vp;
5357 struct bufobj *bo;
5358
5359 bo = &vp->v_bufobj;
5360 BO_LOCK(bo);
5361 mtx_lock(&sync_mtx);
5362 if (vp->v_mount->mnt_syncer == vp)
5363 vp->v_mount->mnt_syncer = NULL;
5364 if (bo->bo_flag & BO_ONWORKLST) {
5365 LIST_REMOVE(bo, bo_synclist);
5366 syncer_worklist_len--;
5367 sync_vnode_count--;
5368 bo->bo_flag &= ~BO_ONWORKLST;
5369 }
5370 mtx_unlock(&sync_mtx);
5371 BO_UNLOCK(bo);
5372
5373 return (0);
5374 }
5375
5376 int
5377 vn_need_pageq_flush(struct vnode *vp)
5378 {
5379 struct vm_object *obj;
5380
5381 obj = vp->v_object;
5382 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5383 vm_object_mightbedirty(obj));
5384 }
5385
5386 /*
5387 * Check if vnode represents a disk device
5388 */
5389 bool
5390 vn_isdisk_error(struct vnode *vp, int *errp)
5391 {
5392 int error;
5393
5394 if (vp->v_type != VCHR) {
5395 error = ENOTBLK;
5396 goto out;
5397 }
5398 error = 0;
5399 dev_lock();
5400 if (vp->v_rdev == NULL)
5401 error = ENXIO;
5402 else if (vp->v_rdev->si_devsw == NULL)
5403 error = ENXIO;
5404 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5405 error = ENOTBLK;
5406 dev_unlock();
5407 out:
5408 *errp = error;
5409 return (error == 0);
5410 }
5411
5412 bool
5413 vn_isdisk(struct vnode *vp)
5414 {
5415 int error;
5416
5417 return (vn_isdisk_error(vp, &error));
5418 }
5419
5420 /*
5421 * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5422 * the comment above cache_fplookup for details.
5423 */
5424 int
5425 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5426 {
5427 int error;
5428
5429 VFS_SMR_ASSERT_ENTERED();
5430
5431 /* Check the owner. */
5432 if (cred->cr_uid == file_uid) {
5433 if (file_mode & S_IXUSR)
5434 return (0);
5435 goto out_error;
5436 }
5437
5438 /* Otherwise, check the groups (first match) */
5439 if (groupmember(file_gid, cred)) {
5440 if (file_mode & S_IXGRP)
5441 return (0);
5442 goto out_error;
5443 }
5444
5445 /* Otherwise, check everyone else. */
5446 if (file_mode & S_IXOTH)
5447 return (0);
5448 out_error:
5449 /*
5450 * Permission check failed, but it is possible denial will get overridden
5451 * (e.g., when root is traversing through a 700 directory owned by someone
5452 * else).
5453 *
5454 * vaccess() calls priv_check_cred which in turn can descend into MAC
5455 * modules overriding this result. It's quite unclear what semantics
5456 * are allowed for them to operate; thus for safety we don't call them
5457 * from within the SMR section. This also means if any such modules
5458 * are present, we have to let the regular lookup decide.
5459 */
5460 error = priv_check_cred_vfs_lookup_nomac(cred);
5461 switch (error) {
5462 case 0:
5463 return (0);
5464 case EAGAIN:
5465 /*
5466 * MAC modules present.
5467 */
5468 return (EAGAIN);
5469 case EPERM:
5470 return (EACCES);
5471 default:
5472 return (error);
5473 }
5474 }
5475
5476 /*
5477 * Common filesystem object access control check routine. Accepts a
5478 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5479 * Returns 0 on success, or an errno on failure.
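 *
 * A typical VOP_ACCESS() implementation ends with a call like the
 * following (sketch with hypothetical inode fields, shown only to
 * illustrate the argument mapping):
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_accmode, ap->a_cred));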
5480 */ 5481 int 5482 vaccess(__enum_uint8(vtype) type, mode_t file_mode, uid_t file_uid, gid_t file_gid, 5483 accmode_t accmode, struct ucred *cred) 5484 { 5485 accmode_t dac_granted; 5486 accmode_t priv_granted; 5487 5488 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0, 5489 ("invalid bit in accmode")); 5490 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE), 5491 ("VAPPEND without VWRITE")); 5492 5493 /* 5494 * Look for a normal, non-privileged way to access the file/directory 5495 * as requested. If it exists, go with that. 5496 */ 5497 5498 dac_granted = 0; 5499 5500 /* Check the owner. */ 5501 if (cred->cr_uid == file_uid) { 5502 dac_granted |= VADMIN; 5503 if (file_mode & S_IXUSR) 5504 dac_granted |= VEXEC; 5505 if (file_mode & S_IRUSR) 5506 dac_granted |= VREAD; 5507 if (file_mode & S_IWUSR) 5508 dac_granted |= (VWRITE | VAPPEND); 5509 5510 if ((accmode & dac_granted) == accmode) 5511 return (0); 5512 5513 goto privcheck; 5514 } 5515 5516 /* Otherwise, check the groups (first match) */ 5517 if (groupmember(file_gid, cred)) { 5518 if (file_mode & S_IXGRP) 5519 dac_granted |= VEXEC; 5520 if (file_mode & S_IRGRP) 5521 dac_granted |= VREAD; 5522 if (file_mode & S_IWGRP) 5523 dac_granted |= (VWRITE | VAPPEND); 5524 5525 if ((accmode & dac_granted) == accmode) 5526 return (0); 5527 5528 goto privcheck; 5529 } 5530 5531 /* Otherwise, check everyone else. */ 5532 if (file_mode & S_IXOTH) 5533 dac_granted |= VEXEC; 5534 if (file_mode & S_IROTH) 5535 dac_granted |= VREAD; 5536 if (file_mode & S_IWOTH) 5537 dac_granted |= (VWRITE | VAPPEND); 5538 if ((accmode & dac_granted) == accmode) 5539 return (0); 5540 5541 privcheck: 5542 /* 5543 * Build a privilege mask to determine if the set of privileges 5544 * satisfies the requirements when combined with the granted mask 5545 * from above. For each privilege, if the privilege is required, 5546 * bitwise or the request type onto the priv_granted mask. 5547 */ 5548 priv_granted = 0; 5549 5550 if (type == VDIR) { 5551 /* 5552 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC 5553 * requests, instead of PRIV_VFS_EXEC. 5554 */ 5555 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5556 !priv_check_cred(cred, PRIV_VFS_LOOKUP)) 5557 priv_granted |= VEXEC; 5558 } else { 5559 /* 5560 * Ensure that at least one execute bit is on. Otherwise, 5561 * a privileged user will always succeed, and we don't want 5562 * this to happen unless the file really is executable. 5563 */ 5564 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) && 5565 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 && 5566 !priv_check_cred(cred, PRIV_VFS_EXEC)) 5567 priv_granted |= VEXEC; 5568 } 5569 5570 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) && 5571 !priv_check_cred(cred, PRIV_VFS_READ)) 5572 priv_granted |= VREAD; 5573 5574 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) && 5575 !priv_check_cred(cred, PRIV_VFS_WRITE)) 5576 priv_granted |= (VWRITE | VAPPEND); 5577 5578 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) && 5579 !priv_check_cred(cred, PRIV_VFS_ADMIN)) 5580 priv_granted |= VADMIN; 5581 5582 if ((accmode & (priv_granted | dac_granted)) == accmode) { 5583 return (0); 5584 } 5585 5586 return ((accmode & VADMIN) ? EPERM : EACCES); 5587 } 5588 5589 /* 5590 * Credential check based on process requesting service, and per-attribute 5591 * permissions. 
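 *
 * In short: kernel-originated requests (NOCRED) always succeed,
 * EXTATTR_NAMESPACE_SYSTEM requires PRIV_VFS_EXTATTR_SYSTEM,
 * EXTATTR_NAMESPACE_USER falls back to a regular VOP_ACCESS() check, and
 * any other namespace is rejected with EPERM.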
5592 */ 5593 int 5594 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, 5595 struct thread *td, accmode_t accmode) 5596 { 5597 5598 /* 5599 * Kernel-invoked always succeeds. 5600 */ 5601 if (cred == NOCRED) 5602 return (0); 5603 5604 /* 5605 * Do not allow privileged processes in jail to directly manipulate 5606 * system attributes. 5607 */ 5608 switch (attrnamespace) { 5609 case EXTATTR_NAMESPACE_SYSTEM: 5610 /* Potentially should be: return (EPERM); */ 5611 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM)); 5612 case EXTATTR_NAMESPACE_USER: 5613 return (VOP_ACCESS(vp, accmode, cred, td)); 5614 default: 5615 return (EPERM); 5616 } 5617 } 5618 5619 #ifdef DEBUG_VFS_LOCKS 5620 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */ 5621 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, 5622 "Drop into debugger on lock violation"); 5623 5624 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */ 5625 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 5626 0, "Check for interlock across VOPs"); 5627 5628 int vfs_badlock_print = 1; /* Print lock violations. */ 5629 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 5630 0, "Print lock violations"); 5631 5632 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */ 5633 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode, 5634 0, "Print vnode details on lock violations"); 5635 5636 #ifdef KDB 5637 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */ 5638 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, 5639 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations"); 5640 #endif 5641 5642 static void 5643 vfs_badlock(const char *msg, const char *str, struct vnode *vp) 5644 { 5645 5646 #ifdef KDB 5647 if (vfs_badlock_backtrace) 5648 kdb_backtrace(); 5649 #endif 5650 if (vfs_badlock_vnode) 5651 vn_printf(vp, "vnode "); 5652 if (vfs_badlock_print) 5653 printf("%s: %p %s\n", str, (void *)vp, msg); 5654 if (vfs_badlock_ddb) 5655 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5656 } 5657 5658 void 5659 assert_vi_locked(struct vnode *vp, const char *str) 5660 { 5661 5662 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp))) 5663 vfs_badlock("interlock is not locked but should be", str, vp); 5664 } 5665 5666 void 5667 assert_vi_unlocked(struct vnode *vp, const char *str) 5668 { 5669 5670 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp))) 5671 vfs_badlock("interlock is locked but should not be", str, vp); 5672 } 5673 5674 void 5675 assert_vop_locked(struct vnode *vp, const char *str) 5676 { 5677 if (KERNEL_PANICKED() || vp == NULL) 5678 return; 5679 5680 #ifdef WITNESS 5681 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5682 witness_is_owned(&vp->v_vnlock->lock_object) == -1) 5683 #else 5684 int locked = VOP_ISLOCKED(vp); 5685 if (locked == 0 || locked == LK_EXCLOTHER) 5686 #endif 5687 vfs_badlock("is not locked but should be", str, vp); 5688 } 5689 5690 void 5691 assert_vop_unlocked(struct vnode *vp, const char *str) 5692 { 5693 if (KERNEL_PANICKED() || vp == NULL) 5694 return; 5695 5696 #ifdef WITNESS 5697 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && 5698 witness_is_owned(&vp->v_vnlock->lock_object) == 1) 5699 #else 5700 if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) 5701 #endif 5702 vfs_badlock("is locked but should not be", str, vp); 5703 } 5704 5705 void 5706 assert_vop_elocked(struct vnode *vp, const char *str) 5707 { 5708 if (KERNEL_PANICKED() || vp == 
NULL) 5709 return; 5710 5711 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) 5712 vfs_badlock("is not exclusive locked but should be", str, vp); 5713 } 5714 #endif /* DEBUG_VFS_LOCKS */ 5715 5716 void 5717 vop_rename_fail(struct vop_rename_args *ap) 5718 { 5719 5720 if (ap->a_tvp != NULL) 5721 vput(ap->a_tvp); 5722 if (ap->a_tdvp == ap->a_tvp) 5723 vrele(ap->a_tdvp); 5724 else 5725 vput(ap->a_tdvp); 5726 vrele(ap->a_fdvp); 5727 vrele(ap->a_fvp); 5728 } 5729 5730 void 5731 vop_rename_pre(void *ap) 5732 { 5733 struct vop_rename_args *a = ap; 5734 5735 #ifdef DEBUG_VFS_LOCKS 5736 if (a->a_tvp) 5737 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); 5738 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); 5739 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); 5740 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); 5741 5742 /* Check the source (from). */ 5743 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && 5744 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) 5745 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); 5746 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) 5747 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); 5748 5749 /* Check the target. */ 5750 if (a->a_tvp) 5751 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); 5752 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); 5753 #endif 5754 /* 5755 * It may be tempting to add vn_seqc_write_begin/end calls here and 5756 * in vop_rename_post but that's not going to work out since some 5757 * filesystems relookup vnodes mid-rename. This is probably a bug. 5758 * 5759 * For now filesystems are expected to do the relevant calls after they 5760 * decide what vnodes to operate on. 5761 */ 5762 if (a->a_tdvp != a->a_fdvp) 5763 vhold(a->a_fdvp); 5764 if (a->a_tvp != a->a_fvp) 5765 vhold(a->a_fvp); 5766 vhold(a->a_tdvp); 5767 if (a->a_tvp) 5768 vhold(a->a_tvp); 5769 } 5770 5771 #ifdef DEBUG_VFS_LOCKS 5772 void 5773 vop_fplookup_vexec_debugpre(void *ap __unused) 5774 { 5775 5776 VFS_SMR_ASSERT_ENTERED(); 5777 } 5778 5779 void 5780 vop_fplookup_vexec_debugpost(void *ap, int rc) 5781 { 5782 struct vop_fplookup_vexec_args *a; 5783 struct vnode *vp; 5784 5785 a = ap; 5786 vp = a->a_vp; 5787 5788 VFS_SMR_ASSERT_ENTERED(); 5789 if (rc == EOPNOTSUPP) 5790 VNPASS(VN_IS_DOOMED(vp), vp); 5791 } 5792 5793 void 5794 vop_fplookup_symlink_debugpre(void *ap __unused) 5795 { 5796 5797 VFS_SMR_ASSERT_ENTERED(); 5798 } 5799 5800 void 5801 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused) 5802 { 5803 5804 VFS_SMR_ASSERT_ENTERED(); 5805 } 5806 5807 static void 5808 vop_fsync_debugprepost(struct vnode *vp, const char *name) 5809 { 5810 if (vp->v_type == VCHR) 5811 ; 5812 else if (MNT_EXTENDED_SHARED(vp->v_mount)) 5813 ASSERT_VOP_LOCKED(vp, name); 5814 else 5815 ASSERT_VOP_ELOCKED(vp, name); 5816 } 5817 5818 void 5819 vop_fsync_debugpre(void *a) 5820 { 5821 struct vop_fsync_args *ap; 5822 5823 ap = a; 5824 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5825 } 5826 5827 void 5828 vop_fsync_debugpost(void *a, int rc __unused) 5829 { 5830 struct vop_fsync_args *ap; 5831 5832 ap = a; 5833 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5834 } 5835 5836 void 5837 vop_fdatasync_debugpre(void *a) 5838 { 5839 struct vop_fdatasync_args *ap; 5840 5841 ap = a; 5842 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5843 } 5844 5845 void 5846 vop_fdatasync_debugpost(void *a, int rc __unused) 5847 { 5848 struct vop_fdatasync_args *ap; 5849 5850 ap = a; 5851 vop_fsync_debugprepost(ap->a_vp, "fsync"); 5852 } 5853 5854 void 5855 
vop_strategy_debugpre(void *ap) 5856 { 5857 struct vop_strategy_args *a; 5858 struct buf *bp; 5859 5860 a = ap; 5861 bp = a->a_bp; 5862 5863 /* 5864 * Cluster ops lock their component buffers but not the IO container. 5865 */ 5866 if ((bp->b_flags & B_CLUSTER) != 0) 5867 return; 5868 5869 if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) { 5870 if (vfs_badlock_print) 5871 printf( 5872 "VOP_STRATEGY: bp is not locked but should be\n"); 5873 if (vfs_badlock_ddb) 5874 kdb_enter(KDB_WHY_VFSLOCK, "lock violation"); 5875 } 5876 } 5877 5878 void 5879 vop_lock_debugpre(void *ap) 5880 { 5881 struct vop_lock1_args *a = ap; 5882 5883 if ((a->a_flags & LK_INTERLOCK) == 0) 5884 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5885 else 5886 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); 5887 } 5888 5889 void 5890 vop_lock_debugpost(void *ap, int rc) 5891 { 5892 struct vop_lock1_args *a = ap; 5893 5894 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); 5895 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) 5896 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); 5897 } 5898 5899 void 5900 vop_unlock_debugpre(void *ap) 5901 { 5902 struct vop_unlock_args *a = ap; 5903 struct vnode *vp = a->a_vp; 5904 5905 VNPASS(vn_get_state(vp) != VSTATE_UNINITIALIZED, vp); 5906 ASSERT_VOP_LOCKED(vp, "VOP_UNLOCK"); 5907 } 5908 5909 void 5910 vop_need_inactive_debugpre(void *ap) 5911 { 5912 struct vop_need_inactive_args *a = ap; 5913 5914 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5915 } 5916 5917 void 5918 vop_need_inactive_debugpost(void *ap, int rc) 5919 { 5920 struct vop_need_inactive_args *a = ap; 5921 5922 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); 5923 } 5924 #endif 5925 5926 void 5927 vop_create_pre(void *ap) 5928 { 5929 struct vop_create_args *a; 5930 struct vnode *dvp; 5931 5932 a = ap; 5933 dvp = a->a_dvp; 5934 vn_seqc_write_begin(dvp); 5935 } 5936 5937 void 5938 vop_create_post(void *ap, int rc) 5939 { 5940 struct vop_create_args *a; 5941 struct vnode *dvp; 5942 5943 a = ap; 5944 dvp = a->a_dvp; 5945 vn_seqc_write_end(dvp); 5946 if (!rc) 5947 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 5948 } 5949 5950 void 5951 vop_whiteout_pre(void *ap) 5952 { 5953 struct vop_whiteout_args *a; 5954 struct vnode *dvp; 5955 5956 a = ap; 5957 dvp = a->a_dvp; 5958 vn_seqc_write_begin(dvp); 5959 } 5960 5961 void 5962 vop_whiteout_post(void *ap, int rc) 5963 { 5964 struct vop_whiteout_args *a; 5965 struct vnode *dvp; 5966 5967 a = ap; 5968 dvp = a->a_dvp; 5969 vn_seqc_write_end(dvp); 5970 } 5971 5972 void 5973 vop_deleteextattr_pre(void *ap) 5974 { 5975 struct vop_deleteextattr_args *a; 5976 struct vnode *vp; 5977 5978 a = ap; 5979 vp = a->a_vp; 5980 vn_seqc_write_begin(vp); 5981 } 5982 5983 void 5984 vop_deleteextattr_post(void *ap, int rc) 5985 { 5986 struct vop_deleteextattr_args *a; 5987 struct vnode *vp; 5988 5989 a = ap; 5990 vp = a->a_vp; 5991 vn_seqc_write_end(vp); 5992 if (!rc) 5993 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); 5994 } 5995 5996 void 5997 vop_link_pre(void *ap) 5998 { 5999 struct vop_link_args *a; 6000 struct vnode *vp, *tdvp; 6001 6002 a = ap; 6003 vp = a->a_vp; 6004 tdvp = a->a_tdvp; 6005 vn_seqc_write_begin(vp); 6006 vn_seqc_write_begin(tdvp); 6007 } 6008 6009 void 6010 vop_link_post(void *ap, int rc) 6011 { 6012 struct vop_link_args *a; 6013 struct vnode *vp, *tdvp; 6014 6015 a = ap; 6016 vp = a->a_vp; 6017 tdvp = a->a_tdvp; 6018 vn_seqc_write_end(vp); 6019 vn_seqc_write_end(tdvp); 6020 if (!rc) { 6021 VFS_KNOTE_LOCKED(vp, NOTE_LINK); 6022 VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE); 6023 } 6024 } 6025 6026 void 6027 vop_mkdir_pre(void *ap) 6028 { 6029 struct 
vop_mkdir_args *a; 6030 struct vnode *dvp; 6031 6032 a = ap; 6033 dvp = a->a_dvp; 6034 vn_seqc_write_begin(dvp); 6035 } 6036 6037 void 6038 vop_mkdir_post(void *ap, int rc) 6039 { 6040 struct vop_mkdir_args *a; 6041 struct vnode *dvp; 6042 6043 a = ap; 6044 dvp = a->a_dvp; 6045 vn_seqc_write_end(dvp); 6046 if (!rc) 6047 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6048 } 6049 6050 #ifdef DEBUG_VFS_LOCKS 6051 void 6052 vop_mkdir_debugpost(void *ap, int rc) 6053 { 6054 struct vop_mkdir_args *a; 6055 6056 a = ap; 6057 if (!rc) 6058 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); 6059 } 6060 #endif 6061 6062 void 6063 vop_mknod_pre(void *ap) 6064 { 6065 struct vop_mknod_args *a; 6066 struct vnode *dvp; 6067 6068 a = ap; 6069 dvp = a->a_dvp; 6070 vn_seqc_write_begin(dvp); 6071 } 6072 6073 void 6074 vop_mknod_post(void *ap, int rc) 6075 { 6076 struct vop_mknod_args *a; 6077 struct vnode *dvp; 6078 6079 a = ap; 6080 dvp = a->a_dvp; 6081 vn_seqc_write_end(dvp); 6082 if (!rc) 6083 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6084 } 6085 6086 void 6087 vop_reclaim_post(void *ap, int rc) 6088 { 6089 struct vop_reclaim_args *a; 6090 struct vnode *vp; 6091 6092 a = ap; 6093 vp = a->a_vp; 6094 ASSERT_VOP_IN_SEQC(vp); 6095 if (!rc) 6096 VFS_KNOTE_LOCKED(vp, NOTE_REVOKE); 6097 } 6098 6099 void 6100 vop_remove_pre(void *ap) 6101 { 6102 struct vop_remove_args *a; 6103 struct vnode *dvp, *vp; 6104 6105 a = ap; 6106 dvp = a->a_dvp; 6107 vp = a->a_vp; 6108 vn_seqc_write_begin(dvp); 6109 vn_seqc_write_begin(vp); 6110 } 6111 6112 void 6113 vop_remove_post(void *ap, int rc) 6114 { 6115 struct vop_remove_args *a; 6116 struct vnode *dvp, *vp; 6117 6118 a = ap; 6119 dvp = a->a_dvp; 6120 vp = a->a_vp; 6121 vn_seqc_write_end(dvp); 6122 vn_seqc_write_end(vp); 6123 if (!rc) { 6124 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6125 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6126 } 6127 } 6128 6129 void 6130 vop_rename_post(void *ap, int rc) 6131 { 6132 struct vop_rename_args *a = ap; 6133 long hint; 6134 6135 if (!rc) { 6136 hint = NOTE_WRITE; 6137 if (a->a_fdvp == a->a_tdvp) { 6138 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) 6139 hint |= NOTE_LINK; 6140 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6141 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6142 } else { 6143 hint |= NOTE_EXTEND; 6144 if (a->a_fvp->v_type == VDIR) 6145 hint |= NOTE_LINK; 6146 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); 6147 6148 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && 6149 a->a_tvp->v_type == VDIR) 6150 hint &= ~NOTE_LINK; 6151 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); 6152 } 6153 6154 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); 6155 if (a->a_tvp) 6156 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); 6157 } 6158 if (a->a_tdvp != a->a_fdvp) 6159 vdrop(a->a_fdvp); 6160 if (a->a_tvp != a->a_fvp) 6161 vdrop(a->a_fvp); 6162 vdrop(a->a_tdvp); 6163 if (a->a_tvp) 6164 vdrop(a->a_tvp); 6165 } 6166 6167 void 6168 vop_rmdir_pre(void *ap) 6169 { 6170 struct vop_rmdir_args *a; 6171 struct vnode *dvp, *vp; 6172 6173 a = ap; 6174 dvp = a->a_dvp; 6175 vp = a->a_vp; 6176 vn_seqc_write_begin(dvp); 6177 vn_seqc_write_begin(vp); 6178 } 6179 6180 void 6181 vop_rmdir_post(void *ap, int rc) 6182 { 6183 struct vop_rmdir_args *a; 6184 struct vnode *dvp, *vp; 6185 6186 a = ap; 6187 dvp = a->a_dvp; 6188 vp = a->a_vp; 6189 vn_seqc_write_end(dvp); 6190 vn_seqc_write_end(vp); 6191 if (!rc) { 6192 vp->v_vflag |= VV_UNLINKED; 6193 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK); 6194 VFS_KNOTE_LOCKED(vp, NOTE_DELETE); 6195 } 6196 } 6197 6198 void 6199 vop_setattr_pre(void *ap) 6200 { 6201 struct vop_setattr_args *a; 6202 
struct vnode *vp; 6203 6204 a = ap; 6205 vp = a->a_vp; 6206 vn_seqc_write_begin(vp); 6207 } 6208 6209 void 6210 vop_setattr_post(void *ap, int rc) 6211 { 6212 struct vop_setattr_args *a; 6213 struct vnode *vp; 6214 6215 a = ap; 6216 vp = a->a_vp; 6217 vn_seqc_write_end(vp); 6218 if (!rc) 6219 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6220 } 6221 6222 void 6223 vop_setacl_pre(void *ap) 6224 { 6225 struct vop_setacl_args *a; 6226 struct vnode *vp; 6227 6228 a = ap; 6229 vp = a->a_vp; 6230 vn_seqc_write_begin(vp); 6231 } 6232 6233 void 6234 vop_setacl_post(void *ap, int rc __unused) 6235 { 6236 struct vop_setacl_args *a; 6237 struct vnode *vp; 6238 6239 a = ap; 6240 vp = a->a_vp; 6241 vn_seqc_write_end(vp); 6242 } 6243 6244 void 6245 vop_setextattr_pre(void *ap) 6246 { 6247 struct vop_setextattr_args *a; 6248 struct vnode *vp; 6249 6250 a = ap; 6251 vp = a->a_vp; 6252 vn_seqc_write_begin(vp); 6253 } 6254 6255 void 6256 vop_setextattr_post(void *ap, int rc) 6257 { 6258 struct vop_setextattr_args *a; 6259 struct vnode *vp; 6260 6261 a = ap; 6262 vp = a->a_vp; 6263 vn_seqc_write_end(vp); 6264 if (!rc) 6265 VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB); 6266 } 6267 6268 void 6269 vop_symlink_pre(void *ap) 6270 { 6271 struct vop_symlink_args *a; 6272 struct vnode *dvp; 6273 6274 a = ap; 6275 dvp = a->a_dvp; 6276 vn_seqc_write_begin(dvp); 6277 } 6278 6279 void 6280 vop_symlink_post(void *ap, int rc) 6281 { 6282 struct vop_symlink_args *a; 6283 struct vnode *dvp; 6284 6285 a = ap; 6286 dvp = a->a_dvp; 6287 vn_seqc_write_end(dvp); 6288 if (!rc) 6289 VFS_KNOTE_LOCKED(dvp, NOTE_WRITE); 6290 } 6291 6292 void 6293 vop_open_post(void *ap, int rc) 6294 { 6295 struct vop_open_args *a = ap; 6296 6297 if (!rc) 6298 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); 6299 } 6300 6301 void 6302 vop_close_post(void *ap, int rc) 6303 { 6304 struct vop_close_args *a = ap; 6305 6306 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ 6307 !VN_IS_DOOMED(a->a_vp))) { 6308 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? 6309 NOTE_CLOSE_WRITE : NOTE_CLOSE); 6310 } 6311 } 6312 6313 void 6314 vop_read_post(void *ap, int rc) 6315 { 6316 struct vop_read_args *a = ap; 6317 6318 if (!rc) 6319 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6320 } 6321 6322 void 6323 vop_read_pgcache_post(void *ap, int rc) 6324 { 6325 struct vop_read_pgcache_args *a = ap; 6326 6327 if (!rc) 6328 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); 6329 } 6330 6331 void 6332 vop_readdir_post(void *ap, int rc) 6333 { 6334 struct vop_readdir_args *a = ap; 6335 6336 if (!rc) 6337 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); 6338 } 6339 6340 static struct knlist fs_knlist; 6341 6342 static void 6343 vfs_event_init(void *arg) 6344 { 6345 knlist_init_mtx(&fs_knlist, NULL); 6346 } 6347 /* XXX - correct order? 
*/ 6348 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL); 6349 6350 void 6351 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused) 6352 { 6353 6354 KNOTE_UNLOCKED(&fs_knlist, event); 6355 } 6356 6357 static int filt_fsattach(struct knote *kn); 6358 static void filt_fsdetach(struct knote *kn); 6359 static int filt_fsevent(struct knote *kn, long hint); 6360 6361 struct filterops fs_filtops = { 6362 .f_isfd = 0, 6363 .f_attach = filt_fsattach, 6364 .f_detach = filt_fsdetach, 6365 .f_event = filt_fsevent 6366 }; 6367 6368 static int 6369 filt_fsattach(struct knote *kn) 6370 { 6371 6372 kn->kn_flags |= EV_CLEAR; 6373 knlist_add(&fs_knlist, kn, 0); 6374 return (0); 6375 } 6376 6377 static void 6378 filt_fsdetach(struct knote *kn) 6379 { 6380 6381 knlist_remove(&fs_knlist, kn, 0); 6382 } 6383 6384 static int 6385 filt_fsevent(struct knote *kn, long hint) 6386 { 6387 6388 kn->kn_fflags |= kn->kn_sfflags & hint; 6389 6390 return (kn->kn_fflags != 0); 6391 } 6392 6393 static int 6394 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS) 6395 { 6396 struct vfsidctl vc; 6397 int error; 6398 struct mount *mp; 6399 6400 error = SYSCTL_IN(req, &vc, sizeof(vc)); 6401 if (error) 6402 return (error); 6403 if (vc.vc_vers != VFS_CTL_VERS1) 6404 return (EINVAL); 6405 mp = vfs_getvfs(&vc.vc_fsid); 6406 if (mp == NULL) 6407 return (ENOENT); 6408 /* ensure that a specific sysctl goes to the right filesystem. */ 6409 if (strcmp(vc.vc_fstypename, "*") != 0 && 6410 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { 6411 vfs_rel(mp); 6412 return (EINVAL); 6413 } 6414 VCTLTOREQ(&vc, req); 6415 error = VFS_SYSCTL(mp, vc.vc_op, req); 6416 vfs_rel(mp); 6417 return (error); 6418 } 6419 6420 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR, 6421 NULL, 0, sysctl_vfs_ctl, "", 6422 "Sysctl by fsid"); 6423 6424 /* 6425 * Function to initialize a va_filerev field sensibly. 6426 * XXX: Wouldn't a random number make a lot more sense ?? 
6427 */ 6428 u_quad_t 6429 init_va_filerev(void) 6430 { 6431 struct bintime bt; 6432 6433 getbinuptime(&bt); 6434 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL)); 6435 } 6436 6437 static int filt_vfsread(struct knote *kn, long hint); 6438 static int filt_vfswrite(struct knote *kn, long hint); 6439 static int filt_vfsvnode(struct knote *kn, long hint); 6440 static void filt_vfsdetach(struct knote *kn); 6441 static struct filterops vfsread_filtops = { 6442 .f_isfd = 1, 6443 .f_detach = filt_vfsdetach, 6444 .f_event = filt_vfsread 6445 }; 6446 static struct filterops vfswrite_filtops = { 6447 .f_isfd = 1, 6448 .f_detach = filt_vfsdetach, 6449 .f_event = filt_vfswrite 6450 }; 6451 static struct filterops vfsvnode_filtops = { 6452 .f_isfd = 1, 6453 .f_detach = filt_vfsdetach, 6454 .f_event = filt_vfsvnode 6455 }; 6456 6457 static void 6458 vfs_knllock(void *arg) 6459 { 6460 struct vnode *vp = arg; 6461 6462 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 6463 } 6464 6465 static void 6466 vfs_knlunlock(void *arg) 6467 { 6468 struct vnode *vp = arg; 6469 6470 VOP_UNLOCK(vp); 6471 } 6472 6473 static void 6474 vfs_knl_assert_lock(void *arg, int what) 6475 { 6476 #ifdef DEBUG_VFS_LOCKS 6477 struct vnode *vp = arg; 6478 6479 if (what == LA_LOCKED) 6480 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked"); 6481 else 6482 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked"); 6483 #endif 6484 } 6485 6486 int 6487 vfs_kqfilter(struct vop_kqfilter_args *ap) 6488 { 6489 struct vnode *vp = ap->a_vp; 6490 struct knote *kn = ap->a_kn; 6491 struct knlist *knl; 6492 6493 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && 6494 kn->kn_filter != EVFILT_WRITE), 6495 ("READ/WRITE filter on a FIFO leaked through")); 6496 switch (kn->kn_filter) { 6497 case EVFILT_READ: 6498 kn->kn_fop = &vfsread_filtops; 6499 break; 6500 case EVFILT_WRITE: 6501 kn->kn_fop = &vfswrite_filtops; 6502 break; 6503 case EVFILT_VNODE: 6504 kn->kn_fop = &vfsvnode_filtops; 6505 break; 6506 default: 6507 return (EINVAL); 6508 } 6509 6510 kn->kn_hook = (caddr_t)vp; 6511 6512 v_addpollinfo(vp); 6513 if (vp->v_pollinfo == NULL) 6514 return (ENOMEM); 6515 knl = &vp->v_pollinfo->vpi_selinfo.si_note; 6516 vhold(vp); 6517 knlist_add(knl, kn, 0); 6518 6519 return (0); 6520 } 6521 6522 /* 6523 * Detach knote from vnode 6524 */ 6525 static void 6526 filt_vfsdetach(struct knote *kn) 6527 { 6528 struct vnode *vp = (struct vnode *)kn->kn_hook; 6529 6530 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); 6531 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); 6532 vdrop(vp); 6533 } 6534 6535 /*ARGSUSED*/ 6536 static int 6537 filt_vfsread(struct knote *kn, long hint) 6538 { 6539 struct vnode *vp = (struct vnode *)kn->kn_hook; 6540 off_t size; 6541 int res; 6542 6543 /* 6544 * filesystem is gone, so set the EOF flag and schedule 6545 * the knote for deletion. 
6546 */ 6547 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6548 VI_LOCK(vp); 6549 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6550 VI_UNLOCK(vp); 6551 return (1); 6552 } 6553 6554 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) 6555 return (0); 6556 6557 VI_LOCK(vp); 6558 kn->kn_data = size - kn->kn_fp->f_offset; 6559 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; 6560 VI_UNLOCK(vp); 6561 return (res); 6562 } 6563 6564 /*ARGSUSED*/ 6565 static int 6566 filt_vfswrite(struct knote *kn, long hint) 6567 { 6568 struct vnode *vp = (struct vnode *)kn->kn_hook; 6569 6570 VI_LOCK(vp); 6571 6572 /* 6573 * filesystem is gone, so set the EOF flag and schedule 6574 * the knote for deletion. 6575 */ 6576 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) 6577 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 6578 6579 kn->kn_data = 0; 6580 VI_UNLOCK(vp); 6581 return (1); 6582 } 6583 6584 static int 6585 filt_vfsvnode(struct knote *kn, long hint) 6586 { 6587 struct vnode *vp = (struct vnode *)kn->kn_hook; 6588 int res; 6589 6590 VI_LOCK(vp); 6591 if (kn->kn_sfflags & hint) 6592 kn->kn_fflags |= hint; 6593 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { 6594 kn->kn_flags |= EV_EOF; 6595 VI_UNLOCK(vp); 6596 return (1); 6597 } 6598 res = (kn->kn_fflags != 0); 6599 VI_UNLOCK(vp); 6600 return (res); 6601 } 6602 6603 int 6604 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off) 6605 { 6606 int error; 6607 6608 if (dp->d_reclen > ap->a_uio->uio_resid) 6609 return (ENAMETOOLONG); 6610 error = uiomove(dp, dp->d_reclen, ap->a_uio); 6611 if (error) { 6612 if (ap->a_ncookies != NULL) { 6613 if (ap->a_cookies != NULL) 6614 free(ap->a_cookies, M_TEMP); 6615 ap->a_cookies = NULL; 6616 *ap->a_ncookies = 0; 6617 } 6618 return (error); 6619 } 6620 if (ap->a_ncookies == NULL) 6621 return (0); 6622 6623 KASSERT(ap->a_cookies, 6624 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); 6625 6626 *ap->a_cookies = realloc(*ap->a_cookies, 6627 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); 6628 (*ap->a_cookies)[*ap->a_ncookies] = off; 6629 *ap->a_ncookies += 1; 6630 return (0); 6631 } 6632 6633 /* 6634 * The purpose of this routine is to remove granularity from accmode_t, 6635 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE, 6636 * VADMIN and VAPPEND. 6637 * 6638 * If it returns 0, the caller is supposed to continue with the usual 6639 * access checks using 'accmode' as modified by this routine. If it 6640 * returns nonzero value, the caller is supposed to return that value 6641 * as errno. 6642 * 6643 * Note that after this routine runs, accmode may be zero. 6644 */ 6645 int 6646 vfs_unixify_accmode(accmode_t *accmode) 6647 { 6648 /* 6649 * There is no way to specify explicit "deny" rule using 6650 * file mode or POSIX.1e ACLs. 6651 */ 6652 if (*accmode & VEXPLICIT_DENY) { 6653 *accmode = 0; 6654 return (0); 6655 } 6656 6657 /* 6658 * None of these can be translated into usual access bits. 6659 * Also, the common case for NFSv4 ACLs is to not contain 6660 * either of these bits. Caller should check for VWRITE 6661 * on the containing directory instead. 6662 */ 6663 if (*accmode & (VDELETE_CHILD | VDELETE)) 6664 return (EPERM); 6665 6666 if (*accmode & VADMIN_PERMS) { 6667 *accmode &= ~VADMIN_PERMS; 6668 *accmode |= VADMIN; 6669 } 6670 6671 /* 6672 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL 6673 * or VSYNCHRONIZE using file mode or POSIX.1e ACL. 
6674 */ 6675 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE); 6676 6677 return (0); 6678 } 6679 6680 /* 6681 * Clear out a doomed vnode (if any) and replace it with a new one as long 6682 * as the fs is not being unmounted. Return the root vnode to the caller. 6683 */ 6684 static int __noinline 6685 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) 6686 { 6687 struct vnode *vp; 6688 int error; 6689 6690 restart: 6691 if (mp->mnt_rootvnode != NULL) { 6692 MNT_ILOCK(mp); 6693 vp = mp->mnt_rootvnode; 6694 if (vp != NULL) { 6695 if (!VN_IS_DOOMED(vp)) { 6696 vrefact(vp); 6697 MNT_IUNLOCK(mp); 6698 error = vn_lock(vp, flags); 6699 if (error == 0) { 6700 *vpp = vp; 6701 return (0); 6702 } 6703 vrele(vp); 6704 goto restart; 6705 } 6706 /* 6707 * Clear the old one. 6708 */ 6709 mp->mnt_rootvnode = NULL; 6710 } 6711 MNT_IUNLOCK(mp); 6712 if (vp != NULL) { 6713 vfs_op_barrier_wait(mp); 6714 vrele(vp); 6715 } 6716 } 6717 error = VFS_CACHEDROOT(mp, flags, vpp); 6718 if (error != 0) 6719 return (error); 6720 if (mp->mnt_vfs_ops == 0) { 6721 MNT_ILOCK(mp); 6722 if (mp->mnt_vfs_ops != 0) { 6723 MNT_IUNLOCK(mp); 6724 return (0); 6725 } 6726 if (mp->mnt_rootvnode == NULL) { 6727 vrefact(*vpp); 6728 mp->mnt_rootvnode = *vpp; 6729 } else { 6730 if (mp->mnt_rootvnode != *vpp) { 6731 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { 6732 panic("%s: mismatch between vnode returned " 6733 " by VFS_CACHEDROOT and the one cached " 6734 " (%p != %p)", 6735 __func__, *vpp, mp->mnt_rootvnode); 6736 } 6737 } 6738 } 6739 MNT_IUNLOCK(mp); 6740 } 6741 return (0); 6742 } 6743 6744 int 6745 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) 6746 { 6747 struct mount_pcpu *mpcpu; 6748 struct vnode *vp; 6749 int error; 6750 6751 if (!vfs_op_thread_enter(mp, mpcpu)) 6752 return (vfs_cache_root_fallback(mp, flags, vpp)); 6753 vp = atomic_load_ptr(&mp->mnt_rootvnode); 6754 if (vp == NULL || VN_IS_DOOMED(vp)) { 6755 vfs_op_thread_exit(mp, mpcpu); 6756 return (vfs_cache_root_fallback(mp, flags, vpp)); 6757 } 6758 vrefact(vp); 6759 vfs_op_thread_exit(mp, mpcpu); 6760 error = vn_lock(vp, flags); 6761 if (error != 0) { 6762 vrele(vp); 6763 return (vfs_cache_root_fallback(mp, flags, vpp)); 6764 } 6765 *vpp = vp; 6766 return (0); 6767 } 6768 6769 struct vnode * 6770 vfs_cache_root_clear(struct mount *mp) 6771 { 6772 struct vnode *vp; 6773 6774 /* 6775 * ops > 0 guarantees there is nobody who can see this vnode 6776 */ 6777 MPASS(mp->mnt_vfs_ops > 0); 6778 vp = mp->mnt_rootvnode; 6779 if (vp != NULL) 6780 vn_seqc_write_begin(vp); 6781 mp->mnt_rootvnode = NULL; 6782 return (vp); 6783 } 6784 6785 void 6786 vfs_cache_root_set(struct mount *mp, struct vnode *vp) 6787 { 6788 6789 MPASS(mp->mnt_vfs_ops > 0); 6790 vrefact(vp); 6791 mp->mnt_rootvnode = vp; 6792 } 6793 6794 /* 6795 * These are helper functions for filesystems to traverse all 6796 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h. 6797 * 6798 * This interface replaces MNT_VNODE_FOREACH. 6799 */ 6800 6801 struct vnode * 6802 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) 6803 { 6804 struct vnode *vp; 6805 6806 maybe_yield(); 6807 MNT_ILOCK(mp); 6808 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6809 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; 6810 vp = TAILQ_NEXT(vp, v_nmntvnodes)) { 6811 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
*/ 6812 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6813 continue; 6814 VI_LOCK(vp); 6815 if (VN_IS_DOOMED(vp)) { 6816 VI_UNLOCK(vp); 6817 continue; 6818 } 6819 break; 6820 } 6821 if (vp == NULL) { 6822 __mnt_vnode_markerfree_all(mvp, mp); 6823 /* MNT_IUNLOCK(mp); -- done in above function */ 6824 mtx_assert(MNT_MTX(mp), MA_NOTOWNED); 6825 return (NULL); 6826 } 6827 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6828 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6829 MNT_IUNLOCK(mp); 6830 return (vp); 6831 } 6832 6833 struct vnode * 6834 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) 6835 { 6836 struct vnode *vp; 6837 6838 *mvp = vn_alloc_marker(mp); 6839 MNT_ILOCK(mp); 6840 MNT_REF(mp); 6841 6842 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { 6843 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ 6844 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) 6845 continue; 6846 VI_LOCK(vp); 6847 if (VN_IS_DOOMED(vp)) { 6848 VI_UNLOCK(vp); 6849 continue; 6850 } 6851 break; 6852 } 6853 if (vp == NULL) { 6854 MNT_REL(mp); 6855 MNT_IUNLOCK(mp); 6856 vn_free_marker(*mvp); 6857 *mvp = NULL; 6858 return (NULL); 6859 } 6860 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); 6861 MNT_IUNLOCK(mp); 6862 return (vp); 6863 } 6864 6865 void 6866 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) 6867 { 6868 6869 if (*mvp == NULL) { 6870 MNT_IUNLOCK(mp); 6871 return; 6872 } 6873 6874 mtx_assert(MNT_MTX(mp), MA_OWNED); 6875 6876 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6877 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); 6878 MNT_REL(mp); 6879 MNT_IUNLOCK(mp); 6880 vn_free_marker(*mvp); 6881 *mvp = NULL; 6882 } 6883 6884 /* 6885 * These are helper functions for filesystems to traverse their 6886 * lazy vnodes. See MNT_VNODE_FOREACH_LAZY() in sys/mount.h 6887 */ 6888 static void 6889 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 6890 { 6891 6892 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6893 6894 MNT_ILOCK(mp); 6895 MNT_REL(mp); 6896 MNT_IUNLOCK(mp); 6897 vn_free_marker(*mvp); 6898 *mvp = NULL; 6899 } 6900 6901 /* 6902 * Relock the mp mount vnode list lock with the vp vnode interlock in the 6903 * conventional lock order during mnt_vnode_next_lazy iteration. 6904 * 6905 * On entry, the mount vnode list lock is held and the vnode interlock is not. 6906 * The list lock is dropped and reacquired. On success, both locks are held. 6907 * On failure, the mount vnode list lock is held but the vnode interlock is 6908 * not, and the procedure may have yielded. 6909 */ 6910 static bool 6911 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, 6912 struct vnode *vp) 6913 { 6914 6915 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && 6916 TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp, 6917 ("%s: bad marker", __func__)); 6918 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, 6919 ("%s: inappropriate vnode", __func__)); 6920 ASSERT_VI_UNLOCKED(vp, __func__); 6921 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6922 6923 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); 6924 TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist); 6925 6926 /* 6927 * Note we may be racing against vdrop which transitioned the hold 6928 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, 6929 * if we are the only user after we get the interlock we will just 6930 * vdrop. 
6931 */ 6932 vhold(vp); 6933 mtx_unlock(&mp->mnt_listmtx); 6934 VI_LOCK(vp); 6935 if (VN_IS_DOOMED(vp)) { 6936 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); 6937 goto out_lost; 6938 } 6939 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); 6940 /* 6941 * There is nothing to do if we are the last user. 6942 */ 6943 if (!refcount_release_if_not_last(&vp->v_holdcnt)) 6944 goto out_lost; 6945 mtx_lock(&mp->mnt_listmtx); 6946 return (true); 6947 out_lost: 6948 vdropl(vp); 6949 maybe_yield(); 6950 mtx_lock(&mp->mnt_listmtx); 6951 return (false); 6952 } 6953 6954 static struct vnode * 6955 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 6956 void *cbarg) 6957 { 6958 struct vnode *vp; 6959 6960 mtx_assert(&mp->mnt_listmtx, MA_OWNED); 6961 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); 6962 restart: 6963 vp = TAILQ_NEXT(*mvp, v_lazylist); 6964 while (vp != NULL) { 6965 if (vp->v_type == VMARKER) { 6966 vp = TAILQ_NEXT(vp, v_lazylist); 6967 continue; 6968 } 6969 /* 6970 * See if we want to process the vnode. Note we may encounter a 6971 * long string of vnodes we don't care about and hog the list 6972 * as a result. Check for it and requeue the marker. 6973 */ 6974 VNPASS(!VN_IS_DOOMED(vp), vp); 6975 if (!cb(vp, cbarg)) { 6976 if (!should_yield()) { 6977 vp = TAILQ_NEXT(vp, v_lazylist); 6978 continue; 6979 } 6980 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, 6981 v_lazylist); 6982 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, 6983 v_lazylist); 6984 mtx_unlock(&mp->mnt_listmtx); 6985 kern_yield(PRI_USER); 6986 mtx_lock(&mp->mnt_listmtx); 6987 goto restart; 6988 } 6989 /* 6990 * Try-lock because this is the wrong lock order. 6991 */ 6992 if (!VI_TRYLOCK(vp) && 6993 !mnt_vnode_next_lazy_relock(*mvp, mp, vp)) 6994 goto restart; 6995 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); 6996 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, 6997 ("alien vnode on the lazy list %p %p", vp, mp)); 6998 VNPASS(vp->v_mount == mp, vp); 6999 VNPASS(!VN_IS_DOOMED(vp), vp); 7000 break; 7001 } 7002 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7003 7004 /* Check if we are done */ 7005 if (vp == NULL) { 7006 mtx_unlock(&mp->mnt_listmtx); 7007 mnt_vnode_markerfree_lazy(mvp, mp); 7008 return (NULL); 7009 } 7010 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); 7011 mtx_unlock(&mp->mnt_listmtx); 7012 ASSERT_VI_LOCKED(vp, "lazy iter"); 7013 return (vp); 7014 } 7015 7016 struct vnode * 7017 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7018 void *cbarg) 7019 { 7020 7021 maybe_yield(); 7022 mtx_lock(&mp->mnt_listmtx); 7023 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7024 } 7025 7026 struct vnode * 7027 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, 7028 void *cbarg) 7029 { 7030 struct vnode *vp; 7031 7032 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) 7033 return (NULL); 7034 7035 *mvp = vn_alloc_marker(mp); 7036 MNT_ILOCK(mp); 7037 MNT_REF(mp); 7038 MNT_IUNLOCK(mp); 7039 7040 mtx_lock(&mp->mnt_listmtx); 7041 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); 7042 if (vp == NULL) { 7043 mtx_unlock(&mp->mnt_listmtx); 7044 mnt_vnode_markerfree_lazy(mvp, mp); 7045 return (NULL); 7046 } 7047 TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist); 7048 return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg)); 7049 } 7050 7051 void 7052 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) 7053 { 7054 7055 if (*mvp == NULL) 7056 return; 7057 7058 mtx_lock(&mp->mnt_listmtx); 7059 
TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); 7060 mtx_unlock(&mp->mnt_listmtx); 7061 mnt_vnode_markerfree_lazy(mvp, mp); 7062 } 7063 7064 int 7065 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) 7066 { 7067 7068 if ((cnp->cn_flags & NOEXECCHECK) != 0) { 7069 cnp->cn_flags &= ~NOEXECCHECK; 7070 return (0); 7071 } 7072 7073 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); 7074 } 7075 7076 /* 7077 * Do not use this variant unless you have means other than the hold count 7078 * to prevent the vnode from getting freed. 7079 */ 7080 void 7081 vn_seqc_write_begin_locked(struct vnode *vp) 7082 { 7083 7084 ASSERT_VI_LOCKED(vp, __func__); 7085 VNPASS(vp->v_holdcnt > 0, vp); 7086 VNPASS(vp->v_seqc_users >= 0, vp); 7087 vp->v_seqc_users++; 7088 if (vp->v_seqc_users == 1) 7089 seqc_sleepable_write_begin(&vp->v_seqc); 7090 } 7091 7092 void 7093 vn_seqc_write_begin(struct vnode *vp) 7094 { 7095 7096 VI_LOCK(vp); 7097 vn_seqc_write_begin_locked(vp); 7098 VI_UNLOCK(vp); 7099 } 7100 7101 void 7102 vn_seqc_write_end_locked(struct vnode *vp) 7103 { 7104 7105 ASSERT_VI_LOCKED(vp, __func__); 7106 VNPASS(vp->v_seqc_users > 0, vp); 7107 vp->v_seqc_users--; 7108 if (vp->v_seqc_users == 0) 7109 seqc_sleepable_write_end(&vp->v_seqc); 7110 } 7111 7112 void 7113 vn_seqc_write_end(struct vnode *vp) 7114 { 7115 7116 VI_LOCK(vp); 7117 vn_seqc_write_end_locked(vp); 7118 VI_UNLOCK(vp); 7119 } 7120 7121 /* 7122 * Special case handling for allocating and freeing vnodes. 7123 * 7124 * The counter remains unchanged on free so that a doomed vnode will 7125 * keep testing as in modify as long as it is accessible with SMR. 7126 */ 7127 static void 7128 vn_seqc_init(struct vnode *vp) 7129 { 7130 7131 vp->v_seqc = 0; 7132 vp->v_seqc_users = 0; 7133 } 7134 7135 static void 7136 vn_seqc_write_end_free(struct vnode *vp) 7137 { 7138 7139 VNPASS(seqc_in_modify(vp->v_seqc), vp); 7140 VNPASS(vp->v_seqc_users == 1, vp); 7141 } 7142 7143 void 7144 vn_irflag_set_locked(struct vnode *vp, short toset) 7145 { 7146 short flags; 7147 7148 ASSERT_VI_LOCKED(vp, __func__); 7149 flags = vn_irflag_read(vp); 7150 VNASSERT((flags & toset) == 0, vp, 7151 ("%s: some of the passed flags already set (have %d, passed %d)\n", 7152 __func__, flags, toset)); 7153 atomic_store_short(&vp->v_irflag, flags | toset); 7154 } 7155 7156 void 7157 vn_irflag_set(struct vnode *vp, short toset) 7158 { 7159 7160 VI_LOCK(vp); 7161 vn_irflag_set_locked(vp, toset); 7162 VI_UNLOCK(vp); 7163 } 7164 7165 void 7166 vn_irflag_set_cond_locked(struct vnode *vp, short toset) 7167 { 7168 short flags; 7169 7170 ASSERT_VI_LOCKED(vp, __func__); 7171 flags = vn_irflag_read(vp); 7172 atomic_store_short(&vp->v_irflag, flags | toset); 7173 } 7174 7175 void 7176 vn_irflag_set_cond(struct vnode *vp, short toset) 7177 { 7178 7179 VI_LOCK(vp); 7180 vn_irflag_set_cond_locked(vp, toset); 7181 VI_UNLOCK(vp); 7182 } 7183 7184 void 7185 vn_irflag_unset_locked(struct vnode *vp, short tounset) 7186 { 7187 short flags; 7188 7189 ASSERT_VI_LOCKED(vp, __func__); 7190 flags = vn_irflag_read(vp); 7191 VNASSERT((flags & tounset) == tounset, vp, 7192 ("%s: some of the passed flags not set (have %d, passed %d)\n", 7193 __func__, flags, tounset)); 7194 atomic_store_short(&vp->v_irflag, flags & ~tounset); 7195 } 7196 7197 void 7198 vn_irflag_unset(struct vnode *vp, short tounset) 7199 { 7200 7201 VI_LOCK(vp); 7202 vn_irflag_unset_locked(vp, tounset); 7203 VI_UNLOCK(vp); 7204 } 7205 7206 int 7207 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) 7208 { 7209 
struct vattr vattr; 7210 int error; 7211 7212 ASSERT_VOP_LOCKED(vp, __func__); 7213 error = VOP_GETATTR(vp, &vattr, cred); 7214 if (__predict_true(error == 0)) { 7215 if (vattr.va_size <= OFF_MAX) 7216 *size = vattr.va_size; 7217 else 7218 error = EFBIG; 7219 } 7220 return (error); 7221 } 7222 7223 int 7224 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) 7225 { 7226 int error; 7227 7228 VOP_LOCK(vp, LK_SHARED); 7229 error = vn_getsize_locked(vp, size, cred); 7230 VOP_UNLOCK(vp); 7231 return (error); 7232 } 7233 7234 #ifdef INVARIANTS 7235 void 7236 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) 7237 { 7238 7239 switch (vp->v_state) { 7240 case VSTATE_UNINITIALIZED: 7241 switch (state) { 7242 case VSTATE_CONSTRUCTED: 7243 case VSTATE_DESTROYING: 7244 return; 7245 default: 7246 break; 7247 } 7248 break; 7249 case VSTATE_CONSTRUCTED: 7250 ASSERT_VOP_ELOCKED(vp, __func__); 7251 switch (state) { 7252 case VSTATE_DESTROYING: 7253 return; 7254 default: 7255 break; 7256 } 7257 break; 7258 case VSTATE_DESTROYING: 7259 ASSERT_VOP_ELOCKED(vp, __func__); 7260 switch (state) { 7261 case VSTATE_DEAD: 7262 return; 7263 default: 7264 break; 7265 } 7266 break; 7267 case VSTATE_DEAD: 7268 switch (state) { 7269 case VSTATE_UNINITIALIZED: 7270 return; 7271 default: 7272 break; 7273 } 7274 break; 7275 } 7276 7277 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); 7278 panic("invalid state transition %d -> %d\n", vp->v_state, state); 7279 } 7280 #endif 7281
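#if 0
/*
 * Illustrative sketch, not part of the original file: a typical consumer of
 * the MNT_VNODE_FOREACH_ALL() iterator implemented above (the lazy variant,
 * MNT_VNODE_FOREACH_LAZY(), follows the same pattern with a filter callback).
 * The function name and the VREG-counting policy here are hypothetical; the
 * locking contract is the one established by __mnt_vnode_first_all() and
 * __mnt_vnode_next_all(): each vnode is handed back with its interlock held
 * and must be unlocked (or passed to vget() with LK_INTERLOCK) before the
 * next iteration, and an early exit must free the marker vnode with
 * MNT_VNODE_FOREACH_ALL_ABORT().
 */
static int
example_count_regular_vnodes(struct mount *mp, int limit)
{
	struct vnode *vp, *mvp;
	int count;

	count = 0;
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/* The iterator returns vp with VI_LOCK(vp) held. */
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		count++;
		VI_UNLOCK(vp);
		if (count >= limit) {
			/*
			 * Frees the marker vnode and releases the mount
			 * reference; required before leaving the loop early.
			 */
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			break;
		}
	}
	return (count);
}
#endif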