/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/spinlock2.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
			mount_fsid_cmp, fsid_t *);

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
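
/*
 * Usage note (sketch, not from the original source): vremovevnodemnt()
 * is the "fix up the cursor" half of the scan protocol.  Any in-progress
 * vmntvnodescan() whose info->vp cursor points at the vnode being removed
 * is advanced past it first, which is what makes removals safe while
 * scans are running.
 */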

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}
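
/*
 * Example (sketch, not from the original source): the usual busy/unbusy
 * pairing for code that must hold off an unmount while it works on a
 * mount point.  LK_NOWAIT makes vfs_busy() fail with ENOENT instead of
 * sleeping when an unmount is already in progress.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		...operate on mp; the unmount is held off...
 *		vfs_unbusy(mp);
 *	}
 */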

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
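
/*
 * Example (sketch, not from the original source): vfs_getvfs() returns
 * a held mount, so fsid lookups must pair with mount_drop() once the
 * caller is done:
 *
 *	struct mount *mp;
 *
 *	if ((mp = vfs_getvfs(&fsid)) != NULL) {
 *		...use mp; the hold keeps it from being freed...
 *		mount_drop(mp);
 *	}
 */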

/*
 * Generate a FSID based on the mountpt.  The FSID will be adjusted to avoid
 * collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}
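
/*
 * Example (sketch, not from the original source): a filesystem's
 * VFS_MOUNT() code typically calls one of the two functions above
 * before the mount is committed, e.g.
 *
 *	vfs_getnewfsid(mp);		(fsid derived from the mount path)
 * or
 *	vfs_setfsid(mp, &fsid);		(filesystem-supplied template)
 *
 * Either way, mountlist_insert() will still adjust the fsid later if it
 * collides with an existing mount.
 */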

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  Filesystem should attempt to
 * supply a unique fsid but if a duplicate occurs adjust the fsid to ensure
 * uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
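
/*
 * Example (sketch, not from the original source): a minimal interlock
 * callback.  It runs with the mountlist token exclusively held, so it
 * must not block for long.  The callback name is hypothetical.
 *
 *	static int
 *	example_interlock(struct mount *mp)
 *	{
 *		if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *			return (EBUSY);
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(example_interlock, mp);
 */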

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.  This function is mainly
 * used by the VFS quota code to check whether a cached nullfs struct
 * mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}
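
/*
 * Example (sketch, not from the original source): the quota-style
 * pattern mountlist_exists() is meant to support, revalidating a cached
 * mount pointer before use.  The VFS_ACCOUNT() argument list shown here
 * is an assumption, and per the FIXME above the check and the use are
 * not atomic.
 *
 *	if (mountlist_exists(cached_mp))
 *		VFS_ACCOUNT(cached_mp, uid, gid, delta);
 */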

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}
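
/*
 * Example (sketch, not from the original source): counting all mounts
 * via mountlist_scan().  The callback name is hypothetical.  Because
 * MNTSCAN_NOBUSY is not given, each mount is busied around the callback,
 * and the per-mount return values are summed into the result.
 *
 *	static int
 *	example_count_cb(struct mount *mp, void *data)
 *	{
 *		return (1);	(returning < 0 would abort the scan)
 *	}
 *
 *	nmounts = mountlist_scan(example_count_cb, NULL, MNTSCAN_FORWARD);
 */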

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
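
/*
 * Example (sketch, not from the original source): a typical
 * vmntvnodescan() pass.  The callback name is hypothetical; VMSC_GETVP
 * makes the scanner vget() each vnode before the slow callback and
 * vput() it afterwards, so vp arrives locked and referenced.
 *
 *	static int
 *	example_slow_cb(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		...vp is locked here; a non-zero return stops the scan...
 *		return (0);
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, NULL, example_slow_cb, NULL);
 */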

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
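
/*
 * Example (sketch, not from the original source): how a filesystem's
 * unmount path commonly drives vflush().  A rootrefs of 1 accounts for
 * the filesystem's own reference on its root vnode, and FORCECLOSE is
 * added for forced unmounts; the exact pattern varies per filesystem.
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 */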

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	     (VOP_GETATTR(vp, &vattr) == 0 &&
	      vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
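
/*
 * Example (sketch, not from the original source): how a
 * dependency-tracking consumer such as softupdates would hook in.  The
 * ops table and function names are hypothetical; io_sync is the member
 * bio_ops_sync() below invokes.
 *
 *	static struct bio_ops example_bioops = {
 *		.io_sync = example_io_sync,
 *	};
 *
 *	add_bio_ops(&example_bioops);
 */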

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 *	    io_sync is called with a NULL mount from the general syncing
 *	    code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}