/*
 * Copyright (c) 2004,2013-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int
mount_cmp(struct mount *mnt1, struct mount *mnt2)
{
	if (mnt1->mnt_stat.f_fsid.val[0] < mnt2->mnt_stat.f_fsid.val[0])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[0] > mnt2->mnt_stat.f_fsid.val[0])
		return 1;
	if (mnt1->mnt_stat.f_fsid.val[1] < mnt2->mnt_stat.f_fsid.val[1])
		return -1;
	if (mnt1->mnt_stat.f_fsid.val[1] > mnt2->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

static int
mount_fsid_cmp(fsid_t *fsid, struct mount *mnt)
{
	if (fsid->val[0] < mnt->mnt_stat.f_fsid.val[0])
		return -1;
	if (fsid->val[0] > mnt->mnt_stat.f_fsid.val[0])
		return 1;
	if (fsid->val[1] < mnt->mnt_stat.f_fsid.val[1])
		return -1;
	if (fsid->val[1] > mnt->mnt_stat.f_fsid.val[1])
		return 1;
	return 0;
}

RB_HEAD(mount_rb_tree, mount);
RB_PROTOTYPEX(mount_rb_tree, FSID, mount, mnt_node, mount_cmp, fsid_t *);
RB_GENERATE(mount_rb_tree, mount, mnt_node, mount_cmp);
RB_GENERATE_XLOOKUP(mount_rb_tree, FSID, mount, mnt_node,
		mount_fsid_cmp, fsid_t *);

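/*
 * Illustrative sketch only: RB_GENERATE_XLOOKUP() above emits an extra
 * keyed lookup, mount_rb_tree_RB_LOOKUP_FSID(), which walks the tree
 * comparing an fsid_t key against each node via mount_fsid_cmp()
 * instead of comparing two nodes via mount_cmp().  A caller holding
 * mountlist_token might use it roughly like this:
 *
 *	struct mount *mp;
 *
 *	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, &fsid);
 *	if (mp)
 *		mount_hold(mp);
 *
 * This is exactly the pattern vfs_getvfs() below implements.
 */
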
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mount_rb_tree mounttree = RB_INITIALIZER(dev_tree_mounttree);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from its mount point's
 * vnode list and deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() the vnode, or vx_unlock() it if it
 * wishes to leave a vref intact.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;
	vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

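/*
 * Illustrative sketch only (hypothetical filesystem code): a typical
 * caller fills in the type-specific fields while still holding the VX
 * lock and ref returned by getnewvnode() above, e.g.:
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
 *	if (error == 0) {
 *		vp->v_data = ip;	<- hypothetical fs-private inode
 *		vp->v_type = VREG;	<- clears VNON, vnode becomes usable
 *		vx_unlock(vp);		<- keep the vref, drop the VX lock
 *	}
 */
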
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 *
		 * WARNING! mp can potentially go away once we release
		 *	    our ref.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
	mount_hold(mp);
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
	mount_drop(mp);
}

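/*
 * Illustrative sketch only: a caller wishing to operate on a mount
 * without racing an unmount brackets the work with the busy lock,
 * typically passing LK_NOWAIT when sleeping is undesirable:
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		... the filesystem cannot be unmounted here ...
 *		vfs_unbusy(mp);
 *	}
 *
 * mountlist_scan() below uses this exact pattern around its callback.
 */
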
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

	/*
	 * Pre-set MPSAFE flags for VFS_MOUNT() call.
	 */
	if (vfsp->vfc_flags & VFCF_MPSAFE)
		mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

	*mpp = mp;

	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;		/* hold for umount last drop */
	mp->mnt_iosize_max = MAXPHYS;
	vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
		KKASSERT(mp->mnt_refs == 0);
		kfree(mp, M_MOUNT);
	}
}

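/*
 * Illustrative sketch only: mnt_hold is a reference count whose final
 * drop frees the mount.  mount_init() starts it at 1 and the unmount
 * path owns that initial hold (dropped last), so any code that may
 * touch mp after releasing its token wraps the access:
 *
 *	mount_hold(mp);
 *	... potentially blocking work; mp cannot be freed here ...
 *	mount_drop(mp);		<- frees mp if this was the last hold
 *
 * vfs_unbusy() above is an in-tree example of this pattern.
 */
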
/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken_shared(&mountlist_token);
	mp = mount_rb_tree_RB_LOOKUP_FSID(&mounttree, fsid);
	if (mp)
		mount_hold(mp);
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Generate an FSID based on the mount point.  The FSID will be adjusted
 * to avoid collisions when the mount is added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 *
 * OLD:
 *
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;
	int error;
	char *retbuf;
	char *freebuf;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	error = cache_fullpath(NULL, &mp->mnt_ncmounton, NULL,
			       &retbuf, &freebuf, 0);
	if (error) {
		tfsid.val[0] = makeudev(255, 0);
	} else {
		tfsid.val[0] = makeudev(255,
					iscsi_crc32(retbuf, strlen(retbuf)) &
					~makeudev(255, 0));
		kfree(freebuf, M_TEMP);
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}

/*
 * Set the FSID for a new mount point to the template.
 *
 * The FSID will be adjusted to avoid collisions when the mount is
 * added to mountlist.
 *
 * May only be called prior to the mount succeeding.
 */
void
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

#if 0
	struct mount *mptmp;

	lwkt_gettoken(&mntid_token);
	for (;;) {
		mptmp = vfs_getvfs(template);
		if (mptmp == NULL)
			break;
		mount_drop(mptmp);
		++template->val[1];
	}
	lwkt_reltoken(&mntid_token);
#endif
	mp->mnt_stat.f_fsid = *template;
}

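/*
 * Illustrative note on the composition in vfs_getnewfsid() above:
 * val[0] is a fake device number with major 255 and a minor derived
 * from the CRC of the mount point path.  makeudev(255, 0) sets exactly
 * the major bits, so masking the CRC with ~makeudev(255, 0) keeps only
 * minor bits; conceptually:
 *
 *	minor = iscsi_crc32(path, strlen(path)) & ~makeudev(255, 0);
 *	val[0] = makeudev(255, minor);
 *
 * Collisions are tolerated here; mountlist_insert() below resolves
 * them when the mount is added to the mountlist.
 */
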
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		int ncachedandinactive;

		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    ncachedandinactive >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (ncachedandinactive) / 100)
				count = (ncachedandinactive) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		synchronizevnodecount();
		ncachedandinactive = countcachedandinactivevnodes();
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    ncachedandinactive <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

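/*
 * Worked example (assuming kern.maxvnodes = 100000): the trigger above
 * fires once numvnodes >= 90000 while at least 50000 vnodes are cached
 * or inactive.  With numvnodes = 95000 and ncachedandinactive = 60000,
 * count starts at 95000 - 90000 = 5000, is capped at 60000 / 100 = 600,
 * and is floored at 5, so freesomevnodes(600) is called.  The cap keeps
 * each pass cheap; the loop simply runs again if pressure persists.
 */
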
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.  Filesystem should attempt to
 * supply a unique fsid but if a duplicate occurs adjust the fsid to ensure
 * uniqueness.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	int lim = 0x01000000;

	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	while (mount_rb_tree_RB_INSERT(&mounttree, mp)) {
		int32_t val;

		/*
		 * minor device mask: 0xFFFF00FF
		 */
		val = mp->mnt_stat.f_fsid.val[0];
		val = ((val & 0xFFFF0000) >> 8) | (val & 0x000000FF);
		++val;
		val = ((val << 8) & 0xFFFF0000) | (val & 0x000000FF);
		mp->mnt_stat.f_fsid.val[0] = val;
		if (--lim == 0) {
			lim = 0x01000000;
			mp->mnt_stat.f_fsid.val[1] += 0x0100;
			kprintf("mountlist_insert: fsid collision, "
				"too many mounts\n");
		}
	}
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

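/*
 * Worked example for the collision loop in mountlist_insert() above:
 * the minor bits of val[0] (mask 0xFFFF00FF) are packed into a
 * contiguous 24-bit counter, bumped, and unpacked again.  Starting
 * from val[0] = 0x0001FF05:
 *
 *	pack:	((0x0001FF05 & 0xFFFF0000) >> 8) | 0x05 = 0x000105
 *	bump:	0x000105 + 1 = 0x000106
 *	unpack:	((0x000106 << 8) & 0xFFFF0000) | 0x06 = 0x00010006
 *
 * Successive retries thus walk the 2^24 possible minor values; only
 * after all 0x01000000 of them collide is val[1] perturbed instead.
 */
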
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	mount_rb_tree_RB_REMOVE(&mounttree, mp);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);

	return(node_exists);
}

/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);
	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	res = 0;
	lwkt_gettoken_shared(&mountlist_token);

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				lwkt_reltoken(&mountlist_token);
				count = callback(mp, data);
				lwkt_gettoken_shared(&mountlist_token);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist,
							   mnt_list);
		}
	}
	lwkt_reltoken(&mountlist_token);

	lwkt_gettoken(&mountlist_token);
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);

	return(res);
}

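/*
 * Illustrative sketch only (hypothetical callback): counting mounted
 * filesystems of a given type via mountlist_scan().  Each positive
 * return aggregates into the total; a negative return would abort
 * the scan:
 *
 *	static int
 *	count_fs_cb(struct mount *mp, void *data)
 *	{
 *		const char *name = data;
 *
 *		return (strcmp(mp->mnt_stat.f_fstypename, name) == 0);
 *	}
 *
 *	count = mountlist_scan(count_fs_cb, "tmpfs", MNTSCAN_FORWARD);
 */
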
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass, stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}

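/*
 * Illustrative sketch only (hypothetical callbacks): a scan that
 * cheaply rejects clean vnodes in the fast path and handles dirty
 * ones under a full vnode lock might look like:
 *
 *	static int
 *	sync_fast_cb(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (RB_EMPTY(&vp->v_rbdirty_tree) ? -1 : 0);
 *	}
 *
 *	static int
 *	sync_slow_cb(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		... vp is locked per VMSC_GETVP; flush it here ...
 *		return (0);
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP | VMSC_NOWAIT,
 *		      sync_fast_cb, sync_slow_cb, NULL);
 *
 * vflush() below is a real in-tree caller, using VMSC_GETVX with only
 * a slowfunc.
 */
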
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
static int debug_busyprt = 0;	/* print out busy vnodes */
SYSCTL_INT(_vfs, OID_AUTO, debug_busyprt, CTLFLAG_RW, &debug_busyprt, 0, "");

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
	if (debug_busyprt) {
		const char *filename;

		spin_lock(&vp->v_spin);
		filename = TAILQ_FIRST(&vp->v_namecache) ?
			   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
		spin_unlock(&vp->v_spin);
		kprintf("vflush: busy vnode (%p) %s\n", vp, filename);
	}
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken_shared(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);

	return (mp);
}
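
/*
 * Illustrative sketch only (hypothetical module code): a filesystem
 * that layers metadata dependencies supplies an io_sync handler,
 * registers it at load time, and points mnt_bioops at it per mount so
 * bio_ops_sync() above can reach it both ways:
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_sync = myfs_io_sync,	<- hypothetical handler
 *	};
 *
 *	add_bio_ops(&myfs_bioops);		<- at module load
 *	mp->mnt_bioops = &myfs_bioops;		<- at mount time
 *	rem_bio_ops(&myfs_bioops);		<- at module unload
 */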