/*
 * Copyright (c) 2004,2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
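 *
 * Illustrative usage sketch (hedged, not from this file; the fstype and
 * device names are assumptions).  The mount is returned busied, so the
 * caller eventually calls vfs_unbusy():
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ufs", "root_device", &mp) == 0) {
 *		(... complete and mount the filesystem ...)
 *		vfs_unbusy(mp);
 *	}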
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_hold = 1;
	mp->mnt_iosize_max = MAXPHYS;
	vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
	atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
	if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1)
		kfree(mp, M_MOUNT);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
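 *
 * A hedged usage sketch (illustrative; how the template is derived is
 * an assumption, not part of this file):
 *
 *	fsid_t tfsid;
 *
 *	tfsid.val[0] = fsid_from_device;	(assumed per-fs value)
 *	tfsid.val[1] = mp->mnt_vfc->vfc_typenum;
 *	if (vfs_setfsid(mp, &tfsid))
 *		kprintf("fsid collided, val[1] was adjusted\n");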
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many.  Trigger based
		 * on potentially freeable vnodes but calculate the count
		 * based on total vnodes.
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		if (numvnodes >= maxvnodes * 9 / 10 &&
		    cachedvnodes + inactivevnodes >= maxvnodes * 5 / 10) {
			int count = numvnodes - maxvnodes * 9 / 10;

			if (count > (cachedvnodes + inactivevnodes) / 100)
				count = (cachedvnodes + inactivevnodes) / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes <= maxvnodes * 9 / 10 ||
		    cachedvnodes + inactivevnodes <= maxvnodes * 5 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
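 *
 * A minimal sketch (illustrative; the callback name and its policy are
 * assumptions):
 *
 *	static int
 *	checkmnt_interlock(struct mount *mp)
 *	{
 *		if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *			return (EBUSY);		(unmount in progress)
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(checkmnt_interlock, mp);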
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 *
 * NOTE: mount_hold()/mount_drop() sequence primarily helps us avoid
 *	 confusion for the unbusy check, particularly if a kfree/kmalloc
 *	 occurs quickly (lots of processes mounting and unmounting at the
 *	 same time).
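 *
 * A minimal sketch (illustrative; the callback name is an assumption).
 * Each callback return value >= 0 is added to the scan total, so this
 * counts the mount points that could be busied:
 *
 *	static int
 *	countmnt_callback(struct mount *mp, void *data)
 *	{
 *		return(1);
 *	}
 *
 *	nmounts = mountlist_scan(countmnt_callback, NULL, MNTSCAN_FORWARD);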
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			mount_hold(mp);
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			mount_drop(mp);
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
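 *
 * A minimal fastfunc sketch (illustrative; the helper name and filter
 * are assumptions, not part of this file):
 *
 *	static int
 *	myscan_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_type != VREG)
 *			return(-1);	(skip vnode, slowfunc not called)
 *		return(0);		(fall through to the slowfunc)
 *	}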
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/*
			 * We really want to yield a bit, so we simply
			 * sleep a tick
			 */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Generally speaking try to deactivate on 0 refs (catch-all)
	 */
	atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

	/*
	 * Skip vnodes marked VSYSTEM.
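	 * (Quota file vnodes, for example, are typically flagged VSYSTEM.)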
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	     vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (VREFCNT(vp) <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
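
/*
 * Illustrative sketch (not part of this file; the myfs_* names are
 * assumptions): a filesystem that hooks the buffer cache registers and
 * unregisters its bio_ops roughly as follows, after which bio_ops_sync()
 * will invoke its io_sync callback:
 *
 *	static struct bio_ops myfs_bioops = {
 *		.io_sync = myfs_io_sync,
 *	};
 *
 *	add_bio_ops(&myfs_bioops);	(e.g. at module load)
 *	rem_bio_ops(&myfs_bioops);	(e.g. at module unload)
 */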