/*
 * Copyright (c) 2004,2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

struct vnlru_info {
        int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
           &vnlru_nowhere, 0,
           "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token, "mntlist");
        lwkt_token_init(&mntid_token, "mntid");
        TAILQ_INIT(&mountscan_list);
        mount_init(&dummymount);
        dummymount.mnt_flag |= MNT_RDONLY;
        dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;
        struct mount *mp = vp->v_mount;

        TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if it wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;
        vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
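/*
 * Illustrative sketch (not part of the original file): the typical shape
 * of a filesystem node-allocation path built on getnewvnode().  The names
 * myfs_allocate_vnode and struct myfs_node are hypothetical stand-ins for
 * a real VFS; VT_UFS is used only as an example tag.  The vnode comes back
 * VX locked and referenced, so the caller fills in the remaining fields
 * (setting v_type away from VNON makes it usable) and then vx_unlock()s it
 * to keep a vref, or vx_put()s it to drop everything.
 */
#if 0
static int
myfs_allocate_vnode(struct mount *mp, struct myfs_node *np,
                    struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        error = getnewvnode(VT_UFS /* example tag */, mp, &vp,
                            VLKTIMEOUT, 0);
        if (error)
                return (error);
        vp->v_data = np;        /* point back at our per-fs node */
        vp->v_type = VREG;      /* no longer VNON; vnode is now live */
        *vpp = vp;              /* returned VX locked + refd */
        return (0);
}
#endif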
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        if (mp == NULL)
                mp = &dummymount;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        atomic_add_int(&mp->mnt_refs, 1);
        lwkt_gettoken(&mp->mnt_token);
        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT) {
                        lwkt_reltoken(&mp->mnt_token);
                        atomic_add_int(&mp->mnt_refs, -1);
                        return (ENOENT);
                }
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;

                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                lwkt_reltoken(&mp->mnt_token);
                atomic_add_int(&mp->mnt_refs, -1);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        lwkt_reltoken(&mp->mnt_token);
        return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
        atomic_add_int(&mp->mnt_refs, -1);
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}
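/*
 * Illustrative sketch (not part of the original file): bracketing access
 * to a mount point with vfs_busy()/vfs_unbusy() so the filesystem cannot
 * be unmounted out from under the caller.  do_something_with() is a
 * hypothetical helper used only for illustration.
 */
#if 0
static int
example_busy_bracket(struct mount *mp)
{
        int error;

        /* LK_NOWAIT: fail with ENOENT instead of sleeping on an unmount */
        if ((error = vfs_busy(mp, LK_NOWAIT)) != 0)
                return (error);
        error = do_something_with(mp);  /* hypothetical helper */
        vfs_unbusy(mp);                 /* filesystem unmountable again */
        return (error);
}
#endif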
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);

        vfsp = vfsconf_find_by_name(fstypename);
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        mount_init(mp);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

        vfs_busy(mp, 0);
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= MNT_RDONLY;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

        /*
         * Pre-set MPSAFE flags for VFS_MOUNT() call.
         */
        if (vfsp->vfc_flags & VFCF_MPSAFE)
                mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

        *mpp = mp;

        return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
        lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
        lwkt_token_init(&mp->mnt_token, "permnt");

        TAILQ_INIT(&mp->mnt_vnodescan_list);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_flag = 0;
        mp->mnt_hold = 1;
        mp->mnt_iosize_max = MAXPHYS;
        vn_syncer_thr_create(mp);
}

void
mount_hold(struct mount *mp)
{
        atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
        if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1)
                kfree(mp, M_MOUNT);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) |
                    (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&mntid_token);
}
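/*
 * Illustrative note (not part of the original file): the minor number
 * handed to makeudev() above packs the filesystem type into the top byte
 * and splits the 16-bit mntid_base around it.  For example, with
 * vfc_typenum == 5 and mntid_base == 0x1234 the minor becomes
 *
 *        (0x05 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *      =  0x05000000  |  0x00120000              |  0x00000034
 *      =  0x05120034
 *
 * so as mntid_base increments, the low byte of the device number cycles
 * through 2^8 values before the next-higher bits change, which is why the
 * values are only unique mod 2^16 for the first 2^8 calls, as noted in
 * the comment above vfs_getnewfsid().
 */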
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        int didmunge = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
        for (;;) {
                if (vfs_getvfs(template) == NULL)
                        break;
                didmunge = 1;
                ++template->val[1];
        }
        mp->mnt_stat.f_fsid = *template;
        return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
        struct thread *td = curthread;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        for (;;) {
                int ncached;

                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many.  Trigger
                 * based on potentially freeable vnodes but calculate the
                 * count based on total vnodes.
                 *
                 * (long) -> deal with 64 bit machines, intermediate overflow
                 */
                ncached = countcachedvnodes(1);
                if (numvnodes >= maxvnodes * 9 / 10 &&
                    ncached + inactivevnodes >= maxvnodes * 5 / 10) {
                        int count = numvnodes - maxvnodes * 9 / 10;

                        if (count > (ncached + inactivevnodes) / 100)
                                count = (ncached + inactivevnodes) / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Do non-critical-path (more robust) cache cleaning,
                 * even if vnode counts are nominal, to try to avoid
                 * having to do it in the critical path.
                 */
                cache_hysteresis(0);

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                ncached = countcachedvnodes(1);
                if (numvnodes <= maxvnodes * 9 / 10 ||
                    ncached + inactivevnodes <= maxvnodes * 5 / 10) {
                        tsleep(vnlruthread, 0, "vlruwt", hz);
                        continue;
                }
        }
}
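/*
 * Illustrative note (not part of the original file): taking
 * maxvnodes == 100000 as an example, the thresholds above mean
 * vnlru_proc() starts recycling only once numvnodes reaches 90000
 * (9/10) while at least 50000 (5/10) vnodes look freeable; each pass
 * then frees the smaller of the overshoot past 90000 and 1% of the
 * freeable set, but never fewer than 5 vnodes.
 */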
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_gettoken(&mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        int error;

        lwkt_gettoken(&mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&mountlist_token);
        return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist,
                                                           mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
        int node_exists = 0;
        struct mount *lmp;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
                if (lmp == mp) {
                        node_exists = 1;
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);
        return(node_exists);
}
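/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use mountlist_interlock() above to run a check serialized against other
 * mountlist operations.  The callback runs with the mountlist token held,
 * so it should not block for long; both example functions are
 * hypothetical.
 */
#if 0
static int
example_interlock_cb(struct mount *mp)
{
        /* e.g. reject the operation if an unmount has begun */
        if (mp->mnt_kern_flag & MNTK_UNMOUNT)
                return (EBUSY);
        return (0);
}

static int
example_check(struct mount *mp)
{
        return (mountlist_interlock(example_interlock_cb, mp));
}
#endif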
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A
 * return value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD - the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE - the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY  - the scanner will make the callback without busying
 *                   the mount node.
 *
 * NOTE: mount_hold()/mount_drop() sequence primarily helps us avoid
 *       confusion for the unbusy check, particularly if a kfree/kmalloc
 *       occurs quickly (lots of processes mounting and unmounting at the
 *       same time).
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        struct mount *mp;
        int count;
        int res;

        lwkt_gettoken(&mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        mount_hold(mp);
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        mount_drop(mp);
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        mount_hold(mp);
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        mount_drop(mp);
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist,
                                                           mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&mountlist_token);
        return(res);
}
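/*
 * Illustrative sketch (not part of the original file): a minimal
 * mountlist_scan() consumer that counts read-only mounts.  Each mount is
 * busied by the scanner before the callback runs, so the callback may
 * safely inspect it; returning 1 adds to the aggregated total, while a
 * negative return would abort the scan.  Both functions are hypothetical.
 */
#if 0
static int
example_count_rdonly_cb(struct mount *mp, void *data __unused)
{
        return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
example_count_rdonly(void)
{
        return (mountlist_scan(example_count_rdonly_cb, NULL,
                               MNTSCAN_FORWARD));
}
#endif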
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        struct mount *omp;

        /*
         * Delete from old mount point vnode list, if on one.
         */
        if ((omp = vp->v_mount) != NULL) {
                lwkt_gettoken(&omp->mnt_token);
                KKASSERT(omp == vp->v_mount);
                KASSERT(omp->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                omp->mnt_nvnodelistsize--;
                lwkt_reltoken(&omp->mnt_token);
        }

        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if (mp == NULL) {
                vp->v_mount = NULL;
                return;
        }
        lwkt_gettoken(&mp->mnt_token);
        vp->v_mount = mp;
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&mp->mnt_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without
 * calling the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *          data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        struct vnode *vp;
        int r = 0;
        int maxcount = mp->mnt_nvnodelistsize * 2;
        int stopcount = 0;
        int count = 0;

        lwkt_gettoken(&mp->mnt_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0) {
                        kprintf("Warning: excessive fssync iteration\n");
                        maxcount = mp->mnt_nvnodelistsize * 2;
                }

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;

                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptible, time critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /*
                         * We really want to yield a bit, so we simply
                         * sleep a tick
                         */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }

        TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
        lwkt_reltoken(&mp->mnt_token);
        return(r);
}
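/*
 * Illustrative sketch (not part of the original file): a minimal
 * vmntvnodescan() consumer pairing a non-blocking fastfunc filter with a
 * blocking slowfunc.  The fastfunc may not block and must not follow
 * vp->v_object; here it merely filters out non-regular files.  The
 * slowfunc runs with the vnode VX locked (VMSC_GETVX) and may block.
 * All three example functions are hypothetical.
 */
#if 0
static int
example_fast(struct mount *mp __unused, struct vnode *vp,
             void *data __unused)
{
        if (vp->v_type != VREG)
                return (-1);    /* skip without calling the slowfunc */
        return (0);             /* proceed to the slowfunc */
}

static int
example_slow(struct mount *mp __unused, struct vnode *vp __unused,
             void *data __unused)
{
        /* vnode is VX locked here; a real consumer would do work on it */
        return (0);             /* non-zero would terminate the scan */
}

static void
example_scan(struct mount *mp)
{
        vmntvnodescan(mp, VMSC_GETVX, example_fast, example_slow, NULL);
}
#endif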
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;         /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
                        if ((flags & FORCECLOSE) == 0)
                                return (error);
                        rootrefs = 0;
                        /* continue anyway */
                }
                if (rootrefs)
                        vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
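/*
 * Illustrative sketch (not part of the original file): the typical shape
 * of a VFS unmount method calling vflush().  rootrefs is 1 on the
 * assumption that the filesystem keeps one reference on its root vnode;
 * example_unmount and the surrounding logic are hypothetical.
 */
#if 0
static int
example_unmount(struct mount *mp, int mntflags)
{
        int flags = 0;
        int error;

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        error = vflush(mp, 1, flags);   /* 1 ref held on the root vnode */
        if (error)
                return (error);
        /* ... release filesystem-specific state ... */
        return (0);
}
#endif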
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;
        int flags = info->flags;

        /*
         * Generally speaking try to deactivate on 0 refs (catch-all)
         */
        atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * Do not force-close VCHR or VBLK vnodes
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                flags &= ~(WRITECLOSE|FORCECLOSE);

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
             (VOP_GETATTR(vp, &vattr) == 0 &&
              vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (VREFCNT(vp) <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly destroy the vnode and then move
         * it to a dummymount structure so vop_*() functions don't deref
         * a NULL pointer.
         */
        if (flags & FORCECLOSE) {
                vhold(vp);
                vgone_vxlocked(vp);
                if (vp->v_mount == NULL)
                        insmntque(vp, &dummymount);
                vdrop(vp);
                return(0);
        }
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
        struct mount *mp = NULL;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (ncp == mp->mnt_ncmountpt.ncp)
                        break;
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}
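/*
 * Illustrative sketch (not part of the original file): registering a
 * bio_ops with add_bio_ops() so that bio_ops_sync(NULL) reaches it from
 * the general syncing code.  Per the warning above bio_ops_sync(), the
 * io_sync callback must tolerate a NULL mount.  example_io_sync, the
 * partial initializer, and example_register are all hypothetical.
 */
#if 0
static void
example_io_sync(struct mount *mp)
{
        /* mp may be NULL when called from the general syncing code */
}

static struct bio_ops example_bio_ops = {
        .io_sync = example_io_sync,
};

static void
example_register(void)
{
        add_bio_ops(&example_bio_ops);  /* paired with rem_bio_ops() */
}
#endif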