/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}

/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if it wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
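
/*
 * Illustrative sketch (assumed usage, not code from this file): a
 * filesystem's inode-to-vnode routine would typically call getnewvnode()
 * along these lines, where 'np' stands for a hypothetical fs-private
 * node structure:
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
 *	if (error == 0) {
 *		vp->v_data = np;	(attach fs-private data)
 *		vp->v_type = VREG;	(vnode was VNON until now)
 *		np->n_vnode = vp;
 *		vx_unlock(vp);		(keep the vref, drop the VX lock)
 *	}
 */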
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}

/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
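
/*
 * Illustrative sketch (assumed usage, not code from this file): callers
 * that need to keep a mount from being unmounted across a blocking
 * operation typically bracket the work like this:
 *
 *	if (vfs_busy(mp, LK_NOWAIT) != 0)
 *		return (0);		(skip, an unmount is in progress)
 *	... operate on the mount ...
 *	vfs_unbusy(mp);
 *
 * mountlist_scan() below uses exactly this pattern around its callback.
 */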
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}
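
/*
 * Worked example (illustrative, values assumed): with vfc_typenum = 5 and
 * mntid_base = 0x1234 the minor value passed to makeudev() above is
 *
 *	(0x05 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	    = 0x05000000 | 0x00120000 | 0x00000034
 *	    = 0x05120034
 *
 * i.e. the filesystem type occupies the top byte, the high byte of
 * mntid_base lands in bits 16-23, and only its low byte reaches the low
 * 8 bits, which is why the comment above only promises uniqueness mod
 * 2^16 for the first 2^8 calls.
 */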
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 *
	 * (long) -> deal with 64 bit machines, intermediate overflow
	 */
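	/*
	 * Worked example (illustrative, numbers assumed): on a machine with
	 * vmstats.v_page_count = 1,000,000 pages and desiredvnodes = 100,000,
	 * the first pass (trigger_mult = 0) gives
	 *
	 *	trigger = 1000000 * (0 + 2) / 100000 = 20 pages,
	 *
	 * so vnodes caching 20 or more resident pages are left alone.  Each
	 * unsuccessful vnlru run raises trigger_mult and therefore the
	 * threshold, making more heavily cached vnodes eligible.
	 */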
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = (long)vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mp->mnt_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mnt_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mp->mnt_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Do some opportunistic roving.
		 */
		if (numvnodes > 100000)
			vnode_free_rover_scan(50);
		else if (numvnodes > 10000)
			vnode_free_rover_scan(20);
		else
			vnode_free_rover_scan(5);

		/*
		 * Try to free some vnodes if we have too many
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= (long)desiredvnodes * 9 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee that the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount* lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterward.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
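
/*
 * Illustrative sketch (assumed usage, not code from this file): a caller
 * that wants to visit every mount, as vnlru_proc() does above with
 * vlrureclaim(), supplies a callback whose return values are summed:
 *
 *	static int
 *	example_scan_cb(struct mount *mp, void *data)
 *	{
 *		(mp is already busied here unless MNTSCAN_NOBUSY was used)
 *		return (1);		(aggregated into the final total)
 *	}
 *
 *	total = mountlist_scan(example_scan_cb, NULL, MNTSCAN_FORWARD);
 *
 * Returning a negative value from the callback terminates the scan.
 */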
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
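
/*
 * Illustrative sketch (assumed usage, not code from this file): a per-mount
 * walker would typically pair a cheap, non-blocking filter with a blocking
 * worker, e.g.
 *
 *	static int
 *	example_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_type != VREG)
 *			return (-1);	(skip without calling the slow func)
 *		return (0);		(0 continues on to the slow func)
 *	}
 *
 *	static int
 *	example_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		(vp is locked per VMSC_GETVP here and we may block)
 *		return (0);		(non-zero would terminate the scan)
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, example_fast, example_slow, NULL);
 */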
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);
	lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptible, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
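
/*
 * Illustrative sketch (assumed usage, not code from this file): an
 * unmount path that already holds one reference on the root vnode would
 * typically invoke vflush() as
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *
 * while a read-only remount that only wants to flush writable regular
 * files might use
 *
 *	error = vflush(mp, 0, WRITECLOSE);
 */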
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Skip vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}