/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, 1);
	lwkt_token_init(&mntvnode_token, 1);
	lwkt_token_init(&mntid_token, 1);
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
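/*
 * For reference, the cursor fixup above is what makes vnode removal safe
 * while a vmntvnodescan() is in flight: if a scanner's cursor (info->vp)
 * points at the vnode being removed, the cursor is advanced to the next
 * vnode on the mount's list before the vnode is unlinked, so the scanner
 * never follows a stale pointer.
 */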
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
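/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * hold the busy lock across any operation that must not race an unmount.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		error = do_something(mp);	// hypothetical work; mp cannot
 *						// be unmounted here
 *		vfs_unbusy(mp);
 *	}
 *
 * Passing 0 instead of LK_NOWAIT would sleep through an in-progress
 * unmount and still return ENOENT, per the comment above vfs_busy().
 */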
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token, 1);

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = DFLTPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}
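/*
 * A hedged usage sketch (hypothetical VFS mount code, not part of this
 * file): a filesystem with a stable on-disk identifier can feed it
 * through vfs_setfsid() and detect collisions; ondisk_volume_id is an
 * assumed field, not a real structure member.
 *
 *	fsid_t template;
 *
 *	template.val[0] = (int32_t)ondisk_volume_id;	// hypothetical
 *	template.val[1] = mp->mnt_vfc->vfc_typenum;
 *	if (vfs_setfsid(mp, &template))
 *		kprintf("fsid collision, fsid munged\n");
 *
 * Filesystems without a stable identifier would call vfs_getnewfsid(mp)
 * instead.
 */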
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock_wr(&vp->v_spinlock);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock_wr(&vp->v_spinlock);
			return(0);
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
	return(1);
}
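/*
 * For reference, the aging matrix implemented by the switch in
 * vmightfree() above (VAGE0/VAGE1 are the two age bits on the vnode):
 *
 *	age bits	eligible starting at
 *	VAGE0|VAGE1	pass 0
 *	VAGE1		pass 1
 *	VAGE0		pass 2
 *	(neither)	pass 3
 */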
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}
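/*
 * Taken together, the reclaim path below proceeds in stages: vlrureclaim()
 * uses vmightfree() as a cheap unlocked filter, vx_get_nonblock() to
 * acquire the candidate, vtrytomakegoneable() (which may flush and block)
 * to re-verify it, and finally vgone_vxlocked() to destroy it.
 */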
/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mntvnode_token);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}
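/*
 * For reference, the handshake between vnlru_proc_wait() above and
 * vnlru_proc() below: a caller sets vnlruproc_sig and wakes the vnlru
 * thread, then sleeps on &vnlruproc_sig for up to a tick; once the thread
 * has brought the vnode count back down it clears vnlruproc_sig and
 * issues wakeup(&vnlruproc_sig) to release any waiters.
 */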
static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	crit_enter();
	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any, repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
	crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}
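/*
 * A minimal sketch (hypothetical callback, not part of this file) of the
 * mountlist_interlock() mechanism above: the callback runs with
 * mountlist_token held, serialized against other interlocked callers.
 *
 *	static int
 *	check_mp(struct mount *mp)
 *	{
 *		// hypothetical check, serialized by mountlist_token
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(check_mp, mp);
 */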
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	thread_t td;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;
	td = curthread;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
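/*
 * A minimal callback sketch (hypothetical, not part of this file):
 * count the mounted filesystems.  A return value >= 0 lets the scan
 * continue and is aggregated into mountlist_scan()'s result.
 *
 *	static int
 *	count_mount(struct mount *mp, void *data __unused)
 *	{
 *		return (1);
 *	}
 *
 *	nmounts = mountlist_scan(count_mount, NULL, MNTSCAN_FORWARD);
 */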
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_gettoken(&mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&mntvnode_token);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mntvnode_token);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mntvnode token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mntvnode
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(); a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = 1000000;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mntvnode_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize * 2;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0)
			panic("maxcount reached during vmntvnodescan");

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&mntvnode_token);
	return(r);
}
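/*
 * A hedged usage sketch (hypothetical caller, not part of this file):
 * visit every vnode under a mount using a cheap unlocked filter plus a
 * locked worker, in the same style as vflush() below.
 *
 *	static int
 *	my_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_type != VREG)
 *			return (-1);	// skip without calling my_slow
 *		return (0);		// proceed to the locked callback
 *	}
 *
 *	static int
 *	my_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		// vp is held and locked per the VMSC_* flag used
 *		return (0);		// non-zero would terminate the scan
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, my_fast, my_slow, NULL);
 */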
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
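/*
 * A hedged usage sketch (hypothetical VFS_UNMOUNT code, not part of this
 * file): an unmount implementation would typically map MNT_FORCE to
 * FORCECLOSE and flush with rootrefs == 0 when it holds no extra
 * reference on its root vnode.
 *
 *	int flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *
 *	error = vflush(mp, 0, flags);
 *	if (error)
 *		return (error);	// some vnodes are still busy
 */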
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	     (VOP_GETATTR(vp, &vattr) == 0 &&
	      vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (info->flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
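/*
 * A hedged usage sketch (hypothetical consumer, not part of this file):
 * a facility such as softupdates would register its bio_ops at load time
 * and remove them at unload; my_bioops and my_io_sync are illustrative
 * names only.
 *
 *	static struct bio_ops my_bioops = {
 *		.io_sync = my_io_sync,		// hypothetical callback
 *	};
 *
 *	add_bio_ops(&my_bioops);
 *	...
 *	rem_bio_ops(&my_bioops);
 *
 * bio_ops_sync(NULL) then invokes my_io_sync(NULL) along with every other
 * registered set, while bio_ops_sync(mp) only invokes mp->mnt_bioops.
 */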