/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

struct vnlru_info {
        int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
        &vnlru_nowhere, 0,
        "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token, 1, "mntlist");
        lwkt_token_init(&mntvnode_token, 1, "mntvnode");
        lwkt_token_init(&mntid_token, 1, "mntid");
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
        mount_init(&dummymount);
        dummymount.mnt_flag |= MNT_RDONLY;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from its mount point's vnode list.  We must update any list scans which
 * are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
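 *
 * Illustrative caller sketch (hypothetical, not taken from any particular
 * filesystem): after getnewvnode() returns, the VFS typically fills in its
 * private fields and then drops the VX lock while keeping the reference:
 *
 *      error = getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *      vp->v_data = ip;        (hypothetical per-filesystem inode pointer)
 *      vp->v_type = VREG;
 *      vx_unlock(vp);          (keep the vref, drop the VX lock)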
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        if (mp == NULL)
                mp = &dummymount;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
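 *
 * Illustrative call (hypothetical, patterned after the pre-root-mount
 * code path; the device name shown is a placeholder):
 *
 *      error = vfs_rootmountalloc("ufs", "root_device", &mp);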
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);

        vfsp = vfsconf_find_by_name(fstypename);
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        mount_init(mp);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

        vfs_busy(mp, LK_NOWAIT);
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= MNT_RDONLY;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
        lockinit(&mp->mnt_lock, "vfslock", 0, 0);
        lwkt_token_init(&mp->mnt_token, 1, "permnt");

        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_flag = 0;
        mp->mnt_iosize_max = DFLTPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&mntid_token);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        int didmunge = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
        for (;;) {
                if (vfs_getvfs(template) == NULL)
                        break;
                didmunge = 1;
                ++template->val[1];
        }
        mp->mnt_stat.f_fsid = *template;
        return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.
 * It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
#if 0
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
#endif
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);

        /*
         * XXX horrible hack.  Up to four passes will be taken.  Each pass
         * makes a larger set of vnodes eligible.  For now what this really
         * means is that we try to recycle files opened only once before
         * recycling files opened multiple times.
         */
        switch(vp->v_flag & (VAGE0 | VAGE1)) {
        case 0:
                if (pass < 3)
                        return(0);
                break;
        case VAGE0:
                if (pass < 2)
                        return(0);
                break;
        case VAGE1:
                if (pass < 1)
                        return(0);
                break;
        case VAGE0 | VAGE1:
                break;
        }
        return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        spin_lock(&vp->v_spinlock);
        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list)) {
                        spin_unlock(&vp->v_spinlock);
                        return(0);
                }
        }
        spin_unlock(&vp->v_spinlock);
        return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.
 * Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0   /* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                            TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *       (the vnodes being recycled by the one-time use) does not degenerate
 *       into too-small a set.  This is important because once a vnode is
 *       marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *       will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *       be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnlru_info *info = data;
        struct vnode *vp;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;

        while (count && mp->mnt_syncer) {
                /*
                 * Next vnode.  Use the special syncer vnode to placemark
                 * the LRU.  This way the LRU code does not interfere with
                 * vmntvnodescan().
                 */
                vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
                if (vp) {
                        TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
                                           mp->mnt_syncer, v_nmntvnodes);
                } else {
                        TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
                                          v_nmntvnodes);
                        vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                        if (vp == NULL)
                                break;
                }

                /*
                 * __VNODESCAN__
                 *
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    !vmightfree(vp, trigger, info->pass) /* critical path opt */
                ) {
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)    /* critical path opt */
                ) {
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&mntvnode_token);
        return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        struct vnlru_info info;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        get_mplock();
        crit_enter();

        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }
                cache_hysteresis();

                /*
                 * The pass iterates through the four combinations of
                 * VAGE0/VAGE1.
                 * We want to get rid of aged small files
                 * first.
                 */
                info.pass = 0;
                done = 0;
                while (done == 0 && info.pass < 4) {
                        done = mountlist_scan(vlrureclaim, &info,
                                              MNTSCAN_FORWARD);
                        ++info.pass;
                }

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }

        crit_exit();
        rel_mplock();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_gettoken(&mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        int error;

        lwkt_gettoken(&mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&mountlist_token);
        return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
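 *
 * Illustrative callback sketch (hypothetical, not part of this file; see
 * vlrureclaim() above for a real in-tree caller).  Each mount that can be
 * busied contributes its callback's return value to the aggregate total:
 *
 *      static int
 *      count_mounts_callback(struct mount *mp, void *data __unused)
 *      {
 *              return (1);
 *      }
 *
 *      n = mountlist_scan(count_mounts_callback, NULL, MNTSCAN_FORWARD);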
 *
 * MNTSCAN_FORWARD - the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE - the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY  - the scanner will make the callback without busying
 *                   the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        struct mount *mp;
        thread_t td;
        int count;
        int res;

        lwkt_gettoken(&mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;
        td = curthread;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&mountlist_token);
        return(res);
}

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_gettoken(&mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&mntvnode_token);
                return;
        }
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&mntvnode_token);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
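 *
 * Illustrative fastfunc sketch (hypothetical, not part of this file).  It
 * only inspects fields that are safe to read without the vnode lock:
 *
 *      static int
 *      scan_fastfunc(struct mount *mp, struct vnode *vp, void *data __unused)
 *      {
 *              if (vp->v_type != VREG)
 *                      return (-1);    (skip, slowfunc is not called)
 *              return (0);             (proceed to the locked slowfunc)
 *      }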
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *       out from under the fastfunc()'s vnode test.  It will not prevent
 *       v_object from getting NULL'd out but it will ensure that the
 *       pointer (if we race) will remain stable.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;
        int stopcount = 0;
        int count = 0;

        lwkt_gettoken(&mntvnode_token);
        lwkt_gettoken(&vmobj_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize * 2;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;
                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptable, time-critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /* We really want to yield a bit, so we simply sleep a tick */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&vmobj_token);
        lwkt_reltoken(&mntvnode_token);
        return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;         /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
                        if ((flags & FORCECLOSE) == 0)
                                return (error);
                        rootrefs = 0;
                        /* continue anyway */
                }
                if (rootrefs)
                        vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
            vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly destroy the vnode and then move
         * it to a dummymount structure so vop_*() functions don't deref
         * a NULL pointer.
         */
        if (info->flags & FORCECLOSE) {
                vhold(vp);
                vgone_vxlocked(vp);
                if (vp->v_mount == NULL)
                        insmntque(vp, &dummymount);
                vdrop(vp);
                return(0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 *          io_sync is called with a NULL mount from the general syncing
 *          code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
        struct mount *mp = NULL;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (ncp == mp->mnt_ncmountpt.ncp)
                        break;
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}