/*
 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist. We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned. The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
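 *
 * Illustrative sketch only (not taken from any particular filesystem in
 * this tree; 'ip' stands in for a hypothetical filesystem-private inode
 * and the lktimeout/lkflags values are placeholders):
 *
 *	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
 *	if (error == 0) {
 *		vp->v_data = ip;	(fill in the remaining fields)
 *		vx_unlock(vp);		(keep the vref, drop the VX lock)
 *	}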
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * This function creates vnodes with special operations vectors. The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored. No other flag bits
 * are used. A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
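 *
 * Hedged usage sketch only (the actual root mount code lives elsewhere and
 * may differ; "ufs" and "root_device" are placeholder names). The mount is
 * returned busied, so the caller eventually calls vfs_unbusy():
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ufs", "root_device", &mp) == 0) {
 *		(mount and use mp, then vfs_unbusy(mp) when appropriate)
 *	}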
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, LK_NOWAIT);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", 0, 0);
	lwkt_token_init(&mp->mnt_token);

	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = DFLTPHYS;
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid. Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat(). Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers. We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel. Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}

/*
 * Set the FSID for a new mount point to the template. Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}

/*
 * This routine is called when we have too many vnodes.
 * It attempts to free <count> vnodes and will potentially free vnodes
 * that still have VM backing store (VM backing store is typically the
 * cause of a vnode blowout so we want to do this). Therefore, this
 * operation is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use. It is not
 * desirable to reuse such vnodes. These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d. Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack. Up to four passes will be taken. Each pass
	 * makes a larger set of vnodes eligible. For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now). Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process. Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations. We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit. It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock_wr(&vp->v_spinlock);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock_wr(&vp->v_spinlock);
			return(0);
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can. Unlike
 * vmightfree() this routine may flush the vnode and block.
 * Vnodes marked VFREE are still candidates for vgone()ing because they may
 * hold namecache resources and could be blocking the namecache directory
 * hierarchy (and related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			 TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit. We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here). Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point. Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan. The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set. This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism. VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check. The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes. However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0. If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode. Use the special syncer vnode to placemark
		 * the LRU.
		 * This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode. If the VX get fails the
		 * vnode might still be on the mountlist. Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation. That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out. The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again. The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	crit_enter();
	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		cache_hysteresis();

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.
		 * We want to get rid of aged small files first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount. If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping. Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
	crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held. The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	lwkt_tokref ilock;
	int error;

	lwkt_gettoken(&ilock, &mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&ilock);
	return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount. Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist. If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list. Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards. The callback may safely remove any mount point
 * without interfering with the scan. If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.
 * A return value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	lwkt_tokref ilock;
	struct mount *mp;
	thread_t td;
	int count;
	int res;

	lwkt_gettoken(&ilock, &mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;
	td = curthread;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&ilock);
	return(res);
}

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock). It may not block and the vnode may be undergoing
 * modifications while the caller is processing it. The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.
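 * (For a concrete caller in this file, see vflush() below, which passes
 * a NULL fastfunc and does all of its work in a slowfunc, vflush_scan(),
 * under VMSC_GETVX.)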
 * A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags. The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock. A non-zero return value from
 * the slow function terminates the loop. The slow function is allowed to
 * arbitrarily block. The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	lwkt_tokref ilock;
	struct vnode *vp;
	int r = 0;
	int maxcount = 1000000;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&ilock, &mntvnode_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect. Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize * 2;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0)
			panic("maxcount reached during vmntvnodescan");

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test. A negative return continues the loop without
		 * calling the slow test. 0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing. Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve. Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero. If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate. If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&ilock);
	return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}

/*
 * The scan callback is made with a VX locked vnode.
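 * It is invoked from vflush() above via vmntvnodescan(..., VMSC_GETVX,
 * NULL, vflush_scan, &vflush_info), so each vnode arrives VX locked and
 * is vx_put() by the scan loop after the callback returns.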
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	     vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode. For block
	 * or character devices we just clean and leave the vp
	 * associated with devfs. For all other files, just kill them.
	 *
	 * XXX we need to do something about devfs here, I'd rather not
	 * blow away device associations.
	 */
	if (info->flags & FORCECLOSE) {
		vgone_vxlocked(vp);
#if 0
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone_vxlocked(vp);
		} else {
			vclean_vxlocked(vp, 0);
			/*vp->v_ops = &devfs_vnode_dev_vops_p;*/
			insmntque(vp, NULL);
		}
#endif
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}

/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&ilock);
	return (mp);
}