/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	    &vnlru_nowhere, 0,
	    "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token);
	lwkt_token_init(&mntvnode_token);
	lwkt_token_init(&mntid_token);
	TAILQ_INIT(&mountscan_list);
	TAILQ_INIT(&mntvnodescan_list);
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;

	TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
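/*
 * Illustrative sketch (not part of the build): roughly how a filesystem's
 * inode-to-vnode path might use getnewvnode().  The VT_EXAMPLE tag, the
 * examplefs_inode type and examplefs_get_vnode() are hypothetical names
 * used only for illustration.
 */
#if 0
static int
examplefs_get_vnode(struct mount *mp, struct examplefs_inode *ip,
		    struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* returns a VX locked, referenced vnode with v_data still NULL */
	error = getnewvnode(VT_EXAMPLE, mp, &vp, 0, 0);
	if (error)
		return (error);

	/* finish setting up the vnode before handing it out */
	vp->v_type = VREG;
	vp->v_data = ip;

	/* keep the ref but drop the VX lock, per the comment above */
	vx_unlock(vp);
	*vpp = vp;
	return (0);
}
#endif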
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
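/*
 * Illustrative sketch (not compiled): the usual pattern for pinning a
 * mount point across an operation with vfs_busy()/vfs_unbusy().  The
 * operate_on_mount() wrapper and do_something_with_mount() helper are
 * hypothetical.
 */
#if 0
static int
operate_on_mount(struct mount *mp)
{
	int error;

	/* fail immediately instead of sleeping if an unmount is in progress */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (ENOENT);

	/* the filesystem cannot be unmounted until vfs_unbusy() */
	error = do_something_with_mount(mp);

	vfs_unbusy(mp);
	return (error);
}
#endif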
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
	vfs_busy(mp, LK_NOWAIT);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY;
	vfsp->vfc_refcount++;
	mp->mnt_iosize_max = DFLTPHYS;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&ilock);
	return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	lwkt_tokref ilock;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&ilock, &mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&ilock);
}

/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}
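/*
 * Illustrative sketch (not compiled): a filesystem's mount code typically
 * either asks for a fresh fsid or derives one from on-media data and lets
 * vfs_setfsid() resolve collisions.  examplefs_assign_fsid() is a
 * hypothetical helper.
 */
#if 0
static int
examplefs_assign_fsid(struct mount *mp, fsid_t *media_fsid)
{
	if (media_fsid != NULL) {
		/* preferred fsid from the volume; bumped if it collides */
		if (vfs_setfsid(mp, media_fsid))
			kprintf("examplefs: fsid collision, adjusted\n");
	} else {
		/* no persistent identity, just take a unique one */
		vfs_getnewfsid(mp);
	}
	return (0);
}
#endif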
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list))
			return(0);
	}
	return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnode *vp;
	lwkt_tokref ilock;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&ilock, &mntvnode_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mntvnode_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger)	/* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&ilock);
	return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
	if (vnlruproc_sig == 0) {
		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
		wakeup(vnlruthread);
	}
	tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	crit_enter();
	for (;;) {
		kproc_suspend_loop();

		/*
		 * Try to free some vnodes if we have too many
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(td, 0, "vlruwt", hz);
			continue;
		}
		cache_cleanneg(0);
		done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any, repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(td, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
	crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	lwkt_tokref ilock;
	int error;

	lwkt_gettoken(&ilock, &mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&ilock);
	return (error);
}
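/*
 * Illustrative sketch (not compiled): a minimal mountlist_interlock()
 * callback.  Checking mnt_kern_flag while the mountlist token is held is
 * a typical use; example_interlock() itself is a hypothetical name.
 */
#if 0
static int
example_interlock(struct mount *mp)
{
	/* runs serialized against other mountlist_interlock() callers */
	if (mp->mnt_kern_flag & MNTK_UNMOUNT)
		return (EBUSY);
	return (0);
}

/* ... error = mountlist_interlock(example_interlock, mp); ... */
#endif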
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	lwkt_tokref ilock;
	struct mount *mp;
	thread_t td;
	int count;
	int res;

	lwkt_gettoken(&ilock, &mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;
	td = curthread;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&ilock);
	return(res);
}
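/*
 * Illustrative sketch (not compiled): a mountlist_scan() callback that
 * counts read-only mounts.  Each callback runs with the mount busied
 * (unless MNTSCAN_NOBUSY is used); a return value >= 0 is added to the
 * aggregate total and a return value < 0 aborts the scan.  The
 * count_ro_*() names are hypothetical.
 */
#if 0
static int
count_ro_callback(struct mount *mp, void *data __unused)
{
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
count_ro_mounts(void)
{
	return (mountlist_scan(count_ro_callback, NULL, MNTSCAN_FORWARD));
}
#endif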
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	lwkt_tokref ilock;

	lwkt_gettoken(&ilock, &mntvnode_token);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if ((vp->v_mount = mp) == NULL) {
		lwkt_reltoken(&ilock);
		return;
	}
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&ilock);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	lwkt_tokref ilock;
	struct vnode *vp;
	int r = 0;
	int maxcount = 1000000;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&ilock, &mntvnode_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize * 2;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0)
			panic("maxcount reached during vmntvnodescan");

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;
			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
	lwkt_reltoken(&ilock);
	return(r);
}
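/*
 * Illustrative sketch (not compiled): a typical vmntvnodescan() pair.
 * The fast callback cheaply rejects uninteresting vnodes without any
 * vnode lock; the slow callback then runs with the vnode locked
 * (VMSC_GETVX here).  The example_scan_*() functions and the exscan_info
 * structure are hypothetical.
 */
#if 0
struct exscan_info {
	int	nregular;
};

static int
example_scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	/* returning < 0 skips the slow function for this vnode */
	if (vp->v_type != VREG)
		return (-1);
	return (0);
}

static int
example_scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	struct exscan_info *info = data;

	/* the vnode is VX locked here; blocking is allowed */
	++info->nregular;
	return (0);
}

static int
example_count_regular(struct mount *mp)
{
	struct exscan_info info = { 0 };

	vmntvnodescan(mp, VMSC_GETVX, example_scan_fast, example_scan_slow,
		      &info);
	return (info.nregular);
}
#endif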
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode. We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
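/*
 * Illustrative sketch (not compiled): an unmount-style path flushing the
 * vnodes of a mount.  A first pass asks politely; if the unmount is
 * forced, FORCECLOSE detaches busy vnodes as well.  The
 * examplefs_flush_vnodes() wrapper and its mntflags handling are
 * hypothetical.
 */
#if 0
static int
examplefs_flush_vnodes(struct mount *mp, int mntflags)
{
	int flags = 0;
	int error;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	/* rootrefs == 0: we hold no extra references on the root vnode */
	error = vflush(mp, 0, flags);
	if (error == EBUSY && (flags & FORCECLOSE) == 0)
		return (EBUSY);		/* user error: filesystem is busy */
	return (error);
}
#endif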
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((info->flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly close the vnode. For block
	 * or character devices we just clean and leave the vp
	 * associated with devfs.  For all other files, just kill them.
	 *
	 * XXX we need to do something about devfs here, I'd rather not
	 * blow away device associations.
	 */
	if (info->flags & FORCECLOSE) {
		vgone_vxlocked(vp);
#if 0
		if (vp->v_type != VBLK && vp->v_type != VCHR) {
			vgone_vxlocked(vp);
		} else {
			vclean_vxlocked(vp, 0);
			/*vp->v_ops = &devfs_vnode_dev_vops_p;*/
			insmntque(vp, NULL);
		}
#endif
		return(0);
	}
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}
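/*
 * Illustrative sketch (not compiled): how a filesystem that provides
 * dependency-tracking hooks might register its bio_ops at load time and
 * unregister on unload.  The examplefs_* names are hypothetical; only
 * the add_bio_ops()/rem_bio_ops() calls come from this file.
 */
#if 0
static void
examplefs_io_sync(struct mount *mp)
{
	/* flush filesystem-level dependency state; mp may be NULL */
}

static struct bio_ops examplefs_bioops = {
	.io_sync = examplefs_io_sync,
};

static void
examplefs_modinit(void)
{
	add_bio_ops(&examplefs_bioops);
}

static void
examplefs_moduninit(void)
{
	rem_bio_ops(&examplefs_bioops);
}
#endif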