/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.30 2008/01/05 14:02:38 swildner Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
            &vnlru_nowhere, 0,
            "Number of times the vnlru process ran without success");


static struct lwkt_token mntid_token;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token);
        lwkt_token_init(&mntvnode_token);
        lwkt_token_init(&mntid_token);
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Support function called with mntvnode_token held to move a vnode to
 * the end of the list.
 */
static void
vmovevnodetoend(struct mount *mp, struct vnode *vp)
{
        vremovevnodemnt(vp);
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
}


/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}

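/*
 * Usage sketch (hypothetical caller): code that must keep a filesystem
 * from being unmounted while it works on it typically brackets the work
 * with vfs_busy()/vfs_unbusy().  The helper named below is hypothetical.
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		error = do_something_with_mount(mp);	(hypothetical helper)
 *		vfs_unbusy(mp);
 *	} else {
 *		error = EBUSY;		(an unmount is in progress)
 *	}
 */
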
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);
        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (!strcmp(vfsp->vfc_name, fstypename))
                        break;
        }
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
        vfs_busy(mp, LK_NOWAIT);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_flag = MNT_RDONLY;
        vfsp->vfc_refcount++;
        mp->mnt_iosize_max = DFLTPHYS;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&ilock);
        return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        lwkt_tokref ilock;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&ilock, &mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&ilock);
}

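/*
 * Worked example (arbitrary illustrative values): with vfc_typenum 5 and
 * mntid_base 0x1234 the minor number passed to makeudev() above is
 *
 *	(0x05 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	    = 0x05000000 | 0x00120000 | 0x00000034 = 0x05120034
 *
 * i.e. the filesystem type lands in the top byte and the two bytes of
 * mntid_base occupy separate byte positions of the minor number.
 */
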
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
#if 0
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
#endif
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        return(0);
        }
        return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnode *vp;
        lwkt_tokref ilock;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&ilock, &mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;
        while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
                /*
                 * __VNODESCAN__
                 *
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||	/* syncer or indeterminate */
                    !vmightfree(vp, trigger)	/* critical path opt */
                ) {
                        vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||	/* syncer or indeterminate */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
                ) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vmovevnodetoend(mp, vp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&ilock);
        return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
            SHUTDOWN_PRI_FIRST);

        crit_enter();
        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }
                cache_cleanneg(0);
                done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any, repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
        crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        lwkt_tokref ilock;
        int error;

        lwkt_gettoken(&ilock, &mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&ilock);
        return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */

struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        lwkt_tokref ilock;
        struct mount *mp;
        thread_t td;
        int count;
        int res;

        lwkt_gettoken(&ilock, &mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;	/* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;
        td = curthread;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&ilock);
        return(res);
}

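/*
 * Usage sketch (hypothetical callback): mountlist_scan() aggregates the
 * callback return values, so a callback that returns 1 for each visited
 * mount yields a count of the mount points that could be busied.
 *
 *	static int
 *	count_mounts_callback(struct mount *mp, void *data)
 *	{
 *		return (1);
 *	}
 *
 *	nmounts = mountlist_scan(count_mounts_callback, NULL, MNTSCAN_FORWARD);
 */
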
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&ilock);
                return;
        }
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&ilock);
}


/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mntvnode_token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, because the mntvnode_token is
 * held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        lwkt_tokref ilock;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;

        lwkt_gettoken(&ilock, &mntvnode_token);

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                if (vp->v_type == VNON)		/* visible but not ready */
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0)
                                goto next;
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;
                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
next:
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&ilock);
        return(r);
}

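/*
 * Usage sketch (hypothetical callbacks): a filesystem sync path might
 * walk its vnodes with vmntvnodescan(), using the fastfunc to cheaply
 * reject uninteresting vnodes and the slowfunc to do the real work once
 * the vnode has been acquired.
 *
 *	static int
 *	scan_fastfunc(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (0);	(-1 skips the vnode, >0 aborts the scan)
 *	}
 *
 *	static int
 *	scan_slowfunc(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (0);	(vnode is vget()'d here; non-zero stops the scan)
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, scan_fastfunc, scan_slowfunc, NULL);
 */
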
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;	/* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0)
                        return (error);
                vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}

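/*
 * Usage sketch (typical but filesystem-specific, shown only as an
 * illustration): a VFS_UNMOUNT implementation commonly calls vflush()
 * with rootrefs of 1 to account for the reference it keeps on its own
 * root vnode, adding FORCECLOSE for forced unmounts.
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, 1, flags)) != 0)
 *		return (error);
 */
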
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
            vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly close the vnode.  For block
         * or character devices, revert to an anonymous device.  For
         * all other files, just kill them.
         */
        if (info->flags & FORCECLOSE) {
                if (vp->v_type != VBLK && vp->v_type != VCHR) {
                        vgone_vxlocked(vp);
                } else {
                        vclean_vxlocked(vp, 0);
                        vp->v_ops = &spec_vnode_vops_p;
                        insmntque(vp, NULL);
                }
                return(0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}

void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}