/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.27 2007/05/13 02:34:21 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
            &vnlru_nowhere, 0,
            "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token);
        lwkt_token_init(&mntvnode_token);
        lwkt_token_init(&mntid_token);
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Support function called with mntvnode_token held to move a vnode to
 * the end of the list.
 */
static void
vmovevnodetoend(struct mount *mp, struct vnode *vp)
{
        vremovevnodemnt(vp);
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
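 *
 * Illustrative use from a filesystem's vnode-creation path (a sketch
 * only; the VT_UFS tag, the fnode pointer, and the VREG type stand in
 * for whatever the calling filesystem would actually use):
 *
 *      struct vnode *vp;
 *
 *      error = getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *      if (error == 0) {
 *              vp->v_data = fnode;     (attach fs-private data)
 *              vp->v_type = VREG;      (leaving VNON publishes the vnode)
 *              vx_unlock(vp);          (keep the ref, drop the VX lock)
 *      }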
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}

/*
 * Look up a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
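 *
 * Illustrative call from a root mount setup path (a sketch only; the
 * "ufs" and "root_device" arguments are placeholders).  Note that the
 * mount is returned busied, so a successful caller eventually pairs
 * this with vfs_unbusy():
 *
 *      struct mount *mp;
 *
 *      error = vfs_rootmountalloc("ufs", "root_device", &mp);
 *      if (error == 0) {
 *              ... VFS_MOUNT() the filesystem, then vfs_unbusy(mp) ...
 *      }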
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);
        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (!strcmp(vfsp->vfc_name, fstypename))
                        break;
        }
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
        bzero((char *)mp, (u_long)sizeof(struct mount));
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
        vfs_busy(mp, LK_NOWAIT);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_flag = MNT_RDONLY;
        vfsp->vfc_refcount++;
        mp->mnt_iosize_max = DFLTPHYS;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}

/*
 * Look up a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&ilock);
        return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        lwkt_tokref ilock;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&ilock, &mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&ilock);
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
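
/*
 * In outline, the reclaim path below works in stages: vlrureclaim()
 * walks a mount's vnode list, applies vmightfree() as a cheap
 * non-blocking filter, VX-locks the surviving candidates, re-validates
 * them with vtrytomakegoneable() (which may flush buffers and invalidate
 * namecache entries), and finally vgone_vxlocked()s the vnodes that
 * pass.
 */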

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
#if 0
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
#endif
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        return(0);
        }
        return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0   /* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnode *vp;
        lwkt_tokref ilock;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&ilock, &mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;
        while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
                /*
                 * __VNODESCAN__
                 *
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||       /* XXX */
                    vp->v_type == VBAD ||       /* XXX */
                    !vmightfree(vp, trigger)    /* critical path opt */
                ) {
                        vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||       /* XXX */
                    vp->v_type == VBAD ||       /* XXX */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)    /* critical path opt */
                ) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vmovevnodetoend(mp, vp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&ilock);
        return (done);
}

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        crit_enter();
        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }
                cache_cleanneg(0);
                done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any, repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
        crit_exit();
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        lwkt_tokref ilock;
        int error;

        lwkt_gettoken(&ilock, &mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&ilock);
        return (error);
}
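
/*
 * Illustrative mountlist_interlock() usage (a sketch only; the
 * check_unmounting callback is hypothetical):
 *
 *      static int
 *      check_unmounting(struct mount *mp)
 *      {
 *              if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *                      return (EBUSY);
 *              return (0);
 *      }
 *
 *      error = mountlist_interlock(check_unmounting, mp);
 */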

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD - the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE - the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY  - the scanner will make the callback without busying
 *                   the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        lwkt_tokref ilock;
        struct mount *mp;
        thread_t td;
        int count;
        int res;

        lwkt_gettoken(&ilock, &mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;
        td = curthread;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&ilock);
        return(res);
}

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
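
/*
 * Illustrative mountlist_scan() usage (a sketch only; count_rdonly is a
 * hypothetical callback).  Per-mount return values >= 0 are summed, just
 * as vnlru_proc() above sums the per-mount results of vlrureclaim():
 *
 *      static int
 *      count_rdonly(struct mount *mp, void *data)
 *      {
 *              return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
 *      }
 *
 *      n = mountlist_scan(count_rdonly, NULL, MNTSCAN_FORWARD);
 */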

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&ilock);
                return;
        }
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&ilock);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mntvnode token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mntvnode
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        lwkt_tokref ilock;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;

        lwkt_gettoken(&ilock, &mntvnode_token);

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                if (vp->v_type == VNON)         /* visible but not ready */
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0)
                                goto next;
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;
                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
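                         *
                         * (Here info.vp acts as our scan cursor: if the
                         * vnode was pulled off the list while we slept,
                         * vremovevnodemnt() already advanced info.vp, and
                         * the test below notices the mismatch.)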
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
next:
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&ilock);
        return(r);
}

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones;
 * return an error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;         /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0)
                        return (error);
                vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
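
/*
 * Illustrative vflush() usage from a filesystem's unmount path (a sketch
 * only; foo_unmount is hypothetical).  Passing rootrefs as 1 tells
 * vflush() that the caller accounts for one reference on the root vnode:
 *
 *      static int
 *      foo_unmount(struct mount *mp, int mntflags)
 *      {
 *              int flags;
 *
 *              flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *              return (vflush(mp, 1, flags));
 *      }
 */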

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
             vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly close the vnode.  For block
         * or character devices, revert to an anonymous device.  For
         * all other files, just kill them.
         */
        if (info->flags & FORCECLOSE) {
                if (vp->v_type != VBLK && vp->v_type != VCHR) {
                        vgone_vxlocked(vp);
                } else {
                        vclean_vxlocked(vp, 0);
                        vp->v_ops = &spec_vnode_vops_p;
                        insmntque(vp, NULL);
                }
                return(0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}