/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
time_t retrydelay = 1;		/* retry delay after failure */
SYSCTL_INT(_kern, OID_AUTO, retrydelay, CTLFLAG_RW,
	   &retrydelay, 0, "VFS retry synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O was sped up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "Number of syncer speed-up requests");

LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount	*sc_mp;
	struct lwkt_token sc_token;
	struct thread	*sc_thread;
	int		sc_flags;
	struct synclist	*syncer_workitem_pending;
	long		syncer_mask;
	int		syncer_delayno;
	int		syncer_forced;
	int		syncer_rushjob;	/* sequence vnodes faster */
	int		syncer_trigger;	/* trigger full sync */
	long		syncer_count;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, vnodes mounted on block
 * devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they
 * are delayed only about a third as long as file data.  Thus, there
 * are SYNCER_MAXDELAY queues that are processed round-robin at a rate
 * of one each second (driven off the filesystem syncer process).  The
 * syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
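
/*
 * Worked example (illustrative): hashinit(SYNCER_MAXDELAY, ...) in
 * vn_syncer_thr_create() below sizes the table to a power of two, so
 * with SYNCER_MAXDELAY == 32 there are 32 buckets and syncer_mask is
 * 31.  If syncer_delayno is currently 20, queueing a vnode with a 15
 * second delay places it in slot (20 + 15) & 31 == 3; the round-robin
 * index advances once per second and wraps past 31 back to 0,
 * reaching slot 3 fifteen seconds later.
 */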

/*
 * Return the number of vnodes on the syncer's timed list.  This count
 * includes the syncer vnode (mp->mnt_syncer), so if one is in use the
 * minimum value returned is 1.
 */
long
vn_syncer_count(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx)
		return (ctx->syncer_count);
	return (0);
}

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * WARNING: The syncer depends on this function not blocking if the caller
 *	    already holds the syncer token.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);
	++ctx->syncer_count;

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Remove the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
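
/*
 * Illustrative usage (hypothetical VFS write path): after dirtying
 * buffer cache data or the backing VM object, a filesystem marks the
 * vnode so the syncer will pick it up within roughly syncdelay
 * seconds:
 *
 *	vsetisdirty(vp);	inode or other meta-data went dirty
 *	vsetobjdirty(vp);	backing VM object went dirty
 *
 * Both calls are cheap when the flag is already set, and the
 * VONWORKLST re-test under sc_token avoids double-queueing a vnode
 * that is already on the worklist.
 */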

/*
 * Create the per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop the per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0) {
		tsleep_interlock(&ctx->sc_flags, 0);
		lwkt_reltoken(&ctx->sc_token);
		tsleep(&ctx->sc_flags, PINTERLOCKED, "syncexit", hz);
		lwkt_gettoken(&ctx->sc_token);
	}

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
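
/*
 * Note on the shutdown handshake in vn_syncer_thr_stop() above:
 * tsleep_interlock() registers the sleep address before sc_token is
 * released, so a wakeup() issued by the exiting syncer between the
 * lwkt_reltoken() and the tsleep() cannot be lost.  SC_FLAG_DONE is
 * re-tested under the token on each pass of the loop.
 */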

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on the slp queue.
		 *
		 * Note that vsyncscan() and vn_syncer_one() can pull items
		 * off the same list, so we shift vp's position in the
		 * list immediately.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];

		/*
		 * If syncer_trigger is set (from trigger_syncer(mp)),
		 * immediately do a full filesystem sync and set up the
		 * following full filesystem sync to occur in 1 second.
		 */
		if (ctx->syncer_trigger) {
			ctx->syncer_trigger = 0;
			if (ctx->sc_mp && ctx->sc_mp->mnt_syncer) {
				vp = ctx->sc_mp->mnt_syncer;
				if (vp->v_flag & VONWORKLST) {
					vn_syncer_add(vp, retrydelay);
					if (vget(vp, LK_EXCLUSIVE) == 0) {
						VOP_FSYNC(vp, MNT_LAZY, 0);
						vput(vp);
						vnodes_synced++;
					}
				}
			}
		}

		/*
		 * FSYNC items in this bucket
		 */
		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_syncer_add(vp, retrydelay);
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}
		}

		/*
		 * Increment the slot upon completion.  This typically
		 * happens once a second but may run faster if the
		 * syncer is triggered or rushed.
		 */
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * Normal syncer operation iterates once a second, unless
		 * specifically triggered.
		 */
		if (time_uptime == starttime &&
		    ctx->syncer_trigger == 0) {
			tsleep_interlock(ctx, 0);
			if (time_uptime == starttime &&
			    ctx->syncer_trigger == 0 &&
			    (ctx->sc_flags & SC_FLAG_EXIT) == 0) {
				tsleep(ctx, PINTERLOCKED, "syncer", hz);
			}
		}
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * This allows a filesystem to pro-actively request that a dirty
 * vnode be fsync()d.  This routine does not guarantee that one
 * will actually be fsynced.
 */
void
vn_syncer_one(struct mount *mp)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int n = syncdelay;

	ctx = mp->mnt_syncer_ctx;
	i = ctx->syncer_delayno & ctx->syncer_mask;
	cpu_ccfence();

	if (lwkt_trytoken(&ctx->sc_token) == 0)
		return;

	/*
	 * Look ahead on our syncer time array.
	 */
	do {
		slp = &ctx->syncer_workitem_pending[i];
		vp = LIST_FIRST(slp);
		if (vp && vp->v_type == VNON)
			vp = LIST_NEXT(vp, v_synclist);
		if (vp)
			break;
		i = (i + 1) & ctx->syncer_mask;
		/* i will be wrong if we stop here but vp is NULL so ok */
	} while (--n);

	/*
	 * Process one vnode, skipping the syncer vnode (v_type == VNON)
	 * but also stopping if the syncer vnode is the only thing on
	 * this list.
	 */
	if (vp) {
		vn_syncer_add(vp, retrydelay);
		if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(vp, MNT_LAZY, 0);
			vput(vp);
		}
	}
	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test; wakeup() only has an
	 * effect if the syncer thread is actually asleep on the
	 * context.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp && mp->mnt_syncer_ctx)
		wakeup(mp->mnt_syncer_ctx);
}

/*
 * Trigger the syncer to perform a full filesystem sync as soon as
 * possible.
 */
void
trigger_syncer(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		if (ctx->syncer_trigger == 0) {
			ctx->syncer_trigger = 1;
			wakeup(ctx);
		}
	}
}
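
/*
 * Worked example (illustrative): with syncdelay at its default of 30,
 * five speedup_syncer() calls raise the global rushjob count by 5.
 * On each subsequent iteration syncer_thread() sees a non-zero delta,
 * increments its private syncer_rushjob, and sleeps for one tick
 * instead of one second, so the next five buckets drain in a few
 * ticks.  A backlog larger than syncdelay / 2 is clamped to bound the
 * catch-up burst.
 */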
587 */ 588 static int sync_close(struct vop_close_args *); 589 static int sync_fsync(struct vop_fsync_args *); 590 static int sync_inactive(struct vop_inactive_args *); 591 static int sync_reclaim (struct vop_reclaim_args *); 592 static int sync_print(struct vop_print_args *); 593 594 static struct vop_ops sync_vnode_vops = { 595 .vop_default = vop_eopnotsupp, 596 .vop_close = sync_close, 597 .vop_fsync = sync_fsync, 598 .vop_inactive = sync_inactive, 599 .vop_reclaim = sync_reclaim, 600 .vop_print = sync_print, 601 }; 602 603 static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops; 604 605 VNODEOP_SET(sync_vnode_vops); 606 607 /* 608 * Create a new filesystem syncer vnode for the specified mount point. 609 * This vnode is placed on the worklist and is responsible for sync'ing 610 * the filesystem. 611 * 612 * NOTE: read-only mounts are also placed on the worklist. The filesystem 613 * sync code is also responsible for cleaning up vnodes. 614 */ 615 int 616 vfs_allocate_syncvnode(struct mount *mp) 617 { 618 struct vnode *vp; 619 static long start, incr, next; 620 int error; 621 622 /* Allocate a new vnode */ 623 error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0); 624 if (error) { 625 mp->mnt_syncer = NULL; 626 return (error); 627 } 628 vp->v_type = VNON; 629 /* 630 * Place the vnode onto the syncer worklist. We attempt to 631 * scatter them about on the list so that they will go off 632 * at evenly distributed times even if all the filesystems 633 * are mounted at once. 634 */ 635 next += incr; 636 if (next == 0 || next > SYNCER_MAXDELAY) { 637 start /= 2; 638 incr /= 2; 639 if (start == 0) { 640 start = SYNCER_MAXDELAY / 2; 641 incr = SYNCER_MAXDELAY; 642 } 643 next = start; 644 } 645 646 /* 647 * Only put the syncer vnode onto the syncer list if we have a 648 * syncer thread. Some VFS's (aka NULLFS) don't need a syncer 649 * thread. 650 */ 651 if (mp->mnt_syncer_ctx) 652 vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0); 653 654 /* 655 * The mnt_syncer field inherits the vnode reference, which is 656 * held until later decomissioning. 657 */ 658 mp->mnt_syncer = vp; 659 vx_unlock(vp); 660 return (0); 661 } 662 663 static int 664 sync_close(struct vop_close_args *ap) 665 { 666 return (0); 667 } 668 669 /* 670 * Do a lazy sync of the filesystem. 671 * 672 * sync_fsync { struct vnode *a_vp, int a_waitfor } 673 */ 674 static int 675 sync_fsync(struct vop_fsync_args *ap) 676 { 677 struct vnode *syncvp = ap->a_vp; 678 struct mount *mp = syncvp->v_mount; 679 int asyncflag; 680 681 /* 682 * We only need to do something if this is a lazy evaluation. 683 */ 684 if ((ap->a_waitfor & MNT_LAZY) == 0) 685 return (0); 686 687 /* 688 * Move ourselves to the back of the sync list. 689 */ 690 vn_syncer_add(syncvp, syncdelay); 691 692 /* 693 * Walk the list of vnodes pushing all that are dirty and 694 * not already on the sync list, and freeing vnodes which have 695 * no refs and whos VM objects are empty. vfs_msync() handles 696 * the VM issues and must be called whether the mount is readonly 697 * or not. 698 */ 699 if (vfs_busy(mp, LK_NOWAIT) != 0) 700 return (0); 701 if (mp->mnt_flag & MNT_RDONLY) { 702 vfs_msync(mp, MNT_NOWAIT); 703 } else { 704 asyncflag = mp->mnt_flag & MNT_ASYNC; 705 mp->mnt_flag &= ~MNT_ASYNC; /* ZZZ hack */ 706 vfs_msync(mp, MNT_NOWAIT); 707 VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY); 708 if (asyncflag) 709 mp->mnt_flag |= MNT_ASYNC; 710 } 711 vfs_unbusy(mp); 712 return (0); 713 } 714 715 /* 716 * The syncer vnode is no longer referenced. 
717 * 718 * sync_inactive { struct vnode *a_vp, struct proc *a_p } 719 */ 720 static int 721 sync_inactive(struct vop_inactive_args *ap) 722 { 723 vgone_vxlocked(ap->a_vp); 724 return (0); 725 } 726 727 /* 728 * The syncer vnode is no longer needed and is being decommissioned. 729 * This can only occur when the last reference has been released on 730 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL. 731 * 732 * Modifications to the worklist must be protected with a critical 733 * section. 734 * 735 * sync_reclaim { struct vnode *a_vp } 736 */ 737 static int 738 sync_reclaim(struct vop_reclaim_args *ap) 739 { 740 struct vnode *vp = ap->a_vp; 741 struct syncer_ctx *ctx; 742 743 ctx = vp->v_mount->mnt_syncer_ctx; 744 if (ctx) { 745 lwkt_gettoken(&ctx->sc_token); 746 KKASSERT(vp->v_mount->mnt_syncer != vp); 747 if (vp->v_flag & VONWORKLST) { 748 LIST_REMOVE(vp, v_synclist); 749 vclrflags(vp, VONWORKLST); 750 --ctx->syncer_count; 751 } 752 lwkt_reltoken(&ctx->sc_token); 753 } else { 754 KKASSERT((vp->v_flag & VONWORKLST) == 0); 755 } 756 757 return (0); 758 } 759 760 /* 761 * This is very similar to vmntvnodescan() but it only scans the 762 * vnodes on the syncer list. VFS's which support faster VFS_SYNC 763 * operations use the VISDIRTY flag on the vnode to ensure that vnodes 764 * with dirty inodes are added to the syncer in addition to vnodes 765 * with dirty buffers, and can use this function instead of nmntvnodescan(). 766 * 767 * This scan does not issue VOP_FSYNC()s. The supplied callback is intended 768 * to synchronize the file in the manner intended by the VFS using it. 769 * 770 * This is important when a system has millions of vnodes. 771 */ 772 int 773 vsyncscan( 774 struct mount *mp, 775 int vmsc_flags, 776 int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data), 777 void *data 778 ) { 779 struct syncer_ctx *ctx; 780 struct synclist *slp; 781 struct vnode *vp; 782 int i; 783 int count; 784 int lkflags; 785 786 if (vmsc_flags & VMSC_NOWAIT) 787 lkflags = LK_NOWAIT; 788 else 789 lkflags = 0; 790 791 /* 792 * Syncer list context. This API requires a dedicated syncer thread. 793 * (MNTK_THR_SYNC). 794 */ 795 KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC); 796 ctx = mp->mnt_syncer_ctx; 797 lwkt_gettoken(&ctx->sc_token); 798 799 /* 800 * Setup for loop. Allow races against the syncer thread but 801 * require that the syncer thread no be lazy if we were told 802 * not to be lazy. 803 */ 804 i = ctx->syncer_delayno & ctx->syncer_mask; 805 if ((vmsc_flags & VMSC_NOWAIT) == 0) 806 ++ctx->syncer_forced; 807 for (count = 0; count <= ctx->syncer_mask; ++count) { 808 slp = &ctx->syncer_workitem_pending[i]; 809 810 while ((vp = LIST_FIRST(slp)) != NULL) { 811 KKASSERT(vp->v_mount == mp); 812 if (vmsc_flags & VMSC_GETVP) { 813 if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) { 814 slowfunc(mp, vp, data); 815 vput(vp); 816 } 817 } else if (vmsc_flags & VMSC_GETVX) { 818 vx_get(vp); 819 slowfunc(mp, vp, data); 820 vx_put(vp); 821 } else { 822 vhold(vp); 823 slowfunc(mp, vp, data); 824 vdrop(vp); 825 } 826 827 /* 828 * vp could be invalid. However, if vp is still at 829 * the head of the list it is clearly valid and we 830 * can safely move it. 831 */ 832 if (LIST_FIRST(slp) == vp) 833 vn_syncer_add(vp, -(i + syncdelay)); 834 } 835 i = (i + 1) & ctx->syncer_mask; 836 } 837 838 if ((vmsc_flags & VMSC_NOWAIT) == 0) 839 --ctx->syncer_forced; 840 lwkt_reltoken(&ctx->sc_token); 841 return(0); 842 } 843 844 /* 845 * Print out a syncer vnode. 
846 * 847 * sync_print { struct vnode *a_vp } 848 */ 849 static int 850 sync_print(struct vop_print_args *ap) 851 { 852 struct vnode *vp = ap->a_vp; 853 854 kprintf("syncer vnode"); 855 lockmgr_printinfo(&vp->v_lock); 856 kprintf("\n"); 857 return (0); 858 } 859 860