/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
time_t retrydelay = 1;		/* retry delay after failure */
SYSCTL_INT(_kern, OID_AUTO, retrydelay, CTLFLAG_RW,
	   &retrydelay, 0, "VFS retry synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O sped up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;
	int			syncer_unused01;
	long			syncer_count;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystem metadata
 * updates are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they are
 * delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * thread).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
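
/*
 * Worked example (a sketch; numbers assume the default SYNCER_MAXDELAY
 * of 32, which hashinit() turns into a 32-entry array with
 * syncer_mask == 31).  If syncer_delayno is currently 20 and a vnode is
 * queued with a delay of 15, vn_syncer_add() below computes
 *
 *	slot = (20 + 15) & 31 == 3
 *
 * and the syncer thread, advancing one slot per second and wrapping
 * from 31 back to 0, reaches slot 3 fifteen seconds later.  The mask
 * arithmetic is why the wheel size must be a power of two.
 */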

/*
 * Return the number of vnodes on the syncer's timed list.  This will
 * include the syncer vnode (mp->mnt_syncer) so if used, a minimum
 * value of 1 will be returned.
 */
long
vn_syncer_count(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx)
		return (ctx->syncer_count);
	return (0);
}

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);
	++ctx->syncer_count;

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
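
/*
 * Usage sketch (hypothetical VFS code paths; the myfs_* names are not
 * part of this file).  A filesystem marks a vnode dirty after modifying
 * it, which queues it on the wheel roughly syncdelay seconds out, and
 * clears the flag once nothing dirty remains:
 *
 *	myfs_write(...)			// hypothetical write path
 *	{
 *		...modify buffers / inode...
 *		vsetisdirty(vp);	// no-op if VISDIRTY already set
 *	}
 *
 *	myfs_fsync(...)			// hypothetical fsync path
 *	{
 *		...flush everything...
 *		if (RB_EMPTY(&vp->v_rbdirty_tree))
 *			vclrisdirty(vp);  // vnode must be locked here
 *		return (0);
 *	}
 */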

/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
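
/*
 * Lifecycle sketch (hypothetical mount/unmount caller, not code from
 * this file): a VFS that wants a dedicated syncer brackets the mount's
 * active life with the two routines above:
 *
 *	mp->mnt_kern_flag |= MNTK_THR_SYNC;	// advertise the thread
 *	vn_syncer_thr_create(mp);		// spawns "syncerN"
 *	...
 *	vn_syncer_thr_stop(mp);			// at unmount; sleeps in
 *						// "syncexit" until the
 *						// thread sets SC_FLAG_DONE
 *
 * vn_syncer_thr_stop() is safe to call when no thread was created; it
 * returns immediately if mp->mnt_syncer_ctx is NULL.
 */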

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Note that vsyncscan() and vn_syncer_one() can pull items
		 * off the same list, so we shift vp's position in the
		 * list immediately.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_syncer_add(vp, retrydelay);
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}
		}

		/*
		 * Increment the slot upon completion.
		 */
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * This allows a filesystem to pro-actively request that a dirty
 * vnode be fsync()d.  This routine does not guarantee that one
 * will actually be fsynced.
 */
void
vn_syncer_one(struct mount *mp)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int n = syncdelay;

	ctx = mp->mnt_syncer_ctx;
	i = ctx->syncer_delayno & ctx->syncer_mask;
	cpu_ccfence();

	if (lwkt_trytoken(&ctx->sc_token) == 0)
		return;

	/*
	 * Look ahead on our syncer time array.
	 */
	do {
		slp = &ctx->syncer_workitem_pending[i];
		vp = LIST_FIRST(slp);
		if (vp && vp->v_type == VNON)
			vp = LIST_NEXT(vp, v_synclist);
		if (vp)
			break;
		i = (i + 1) & ctx->syncer_mask;
		/* i will be wrong if we stop here but vp is NULL so ok */
	} while (--n);

	/*
	 * Process one vnode, skipping the syncer vnode, but also stop
	 * if the syncer vnode is the only thing on this list.
	 */
	if (vp) {
		vn_syncer_add(vp, retrydelay);
		if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(vp, MNT_LAZY, 0);
			vput(vp);
		}
	}
	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  The wakeup() will only do
	 * something real if the syncer thread is actually asleep on the
	 * ctx.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp)
		wakeup(mp->mnt_syncer_ctx);
}
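
/*
 * Pacing sketch (a summary of the rushjob handling in syncer_thread(),
 * with numbers assuming the default syncdelay of 30).  Every
 * speedup_syncer() call bumps the global rushjob counter; each syncer
 * thread tracks it with a private syncer_rushjob:
 *
 *	delta = rushjob - ctx->syncer_rushjob;
 *	// 0 < delta <= 15: skip the one-second sleep and process the
 *	//	next slot after only a one-tick nap, one slot per pass
 *	// delta > 15 (syncdelay / 2): clamp, so at most half the wheel
 *	//	is rushed no matter how many requests piled up
 *
 * Three back-to-back speedup_syncer() calls thus push roughly three
 * seconds worth of queued work out over the next few ticks.
 */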

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 *	 sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;

	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFSs (e.g. NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
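
/*
 * Worked example of the scatter logic above (a sketch; values assume
 * SYNCER_MAXDELAY == 32 and the static `start', `incr', and `next'
 * beginning at zero for successive mounts):
 *
 *	mount #		start	incr	offset chosen (next)
 *	1		16	32	16
 *	2		8	16	8
 *	3		8	16	24
 *	4		4	8	4
 *	5..7		4	8	12, 20, 28
 *	8..		2	4	2, 6, 10, ...
 *
 * Each time `next' would run off the wheel the stride is halved, so
 * successive syncer vnodes land at increasingly fine, evenly spaced
 * offsets instead of all firing in the same second.
 */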

static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	if (ctx) {
		lwkt_gettoken(&ctx->sc_token);
		KKASSERT(vp->v_mount->mnt_syncer != vp);
		if (vp->v_flag & VONWORKLST) {
			LIST_REMOVE(vp, v_synclist);
			vclrflags(vp, VONWORKLST);
			--ctx->syncer_count;
		}
		lwkt_reltoken(&ctx->sc_token);
	} else {
		KKASSERT((vp->v_flag & VONWORKLST) == 0);
	}

	return (0);
}

/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFSs which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This scan does not issue VOP_FSYNC()s.  The supplied callback is
 * intended to synchronize the file in the manner intended by the VFS
 * using it.
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(
    struct mount *mp,
    int vmsc_flags,
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int count;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	i = ctx->syncer_delayno & ctx->syncer_mask;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;
	for (count = 0; count <= ctx->syncer_mask; ++count) {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}

			/*
			 * vp could be invalid.  However, if vp is still at
			 * the head of the list it is clearly valid and we
			 * can safely move it.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	}

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return (0);
}

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}
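
/*
 * Usage sketch for vsyncscan() (hypothetical VFS, not part of this
 * file; the myfs_* names and the lazy-flag mapping are assumptions).
 * A filesystem advertising MNTK_THR_SYNC can implement its VFS sync
 * entry point on top of the syncer list instead of walking every vnode
 * on the mount:
 *
 *	static int
 *	myfs_sync_scan(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		// flush this vnode however the VFS sees fit; the scan
 *		// itself issues no VOP_FSYNC()s
 *		return (myfs_flush_vnode(vp));	// hypothetical helper
 *	}
 *
 *	static int
 *	myfs_sync(struct mount *mp, int waitfor)
 *	{
 *		int flags = VMSC_GETVP;
 *
 *		if (waitfor == MNT_LAZY)
 *			flags |= VMSC_NOWAIT;
 *		return (vsyncscan(mp, flags, myfs_sync_scan, NULL));
 *	}
 */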