/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
	    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	   &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	   &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	   &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O sped up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	   &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist 	*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, filesystems mounted on
 * block devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they are
 * delayed only about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
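
/*
 * Illustrative sketch, not compiled into the kernel (example only):
 * because the number of queues is a power of two, masking turns
 * "current slot plus delay" into a ring index.  With syncer_mask == 31
 * (SYNCER_MAXDELAY slots) and syncer_delayno == 30, a delay of 15 maps
 * to slot (30 + 15) & 31 == 13, i.e. the entry wraps around the ring
 * and comes due 15 ticks later.
 */
#if 0	/* example only */
static int
example_syncer_slot(int syncer_delayno, long syncer_mask, int delay)
{
	/* equivalent to (syncer_delayno + delay) % (syncer_mask + 1) */
	return ((syncer_delayno + delay) & syncer_mask);
}
#endif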
/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held; we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}
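
/*
 * Usage sketch, not compiled into the kernel (example only; examplefs_*
 * is hypothetical): a VFS that dirties an inode outside the buffer cache
 * marks the vnode with vsetisdirty(), which queues it on the syncer
 * roughly syncdelay seconds out.  The VISDIRTY pre-test above makes
 * repeated calls cheap, so a write path can call this unconditionally.
 */
#if 0	/* example only */
static void
examplefs_dirty_inode(struct vnode *vp)
{
	vsetisdirty(vp);	/* queued at most once per dirty cycle */
}
#endif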
/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
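
/*
 * Lifecycle sketch, not compiled into the kernel (example only;
 * examplefs_* is hypothetical): a VFS that wants a dedicated syncer
 * pairs the two calls above around the lifetime of the mount.
 * vn_syncer_thr_stop() signals SC_FLAG_EXIT, waits for the thread to
 * acknowledge with SC_FLAG_DONE, and tears the context down before
 * returning.
 */
#if 0	/* example only */
static int
examplefs_mount(struct mount *mp)
{
	vn_syncer_thr_create(mp);	/* creates the per-mount "syncerN" thread */
	return (0);
}

static int
examplefs_unmount(struct mount *mp)
{
	vn_syncer_thr_stop(mp);		/* blocks until SC_FLAG_DONE */
	return (0);
}
#endif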
struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_uptime == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  unsleep_and_wakeup_thread()
	 * will only do something real if the thread is in the right state.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp)
		wakeup(mp->mnt_syncer_ctx);
}
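
/*
 * Usage sketch, not compiled into the kernel (example only): a subsystem
 * under memory pressure from accumulated dirty state can ask the syncer
 * to work ahead.  Each call bumps rushjob by one; the syncer loop above
 * then consumes one extra queue slot per tick, or resynchronizes its
 * private counter if it has fallen more than syncdelay/2 behind.
 */
#if 0	/* example only */
static void
example_relieve_dirty_pressure(struct mount *mp)
{
	speedup_syncer(mp);	/* process one extra second of work ASAP */
}
#endif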
564 * 565 * sync_fsync { struct vnode *a_vp, int a_waitfor } 566 */ 567 static int 568 sync_fsync(struct vop_fsync_args *ap) 569 { 570 struct vnode *syncvp = ap->a_vp; 571 struct mount *mp = syncvp->v_mount; 572 int asyncflag; 573 574 /* 575 * We only need to do something if this is a lazy evaluation. 576 */ 577 if ((ap->a_waitfor & MNT_LAZY) == 0) 578 return (0); 579 580 /* 581 * Move ourselves to the back of the sync list. 582 */ 583 vn_syncer_add(syncvp, syncdelay); 584 585 /* 586 * Walk the list of vnodes pushing all that are dirty and 587 * not already on the sync list, and freeing vnodes which have 588 * no refs and whos VM objects are empty. vfs_msync() handles 589 * the VM issues and must be called whether the mount is readonly 590 * or not. 591 */ 592 if (vfs_busy(mp, LK_NOWAIT) != 0) 593 return (0); 594 if (mp->mnt_flag & MNT_RDONLY) { 595 vfs_msync(mp, MNT_NOWAIT); 596 } else { 597 asyncflag = mp->mnt_flag & MNT_ASYNC; 598 mp->mnt_flag &= ~MNT_ASYNC; /* ZZZ hack */ 599 vfs_msync(mp, MNT_NOWAIT); 600 VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY); 601 if (asyncflag) 602 mp->mnt_flag |= MNT_ASYNC; 603 } 604 vfs_unbusy(mp); 605 return (0); 606 } 607 608 /* 609 * The syncer vnode is no longer referenced. 610 * 611 * sync_inactive { struct vnode *a_vp, struct proc *a_p } 612 */ 613 static int 614 sync_inactive(struct vop_inactive_args *ap) 615 { 616 vgone_vxlocked(ap->a_vp); 617 return (0); 618 } 619 620 /* 621 * The syncer vnode is no longer needed and is being decommissioned. 622 * This can only occur when the last reference has been released on 623 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL. 624 * 625 * Modifications to the worklist must be protected with a critical 626 * section. 627 * 628 * sync_reclaim { struct vnode *a_vp } 629 */ 630 static int 631 sync_reclaim(struct vop_reclaim_args *ap) 632 { 633 struct vnode *vp = ap->a_vp; 634 struct syncer_ctx *ctx; 635 636 ctx = vp->v_mount->mnt_syncer_ctx; 637 if (ctx) { 638 lwkt_gettoken(&ctx->sc_token); 639 KKASSERT(vp->v_mount->mnt_syncer != vp); 640 if (vp->v_flag & VONWORKLST) { 641 LIST_REMOVE(vp, v_synclist); 642 vclrflags(vp, VONWORKLST); 643 } 644 lwkt_reltoken(&ctx->sc_token); 645 } else { 646 KKASSERT((vp->v_flag & VONWORKLST) == 0); 647 } 648 649 return (0); 650 } 651 652 /* 653 * This is very similar to vmntvnodescan() but it only scans the 654 * vnodes on the syncer list. VFS's which support faster VFS_SYNC 655 * operations use the VISDIRTY flag on the vnode to ensure that vnodes 656 * with dirty inodes are added to the syncer in addition to vnodes 657 * with dirty buffers, and can use this function instead of nmntvnodescan(). 658 * 659 * This is important when a system has millions of vnodes. 660 */ 661 int 662 vsyncscan( 663 struct mount *mp, 664 int vmsc_flags, 665 int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data), 666 void *data 667 ) { 668 struct syncer_ctx *ctx; 669 struct synclist *slp; 670 struct vnode *vp; 671 int i; 672 int count; 673 int lkflags; 674 675 if (vmsc_flags & VMSC_NOWAIT) 676 lkflags = LK_NOWAIT; 677 else 678 lkflags = 0; 679 680 /* 681 * Syncer list context. This API requires a dedicated syncer thread. 682 * (MNTK_THR_SYNC). 683 */ 684 KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC); 685 ctx = mp->mnt_syncer_ctx; 686 lwkt_gettoken(&ctx->sc_token); 687 688 /* 689 * Setup for loop. Allow races against the syncer thread but 690 * require that the syncer thread no be lazy if we were told 691 * not to be lazy. 
692 */ 693 i = ctx->syncer_delayno & ctx->syncer_mask; 694 if ((vmsc_flags & VMSC_NOWAIT) == 0) 695 ++ctx->syncer_forced; 696 for (count = 0; count <= ctx->syncer_mask; ++count) { 697 slp = &ctx->syncer_workitem_pending[i]; 698 699 while ((vp = LIST_FIRST(slp)) != NULL) { 700 KKASSERT(vp->v_mount == mp); 701 if (vmsc_flags & VMSC_GETVP) { 702 if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) { 703 slowfunc(mp, vp, data); 704 vput(vp); 705 } 706 } else if (vmsc_flags & VMSC_GETVX) { 707 vx_get(vp); 708 slowfunc(mp, vp, data); 709 vx_put(vp); 710 } else { 711 vhold(vp); 712 slowfunc(mp, vp, data); 713 vdrop(vp); 714 } 715 716 /* 717 * vp could be invalid. However, if vp is still at 718 * the head of the list it is clearly valid and we 719 * can safely move it. 720 */ 721 if (LIST_FIRST(slp) == vp) 722 vn_syncer_add(vp, -(i + syncdelay)); 723 } 724 i = (i + 1) & ctx->syncer_mask; 725 } 726 727 if ((vmsc_flags & VMSC_NOWAIT) == 0) 728 --ctx->syncer_forced; 729 lwkt_reltoken(&ctx->sc_token); 730 return(0); 731 } 732 733 /* 734 * Print out a syncer vnode. 735 * 736 * sync_print { struct vnode *a_vp } 737 */ 738 static int 739 sync_print(struct vop_print_args *ap) 740 { 741 struct vnode *vp = ap->a_vp; 742 743 kprintf("syncer vnode"); 744 lockmgr_printinfo(&vp->v_lock); 745 kprintf("\n"); 746 return (0); 747 } 748 749