/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_INT(_kern, OID_AUTO, syncdelay, CTLFLAG_RW,
    &syncdelay, 0, "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
    &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
    &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
    &metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
    &stat_rush_requests, 0, "");
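/*
 * The delays above are exported as read-write sysctls, so the syncer's
 * pacing can be tuned from userland without a rebuild.  A hypothetical
 * tuning session might look like this (values illustrative only):
 *
 *	# sysctl kern.syncdelay=15	(flush dirty data twice as often)
 *	# sysctl kern.dirdelay=10	(push directory updates sooner)
 */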
LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT		(0x1)	/* request syncer exit */
#define	SC_FLAG_DONE		(0x2)	/* syncer confirm exit */
#define	SC_FLAG_BIOOPS_ALL	(0x4)	/* do bufops_sync(NULL) */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;

	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
};

static struct syncer_ctx syncer_ctx0;

static void syncer_thread(void *);

static void
syncer_ctx_init(struct syncer_ctx *ctx, struct mount *mp)
{
	ctx->sc_mp = mp;
	lwkt_token_init(&ctx->sc_token, "syncer");
	ctx->sc_flags = 0;

	ctx->syncer_workitem_pending = hashinit(syncer_maxdelay, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
}

/*
 * Called from vfsinit()
 */
void
vfs_sync_init(void)
{
	syncer_ctx_init(&syncer_ctx0, NULL);
	syncer_maxdelay = syncer_ctx0.syncer_mask + 1;
	syncer_ctx0.sc_flags |= SC_FLAG_BIOOPS_ALL;

	/* Support schedcpu wakeup of syncer0 */
	lbolt_syncer = &syncer_ctx0;
}

static struct syncer_ctx *
vn_get_syncer(struct vnode *vp)
{
	struct mount *mp;
	struct syncer_ctx *ctx;

	ctx = NULL;
	mp = vp->v_mount;
	if (mp)
		ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		ctx = &syncer_ctx0;

	return (ctx);
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer thread).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
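/*
 * Worked example (a sketch, assuming the default SYNCER_MAXDELAY of 32,
 * which makes syncer_mask 31): if syncer_delayno is currently 30 and a
 * vnode is queued with a delay of 15 seconds, it lands in slot
 * (30 + 15) & 31 == 13; the wheel wraps cleanly because the hash size
 * is a power of two.  vn_syncer_add() below also clamps the delay to
 * syncer_maxdelay - 2, which keeps at least one empty slot between a
 * newly queued entry and the slot currently being drained.
 */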
/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * MPSAFE
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to recheck conditions.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Create a per-filesystem syncer thread
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;
	int rc;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK);

	syncer_ctx_init(ctx, mp);
	mp->mnt_syncer_ctx = ctx;

	rc = kthread_create(syncer_thread, ctx, &ctx->sc_thread,
			    "syncer%d", ++syncalloc);
}

void *
vn_syncer_thr_getctx(struct mount *mp)
{
	return (mp->mnt_syncer_ctx);
}

/*
 * Stop a per-filesystem syncer thread
 */
void
vn_syncer_thr_stop(void *ctxp)
{
	struct syncer_ctx *ctx;

	ctx = ctxp;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer thread to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer thread exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0)
		tsleep(&ctx->sc_flags, 0, "syncexit", hz);

	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
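/*
 * The create/stop pair above forms a simple handshake: the stopping side
 * raises SC_FLAG_EXIT and sleeps until the thread acknowledges with
 * SC_FLAG_DONE.  A hypothetical filesystem would pair the calls across
 * its mount and unmount paths roughly like this (sketch only; names and
 * error handling are illustrative):
 *
 *	example_mount(struct mount *mp)
 *	{
 *		...
 *		vn_syncer_thr_create(mp);	(start per-fs syncer)
 *	}
 *
 *	example_unmount(struct mount *mp)
 *	{
 *		void *ctx = vn_syncer_thr_getctx(mp);
 *		...
 *		vn_syncer_thr_stop(ctx);	(blocks until SC_FLAG_DONE)
 *	}
 */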
struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct thread *td = curthread;
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;

	/*
	 * syncer0 runs until system shutdown; per-filesystem syncers are
	 * terminated on filesystem unmount.
	 */
	if (ctx == &syncer_ctx0)
		EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
				      SHUTDOWN_PRI_LAST);
	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];
		ctx->syncer_delayno += 1;
		if (ctx->syncer_delayno == syncer_maxdelay)
			ctx->syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
				VOP_FSYNC(vp, MNT_LAZY, 0);
				vput(vp);
				vnodes_synced++;
			}

			/*
			 * vp is stale but can still be used if we can
			 * verify that it remains at the head of the list.
			 * Be careful not to try to get vp->v_token as
			 * vp can become stale if this blocks.
			 *
			 * If the vp is still at the head of the list we
			 * were unable to completely flush it, so move it
			 * to a later slot to give other vnodes a fair shot.
			 *
			 * Note that v_tag VT_VFS vnodes can remain on the
			 * worklist with no dirty blocks, but sync_fsync()
			 * moves them to a later slot so we will never see
			 * them here.
			 *
			 * It is possible to race a vnode with no dirty
			 * buffers being removed from the list.  If this
			 * occurs we will move the vnode in the synclist
			 * and then the other thread will remove it.  Do
			 * not try to remove it here.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, syncdelay);
		}

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp || (sc_flags & SC_FLAG_BIOOPS_ALL))
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer thread.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer thread when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		if (ctx == &syncer_ctx0 && rushjob > 0) {
			atomic_subtract_int(&rushjob, 1);
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(ctx, 0, "syncer", hz);
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

static void
syncer_thread_start(void)
{
	syncer_thread(&syncer_ctx0);
}

static struct kproc_desc up_kp = {
	"syncer0",
	syncer_thread_start,
	&updatethread
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer(void)
{
	/*
	 * Don't bother protecting the test; the wakeup only does
	 * something real if the syncer thread is actually asleep.
	 */
	wakeup(lbolt_syncer);
	if (rushjob < syncdelay / 2) {
		atomic_add_int(&rushjob, 1);
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}
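/*
 * Worked example (assuming the default syncdelay of 30): rushjob is
 * capped at syncdelay / 2 == 15, so at most fifteen wheel slots are run
 * back-to-back before normal one-second pacing resumes.  A hypothetical
 * caller under dirty-buffer pressure might do something like the
 * following (the threshold names are illustrative, not real kernel
 * variables):
 *
 *	if (numdirtybufs > dirtybuf_hithreshold)
 *		speedup_syncer();
 */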
/*
 * Routines to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
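/*
 * Worked example of the scatter logic above (a sketch, assuming the
 * default syncer_maxdelay of 32 and the static state starting at zero):
 * the first call takes the reset path (start = 16, incr = 32) and queues
 * at next = 16; the second overflows (16 + 32 > 32) and halves to
 * start = 8, incr = 16, queueing at 8; subsequent calls queue at 24, 4,
 * 12, 20, 28, 2, ...  The effect is a bit-reversal-style walk that keeps
 * the syncer vnodes of simultaneously mounted filesystems on
 * well-separated wheel slots.
 */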
static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with the
 * syncer_token.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vn_get_syncer(vp);

	lwkt_gettoken(&ctx->sc_token);
	KKASSERT(vp->v_mount->mnt_syncer != vp);
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vclrflags(vp, VONWORKLST);
	}
	lwkt_reltoken(&ctx->sc_token);

	return (0);
}

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}