/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_sync.c,v 1.2 2004/12/17 00:18:07 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_INT(_kern, OID_AUTO, syncdelay, CTLFLAG_RW,
	&syncdelay, 0, "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
	&filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
	&dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
	&metadelay, 0, "VFS metadata synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
	&stat_rush_requests, 0, "");

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

/*
 * Called from vfsinit()
 */
void
vfs_sync_init(void)
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_DEVBUF,
					   &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 * (A worked sketch of this slot computation follows the syncer
 * SYSINIT below.)
 */

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int slot;

	crit_enter();

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
	}

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	vp->v_flag |= VONWORKLST;
	crit_exit();
}

struct thread *updatethread;
static void sched_sync (void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updatethread
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
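
/*
 * Illustrative sketch, not part of the original file: how a delay maps
 * onto the syncer wheel.  vfs_sync_init() sizes the table with
 * hashinit(), which returns a power-of-two table, so syncer_mask ==
 * syncer_maxdelay - 1 by construction.  With the default
 * SYNCER_MAXDELAY of 32 (mask 31) and syncer_delayno == 20, a vnode
 * queued with a 15 second delay lands in slot:
 *
 *	slot = (20 + 15) & 31;		(== 3; the index wraps the ring)
 *
 * so the wheel behaves as a circular buffer of one-second buckets.
 * A filesystem would typically queue a vnode with:
 *
 *	vn_syncer_add_to_worklist(vp, syncdelay);
 */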
/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;
	struct thread *td = curthread;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kproc_suspend_loop();

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (VOP_ISLOCKED(vp, NULL) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
			}
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: v_tag VT_VFS vps can also remain on
				 * the worklist with no dirty blocks, but
				 * since sync_fsync() moves them to a
				 * different slot we are safe.
				 */
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL))
					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
			splx(s);
		}

		/*
		 * Do soft update processing.
		 */
		if (bioops.io_sync)
			(*bioops.io_sync)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, 0, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 *
 * YYY wchan field protected by the BGL.
 */
int
speedup_syncer(void)
{
	crit_enter();
	if (updatethread->td_wchan == &lbolt) { /* YYY */
		unsleep(updatethread);
		lwkt_schedule(updatethread);
	}
	crit_exit();
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}
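
/*
 * Illustrative sketch, not part of the original file: with the default
 * syncdelay of 30 the check above caps rushjob at syncdelay / 2 == 15,
 * i.e. the daemon can be asked to run at most about half a turn of the
 * wheel ahead of schedule.  A hypothetical caller wanting roughly N
 * seconds of queued work pushed out ASAP might do:
 *
 *	int n = N;
 *	while (n-- > 0 && speedup_syncer())
 *		;
 *
 * Each successful call lets one pass of sched_sync()'s main loop skip
 * its one-second tsleep() on lbolt, draining one extra slot.
 */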
/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close ((int (*) (struct vop_close_args *))nullop)
static int	sync_fsync (struct vop_fsync_args *);
static int	sync_inactive (struct vop_inactive_args *);
static int	sync_reclaim (struct vop_reclaim_args *);
#define sync_lock ((int (*) (struct vop_lock_args *))vop_stdlock)
#define sync_unlock ((int (*) (struct vop_unlock_args *))vop_stdunlock)
static int	sync_print (struct vop_print_args *);
#define sync_islocked ((int(*) (struct vop_islocked_args *))vop_stdislocked)

static struct vop_ops *sync_vnode_vops;
static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc,	vop_eopnotsupp },
	{ &vop_close_desc,	(void *) sync_close },		/* close */
	{ &vop_fsync_desc,	(void *) sync_fsync },		/* fsync */
	{ &vop_inactive_desc,	(void *) sync_inactive },	/* inactive */
	{ &vop_reclaim_desc,	(void *) sync_reclaim },	/* reclaim */
	{ &vop_lock_desc,	(void *) sync_lock },		/* lock */
	{ &vop_unlock_desc,	(void *) sync_unlock },		/* unlock */
	{ &vop_print_desc,	(void *) sync_print },		/* print */
	{ &vop_islocked_desc,	(void *) sync_islocked },	/* islocked */
	{ NULL, NULL }
};

static struct vnodeopv_desc sync_vnodeop_opv_desc =
	{ &sync_vnode_vops, sync_vnodeop_entries };

VNODEOP_SET(sync_vnodeop_opv_desc);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
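
/*
 * Illustrative sketch, not part of the original file: the static
 * start/incr/next state above performs a bisection walk of the wheel.
 * Starting from zero with syncer_maxdelay == 32, successive mounts
 * receive next values 16, 8, 24, 4, 12, 20, 28, 2, 6, ... (each then
 * taken modulo syncdelay), so filesystems mounted at the same time get
 * syncer vnodes spread roughly evenly around the wheel rather than all
 * firing in the same one-second slot.
 */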
/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
 *		struct thread *a_td }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	struct thread *td = ap->a_td;
	lwkt_tokref ilock;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is read-only
	 * or not.
	 */
	lwkt_gettoken(&ilock, &mountlist_token);
	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &ilock, td) != 0) {
		lwkt_reltoken(&ilock);
		return (0);
	}
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_LAZY, td);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp, td);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 *
 * Modifications to the worklist must be protected at splbio().
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int s;

	s = splbio();
	vp->v_mount->mnt_syncer = NULL;
	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		vp->v_flag &= ~VONWORKLST;
	}
	splx(s);

	return (0);
}

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	printf("\n");
	return (0);
}
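
/*
 * Illustrative usage sketch, not part of the original file; both call
 * sites below are hypothetical.  The public entry points here are
 * driven from elsewhere in the kernel: the mount path allocates the
 * per-mount syncer vnode once a mount has succeeded, and buffer
 * shortage code may call speedup_syncer() to push dirty data out
 * sooner.  Roughly:
 *
 *	if (mp->mnt_syncer == NULL)
 *		error = vfs_allocate_syncvnode(mp);
 *
 *	if (numdirtybuffers > hidirtybuffers)
 *		speedup_syncer();
 */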