/*	$OpenBSD: vfs_sync.c,v 1.21 2001/11/27 05:27:12 art Exp $	*/

/*
 * Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Syncer daemon
 */

#include <sys/queue.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/malloc.h>

#include <sys/kernel.h>

#ifdef FFS_SOFTUPDATES
int softdep_process_worklist __P((struct mount *));
#endif

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY	32		/* maximum sync delay time */
#define SYNCER_DEFAULT	30		/* default sync delay time */
int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
time_t syncdelay = SYNCER_DEFAULT;	/* time to delay syncing vnodes */

int rushjob = 0;			/* number of slots to run ASAP */
int stat_rush_requests = 0;		/* number of rush requests */

static int syncer_delayno = 0;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

struct proc *syncerproc;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, vnodes mounted on block
 * devices are delayed only about half the time that file data is
 * delayed.  Similarly, directory updates are more critical, so they are
 * only delayed about a third of the time that file data is delayed.
 * Thus, there are SYNCER_MAXDELAY queues that are processed round-robin
 * at a rate of one each second (driven off the filesystem syncer
 * process).  The syncer_delayno variable indicates the next queue that
 * is to be processed.  Items that need to be processed soon are placed
 * in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 */
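
/*
 * Worked example (the values follow from the defaults above): hashinit()
 * sizes the table to a power of two, so with SYNCER_MAXDELAY == 32 we
 * end up with syncer_mask == 31.  If syncer_delayno is currently 20 and
 * a vnode is queued with a delay of 15 seconds, it lands in slot
 * (20 + 15) & 31 == 3, and the round-robin sweep reaches that slot 15
 * iterations (about 15 seconds) later, wrapping around the ring.
 */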

void
vn_initialize_syncerd()
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, M_WAITOK,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(vp, delay)
	struct vnode *vp;
	int delay;
{
	int s, slot;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	s = splbio();
	if (vp->v_bioflag & VBIOONSYNCLIST)
		LIST_REMOVE(vp, v_synclist);

	vp->v_bioflag |= VBIOONSYNCLIST;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	splx(s);
}

/*
 * System filesystem synchronizer daemon.
 */
void
sched_sync(p)
	struct proc *p;
{
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int s;

	syncerproc = curproc;

	for (;;) {
		starttime = time.tv_sec;

		/*
		 * Push files whose dirty time has expired.
		 */
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		s = splbio();
		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, p) != 0) {
				/*
				 * If we fail to get the lock, we move this
				 * vnode one second ahead in time.
				 * XXX - no good, but the best we can do.
				 */
				vn_syncer_add_to_worklist(vp, 1);
				continue;
			}
			splx(s);
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			VOP_UNLOCK(vp, 0, p);
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
#ifdef DIAGNOSTIC
				if (!(vp->v_bioflag & VBIOONSYNCLIST)) {
					vprint("vnode", vp);
					panic("sched_sync: on synclist, but no flag");
				}
#endif
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}
		}

		splx(s);

#ifdef FFS_SOFTUPDATES
		/*
		 * Do soft update processing.
		 */
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP.  Currently
		 * rushjob is used by the soft update code to speed up the
		 * filesystem syncer process when the incore state is getting
		 * so far ahead of the disk that the kernel memory pool is
		 * being threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time.tv_sec == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}
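
/*
 * Pacing example: if speedup_syncer() below has raised rushjob to 2,
 * sched_sync() skips its tsleep() on the next two passes, draining two
 * extra queue slots (two seconds worth of work) back to back before
 * dropping back to its normal one-slot-per-second pace.
 */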

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the CPU.
 */
int
speedup_syncer()
{
	int s;

	s = splhigh();
	if (syncerproc && syncerproc->p_wchan == &lbolt)
		setrunnable(syncerproc);
	splx(s);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return (1);
	}
	return (0);
}

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
#define sync_close nullop
int sync_fsync __P((void *));
int sync_inactive __P((void *));
#define sync_reclaim nullop
#define sync_lock vop_generic_lock
#define sync_unlock vop_generic_unlock
int sync_print __P((void *));
#define sync_islocked vop_generic_islocked

int (**sync_vnodeop_p) __P((void *));
struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_close_desc, sync_close },	/* close */
	{ &vop_fsync_desc, sync_fsync },	/* fsync */
	{ &vop_inactive_desc, sync_inactive },	/* inactive */
	{ &vop_reclaim_desc, sync_reclaim },	/* reclaim */
	{ &vop_lock_desc, sync_lock },		/* lock */
	{ &vop_unlock_desc, sync_unlock },	/* unlock */
	{ &vop_print_desc, sync_print },	/* print */
	{ &vop_islocked_desc, sync_islocked },	/* islocked */
	{ (struct vnodeop_desc *)NULL, (int (*) __P((void *)))NULL }
};
struct vnodeopv_desc sync_vnodeop_opv_desc = {
	&sync_vnodeop_p, sync_vnodeop_entries
};

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(mp)
	struct mount *mp;
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_writecount = 1;
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, next);
	mp->mnt_syncer = vp;
	return (0);
}
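
/*
 * The start/incr/next logic above bisects the ring: with
 * syncer_maxdelay == 32, successive calls yield the delays 16; 8, 24;
 * 4, 12, 20, 28; 2, 6, 10, ..., 30; and finally 1, 3, 5, ..., 31,
 * after which the cycle restarts.  A burst of simultaneous mounts is
 * thus spread across the queues instead of piling into a single slot.
 */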

/*
 * Do a lazy sync of the filesystem.
 */
int
sync_fsync(v)
	void *v;
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_waitfor;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
	 */
	simple_lock(&mountlist_slock);
	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, ap->a_p) == 0) {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		VFS_SYNC(mp, MNT_LAZY, ap->a_cred, ap->a_p);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
		vfs_unbusy(mp, ap->a_p);
	} else
		simple_unlock(&mountlist_slock);

	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
int
sync_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;

	if (vp->v_usecount == 0) {
		VOP_UNLOCK(vp, 0, ap->a_p);
		return (0);
	}
	vp->v_mount->mnt_syncer = NULL;
	LIST_REMOVE(vp, v_synclist);
	vp->v_bioflag &= ~VBIOONSYNCLIST;
	vp->v_writecount = 0;
	vput(vp);
	return (0);
}

/*
 * Print out a syncer vnode.
 */
int
sync_print(v)
	void *v;
{
	struct vop_print_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	printf("syncer vnode");
	if (vp->v_vnlock != NULL)
		lockmgr_printinfo(vp->v_vnlock);
	printf("\n");
	return (0);
}
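
/*
 * Usage sketch (illustrative; the actual call site lives in the mount
 * path, not in this file): a writable filesystem is given its syncer
 * vnode at mount time, roughly:
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL)
 *		error = vfs_allocate_syncvnode(mp);
 *
 * Thereafter each pass of sched_sync() reaches the filesystem through
 * VOP_FSYNC() on that vnode, which resolves to sync_fsync() above and
 * issues a lazy VFS_SYNC() on the underlying mount.
 */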