/*	$OpenBSD: vfs_sync.c,v 1.65 2021/01/14 03:32:01 cheloha Exp $	*/

/*
 *  Portions of this code are:
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Syncer daemon
 */

#include <sys/queue.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/time.h>

#include <sys/kernel.h>
#include <sys/sched.h>

#ifdef FFS_SOFTUPDATES
int   softdep_process_worklist(struct mount *);
#endif

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY	32		/* maximum sync delay time */
#define SYNCER_DEFAULT	30		/* default sync delay time */
int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
int syncdelay = SYNCER_DEFAULT;		/* time to delay syncing vnodes */

int rushjob = 0;			/* number of slots to run ASAP */
int stat_rush_requests = 0;		/* number of rush requests */

int syncer_delayno = 0;
long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;

struct proc *syncerproc;
int syncer_chan;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed. To realize this,
 * we append vnodes to a "workitem" queue. When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.
 * Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so are only delayed
 * about a third the time that file data is delayed. Thus, there are
 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
 * one each second (driven off the filesystem syncer process). The
 * syncer_delayno variable indicates the next queue that is to be processed.
 * Items that need to be processed soon are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */

void
vn_initialize_syncerd(void)
{
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE, M_WAITOK,
	    &syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
}

/*
 * Add an item to the syncer work queue.
 */
void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	s = splbio();
	if (vp->v_bioflag & VBIOONSYNCLIST)
		LIST_REMOVE(vp, v_synclist);

	vp->v_bioflag |= VBIOONSYNCLIST;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	splx(s);
}

/*
 * TODO Move getnsecuptime() to kern_tc.c and document it when we have
 * more users in the kernel.
 */
static uint64_t
getnsecuptime(void)
{
	struct timespec now;

	getnanouptime(&now);
	return TIMESPEC_TO_NSEC(&now);
}

/*
 * System filesystem synchronizer daemon.
 */
void
syncer_thread(void *arg)
{
	uint64_t elapsed, start;
	struct proc *p = curproc;
	struct synclist *slp;
	struct vnode *vp;
	int s;

	for (;;) {
		start = getnsecuptime();

		/*
		 * Push files whose dirty time has expired.
		 */
		s = splbio();
		slp = &syncer_workitem_pending[syncer_delayno];

		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;

		while ((vp = LIST_FIRST(slp)) != NULL) {
			if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT)) {
				/*
				 * If we fail to get the lock, we move this
				 * vnode one second ahead in time.
				 * XXX - no good, but the best we can do.
				 */
				vn_syncer_add_to_worklist(vp, 1);
				continue;
			}
			splx(s);
			(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
			vput(vp);
			s = splbio();
			if (LIST_FIRST(slp) == vp) {
				/*
				 * Note: disk vps can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
#ifdef DIAGNOSTIC
				if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
				    vp->v_type != VBLK) {
					vprint("fsync failed", vp);
					if (vp->v_mount != NULL)
						printf("mounted on: %s\n",
						    vp->v_mount->mnt_stat.f_mntonname);
					panic("%s: fsync failed", __func__);
				}
#endif /* DIAGNOSTIC */
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
			}

			sched_pause(yield);
		}

		splx(s);

#ifdef FFS_SOFTUPDATES
		/*
		 * Do soft update processing.
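		 * softdep_process_worklist() is invoked once per pass of
		 * this loop; the NULL mount argument means the call is not
		 * restricted to any single mount (see ffs_softdep.c for
		 * the precise semantics).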
		 */
		softdep_process_worklist(NULL);
#endif

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		if (rushjob > 0) {
			rushjob -= 1;
			continue;
		}

		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait.  Otherwise start right over
		 * again.  We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		elapsed = getnsecuptime() - start;
		if (elapsed < SEC_TO_NSEC(1)) {
			tsleep_nsec(&syncer_chan, PPAUSE, "syncer",
			    SEC_TO_NSEC(1) - elapsed);
		}
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer(void)
{
	if (syncerproc)
		wakeup_proc(syncerproc, &syncer_chan);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return 1;
	}
	return 0;
}

/* Routine to create and manage a filesystem syncer vnode. */
int   sync_fsync(void *);
int   sync_inactive(void *);
int   sync_print(void *);

const struct vops sync_vops = {
	.vop_close	= nullop,
	.vop_fsync	= sync_fsync,
	.vop_inactive	= sync_inactive,
	.vop_reclaim	= nullop,
	.vop_lock	= vop_generic_lock,
	.vop_unlock	= vop_generic_unlock,
	.vop_islocked	= vop_generic_islocked,
	.vop_print	= sync_print
};

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	if ((error = getnewvnode(VT_VFS, mp, &sync_vops, &vp)) != 0) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_writecount = 1;
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist. We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > syncer_maxdelay) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = syncer_maxdelay / 2;
			incr = syncer_maxdelay;
		}
		next = start;
	}
	vn_syncer_add_to_worklist(vp, next);
	mp->mnt_syncer = vp;
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 */
int
sync_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if (ap->a_waitfor != MNT_LAZY)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add_to_worklist(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list.
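	 * Busying the mount with VB_NOWAIT keeps us from blocking behind
	 * an unmount in progress; if the mount cannot be busied we simply
	 * skip this pass, and the worklist entry added above gets us
	 * called again a full syncdelay later.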
	 */
	if (vfs_busy(mp, VB_READ|VB_NOWAIT) == 0) {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;
		VFS_SYNC(mp, MNT_LAZY, 0, ap->a_cred, ap->a_p);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
		vfs_unbusy(mp);
	}

	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 */
int
sync_inactive(void *v)
{
	struct vop_inactive_args *ap = v;

	struct vnode *vp = ap->a_vp;
	int s;

	if (vp->v_usecount == 0) {
		VOP_UNLOCK(vp);
		return (0);
	}

	vp->v_mount->mnt_syncer = NULL;

	s = splbio();

	LIST_REMOVE(vp, v_synclist);
	vp->v_bioflag &= ~VBIOONSYNCLIST;

	splx(s);

	vp->v_writecount = 0;
	vput(vp);

	return (0);
}

/*
 * Print out a syncer vnode.
 */
int
sync_print(void *v)
{
	printf("syncer vnode\n");

	return (0);
}
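
/*
 * Usage sketch (illustrative only, not compiled here): the usual
 * producer of syncer work is the buffer-reassignment path in
 * vfs_subr.c (reassignbuf()), which queues a vnode the first time
 * it gains a dirty buffer, roughly:
 *
 *	if ((vp->v_bioflag & VBIOONSYNCLIST) == 0)
 *		vn_syncer_add_to_worklist(vp, syncdelay);
 *
 * vfs_allocate_syncvnode() above is typically called when a
 * filesystem is mounted read-write, so each mount point also gets a
 * syncer vnode whose VOP_FSYNC() (sync_fsync()) lazily pushes the
 * whole filesystem once per syncdelay seconds.
 */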