/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
		sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
		&filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
		&dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
		&metadelay, 0, "VFS metadata synchronization delay");
time_t retrydelay = 1;		/* retry delay after failure */
SYSCTL_INT(_kern, OID_AUTO, retrydelay, CTLFLAG_RW,
		&retrydelay, 0, "VFS retry synchronization delay");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
		&stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define	SC_FLAG_EXIT		(0x1)		/* request syncer exit */
#define	SC_FLAG_DONE		(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount		*sc_mp;
	struct lwkt_token	sc_token;
	struct thread		*sc_thread;
	int			sc_flags;
	struct synclist		*syncer_workitem_pending;
	long			syncer_mask;
	int			syncer_delayno;
	int			syncer_forced;
	int			syncer_rushjob;	/* sequence vnodes faster */
	int			syncer_trigger;	/* trigger full sync */
	long			syncer_count;
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}
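
/*
 * The handler above clamps the tunable to the range [1, SYNCER_MAXDELAY].
 * It is exported as kern.syncdelay, so for example the whole wheel can
 * be tightened from userland with:
 *
 *	# sysctl kern.syncdelay=15
 */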

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, mounted block devices
 * are delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are only
 * delayed about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer process).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
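
/*
 * Worked example (numbers are illustrative): hashinit() rounds
 * SYNCER_MAXDELAY (32) to a power-of-2 table, giving syncer_mask = 31.
 * If the syncer is currently draining bucket 20, a 15 second delay
 * lands in bucket
 *
 *	(20 + 15) & 31 == 3
 *
 * i.e. the entry wraps around the circular queue and is reached after
 * fifteen one-second passes.
 */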

/*
 * Return the number of vnodes on the syncer's timed list.  This count
 * includes the syncer vnode (mp->mnt_syncer), so if one is in use a
 * minimum value of 1 will be returned.
 */
long
vn_syncer_count(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx)
		return (ctx->syncer_count);
	return 0;
}

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held, we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * WARNING: The syncer depends on this function not blocking if the caller
 *	    already holds the syncer token.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);
	++ctx->syncer_count;

	lwkt_reltoken(&ctx->sc_token);
}
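
/*
 * Sketch of the two delay modes accepted above (values hypothetical):
 *
 *	vn_syncer_add(vp, 10);	10 buckets after the bucket currently
 *				being drained
 *	vn_syncer_add(vp, -4);	absolute bucket (4 & syncer_mask), the
 *				form vsyncscan() uses to park vnodes it
 *				has already visited
 *
 * Positive delays are clamped to SYNCER_MAXDELAY - 2 so a large delay
 * cannot wrap all the way around the ring back onto the bucket being
 * drained.
 */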

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

/*
 * Create per-filesystem syncer process
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop per-filesystem syncer process
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer process to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait till syncer process exits */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0) {
		tsleep_interlock(&ctx->sc_flags, 0);
		lwkt_reltoken(&ctx->sc_token);
		tsleep(&ctx->sc_flags, PINTERLOCKED, "syncexit", hz);
		lwkt_gettoken(&ctx->sc_token);
	}

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}
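
/*
 * Create/stop bracket the lifetime of a mount's dedicated syncer.  A
 * minimal sketch of the pairing (the VFS functions shown are
 * hypothetical, error handling omitted):
 *
 *	myfs_mount():	... vn_syncer_thr_create(mp); ...
 *	myfs_unmount():	... vn_syncer_thr_stop(mp); ...
 *
 * vn_syncer_thr_stop() is safe to call when no syncer was created; it
 * simply returns if mnt_syncer_ctx is NULL.
 */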

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Note that vsyncscan() and vn_syncer_one() can pull items
		 * off the same list, so we shift vp's position in the
		 * list immediately.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];

		/*
		 * If syncer_trigger is set (from trigger_syncer(mp)),
		 * immediately do a full filesystem sync and requeue the
		 * syncer vnode so the next full sync occurs 1 second later.
		 */
		if (ctx->syncer_trigger) {
			ctx->syncer_trigger = 0;
			if (ctx->sc_mp && ctx->sc_mp->mnt_syncer) {
				vp = ctx->sc_mp->mnt_syncer;
				if (vp->v_flag & VONWORKLST) {
					vn_syncer_add(vp, retrydelay);
					if (vget(vp, LK_EXCLUSIVE) == 0) {
						VOP_FSYNC(vp, MNT_LAZY, 0);
						vput(vp);
						vnodes_synced++;
					}
				}
			}
		}

		/*
		 * FSYNC items in this bucket
		 */
		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_syncer_add(vp, retrydelay);
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}
		}

		/*
		 * Increment the slot upon completion.  This is typically
		 * one-second but may be faster if the syncer is triggered.
		 */
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
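		/*
		 * Worked example (hypothetical numbers): with syncdelay
		 * at its default of 30, three speedup_syncer() calls since
		 * our last pass leave delta == 3, so the code below takes
		 * the one-tick "rush" path three times in a row instead of
		 * sleeping a full second per bucket.  A backlog larger
		 * than syncdelay / 2 (15) is clamped to that bound.
		 */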
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * Normal syncer operation iterates once a second, unless
		 * specifically triggered.
		 */
		if (time_uptime == starttime &&
		    ctx->syncer_trigger == 0) {
			tsleep_interlock(ctx, 0);
			if (time_uptime == starttime &&
			    ctx->syncer_trigger == 0 &&
			    (ctx->sc_flags & SC_FLAG_EXIT) == 0) {
				tsleep(ctx, PINTERLOCKED, "syncer", hz);
			}
		}
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * This allows a filesystem to pro-actively request that a dirty
 * vnode be fsync()d.  This routine does not guarantee that one
 * will actually be fsynced.
 */
void
vn_syncer_one(struct mount *mp)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int n = syncdelay;

	ctx = mp->mnt_syncer_ctx;
	i = ctx->syncer_delayno & ctx->syncer_mask;
	cpu_ccfence();

	if (lwkt_trytoken(&ctx->sc_token) == 0)
		return;

	/*
	 * Look ahead on our syncer time array.
	 */
	do {
		slp = &ctx->syncer_workitem_pending[i];
		vp = LIST_FIRST(slp);
		if (vp && vp->v_type == VNON)
			vp = LIST_NEXT(vp, v_synclist);
		if (vp)
			break;
		i = (i + 1) & ctx->syncer_mask;
		/* i will be wrong if we stop here, but vp is NULL so ok */
	} while (--n);

	/*
	 * Process one vnode, skip the syncer vnode but also stop
	 * if the syncer vnode is the only thing on this list.
	 */
	if (vp) {
		vn_syncer_add(vp, retrydelay);
		if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(vp, MNT_LAZY, 0);
			vput(vp);
		}
	}
	lwkt_reltoken(&ctx->sc_token);
}
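
/*
 * A VFS that wants to pace its own flushing might drive this from a
 * dirty-vnode throttle, e.g. (caller and limit are hypothetical):
 *
 *	for (i = 0; i < 10 && vn_syncer_count(mp) > myfs_dirty_limit; ++i)
 *		vn_syncer_one(mp);
 *
 * Each call fsyncs at most one vnode, and does nothing at all if the
 * syncer token is contended.
 */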

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  wakeup() only does something
	 * real if the syncer thread is actually asleep on its context.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp && mp->mnt_syncer_ctx)
		wakeup(mp->mnt_syncer_ctx);
}

/*
 * Trigger a full sync.
 */
void
trigger_syncer(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		if (ctx->syncer_trigger == 0) {
			ctx->syncer_trigger = 1;
			wakeup(ctx);
		}
	}
}
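
/*
 * The trigger is consumed at the top of syncer_thread(): the mount's
 * syncer vnode is fsynced immediately and requeued retrydelay (1 second
 * by default) later, instead of waiting for its normal slot.  A caller
 * sketch (the pressure test is hypothetical):
 *
 *	if (myfs_dirty_pressure(mp))
 *		trigger_syncer(mp);
 */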

/*
 * Routines to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 * sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;
	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFS's (aka NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}
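
/*
 * The start/incr/next recurrence above binary-subdivides the
 * SYNCER_MAXDELAY ring.  Worked example for the first mounts after
 * boot (SYNCER_MAXDELAY == 32):
 *
 *	mount #:  1   2   3   4   5   6   7   8
 *	next:    16   8  24   4  12  20  28   2
 *
 * so syncer vnodes end up spread roughly evenly around the wheel even
 * when many filesystems are mounted back-to-back.
 */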

static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 *	sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	if (ctx) {
		lwkt_gettoken(&ctx->sc_token);
		KKASSERT(vp->v_mount->mnt_syncer != vp);
		if (vp->v_flag & VONWORKLST) {
			LIST_REMOVE(vp, v_synclist);
			vclrflags(vp, VONWORKLST);
			--ctx->syncer_count;
		}
		lwkt_reltoken(&ctx->sc_token);
	} else {
		KKASSERT((vp->v_flag & VONWORKLST) == 0);
	}

	return (0);
}

/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFS's which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This scan does not issue VOP_FSYNC()s.  The supplied callback is intended
 * to synchronize the file in the manner intended by the VFS using it.
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(
    struct mount *mp,
    int vmsc_flags,
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int count;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread.
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	i = ctx->syncer_delayno & ctx->syncer_mask;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;
	for (count = 0; count <= ctx->syncer_mask; ++count) {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}

			/*
			 * vp could be invalid.  However, if vp is still at
			 * the head of the list it is clearly valid and we
			 * can safely move it.
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	}

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return (0);
}
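
/*
 * Usage sketch (the VFS callback below is hypothetical):
 *
 *	static int
 *	myfs_sync_scan(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		... flush vp's dirty state; no VOP_FSYNC() is needed ...
 *		return (0);
 *	}
 *
 *	vsyncscan(mp, VMSC_GETVP | VMSC_NOWAIT, myfs_sync_scan, NULL);
 */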

/*
 * Print out a syncer vnode.
 *
 *	sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}
859