/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>

/*
 * The workitem queue.
 */
#define SYNCER_MAXDELAY		32
static int sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS);
time_t syncdelay = 30;		/* max time to delay syncing data */
SYSCTL_PROC(_kern, OID_AUTO, syncdelay, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
    sysctl_kern_syncdelay, "I", "VFS data synchronization delay");
time_t filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW,
    &filedelay, 0, "File synchronization delay");
time_t dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW,
    &dirdelay, 0, "Directory synchronization delay");
time_t metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW,
    &metadelay, 0, "VFS metadata synchronization delay");
time_t retrydelay = 1;		/* retry delay after failure */
SYSCTL_INT(_kern, OID_AUTO, retrydelay, CTLFLAG_RW,
    &retrydelay, 0, "VFS retry synchronization delay");
static int rushjob;			/* number of slots to run ASAP */
static int stat_rush_requests;		/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW,
    &stat_rush_requests, 0, "");

LIST_HEAD(synclist, vnode);

#define SC_FLAG_EXIT	(0x1)		/* request syncer exit */
#define SC_FLAG_DONE	(0x2)		/* syncer confirm exit */

struct syncer_ctx {
	struct mount	*sc_mp;		/* mount this syncer serves */
	struct lwkt_token sc_token;	/* protects v_synclist/VONWORKLST */
	struct thread	*sc_thread;	/* the syncer kernel thread */
	int		sc_flags;	/* SC_FLAG_EXIT / SC_FLAG_DONE */
	struct synclist	*syncer_workitem_pending; /* timed work buckets */
	long		syncer_mask;	/* number of buckets - 1 */
	int		syncer_delayno;	/* bucket processed next */
	int		syncer_forced;	/* force non-lazy fsyncs */
	int		syncer_rushjob;	/* sequence vnodes faster */
	int		syncer_trigger;	/* trigger full sync */
	long		syncer_count;	/* vnodes on the work list */
};

static void syncer_thread(void *);

static int
sysctl_kern_syncdelay(SYSCTL_HANDLER_ARGS)
{
	int error;
	int v = syncdelay;

	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || !req->newptr)
		return (error);
	if (v < 1)
		v = 1;
	if (v > SYNCER_MAXDELAY)
		v = SYNCER_MAXDELAY;
	syncdelay = v;

	return (0);
}
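
/*
 * The handler above clamps kern.syncdelay to the range
 * [1, SYNCER_MAXDELAY].  It may be tuned at runtime, e.g. with
 * "sysctl kern.syncdelay=15"; filedelay, dirdelay, metadelay and
 * retrydelay are plain CTLFLAG_RW integers tunable the same way.
 */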

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates are
 * delayed only about half the time that file data is delayed.
 * Similarly, directory updates are more critical, so they are delayed
 * only about a third of the time that file data is delayed.  Thus,
 * there are SYNCER_MAXDELAY queues that are processed round-robin at
 * a rate of one each second (driven off the filesystem syncer thread).
 * The syncer_delayno variable indicates the next queue that is to be
 * processed.  Items that need to be processed soon are placed in this
 * queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
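
/*
 * For example (illustrative values): with SYNCER_MAXDELAY == 32 the
 * wheel has 32 buckets and syncer_mask == 31.  If syncer_delayno is
 * currently 20, a 15 second delay places the vnode in bucket
 * (20 + 15) & 31 == 3, i.e. the index simply wraps modulo the mask.
 */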

/*
 * Return the number of vnodes on the syncer's timed work list.  The
 * list includes the syncer vnode itself (mp->mnt_syncer), so when one
 * is present a minimum value of 1 will be returned.
 */
long
vn_syncer_count(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx)
		return (ctx->syncer_count);
	return (0);
}

/*
 * Add an item to the syncer work queue.
 *
 * WARNING: Cannot get vp->v_token here if not already held; we must
 *	    depend on the syncer_token (which might already be held by
 *	    the caller) to protect v_synclist and VONWORKLST.
 *
 * WARNING: The syncer depends on this function not blocking if the caller
 *	    already holds the syncer token.
 */
void
vn_syncer_add(struct vnode *vp, int delay)
{
	struct syncer_ctx *ctx;
	int slot;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if (vp->v_flag & VONWORKLST) {
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}
	if (delay <= 0) {
		slot = -delay & ctx->syncer_mask;
	} else {
		if (delay > SYNCER_MAXDELAY - 2)
			delay = SYNCER_MAXDELAY - 2;
		slot = (ctx->syncer_delayno + delay) & ctx->syncer_mask;
	}

	LIST_INSERT_HEAD(&ctx->syncer_workitem_pending[slot], vp, v_synclist);
	vsetflags(vp, VONWORKLST);
	++ctx->syncer_count;

	lwkt_reltoken(&ctx->sc_token);
}
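
/*
 * Typical use: vsetisdirty() and vsetobjdirty() below queue a newly
 * dirtied vnode with vn_syncer_add(vp, syncdelay), a full syncdelay
 * into the future, while the syncer thread re-adds each vnode with
 * vn_syncer_add(vp, retrydelay) before attempting the flush so that
 * a busy or failed vnode is retried on the next pass.
 */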

/*
 * Removes the vnode from the syncer list.  Since we might block while
 * acquiring the syncer_token we have to [re]check conditions to determine
 * that it is ok to remove the vnode.
 *
 * Force removal if force != 0.  This can only occur during a forced unmount.
 *
 * vp->v_token held on call
 */
void
vn_syncer_remove(struct vnode *vp, int force)
{
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	if ((vp->v_flag & (VISDIRTY | VONWORKLST | VOBJDIRTY)) == VONWORKLST &&
	    RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	} else if (force && (vp->v_flag & VONWORKLST)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
		--ctx->syncer_count;
	}

	lwkt_reltoken(&ctx->sc_token);
}

/*
 * vnode must be locked
 */
void
vclrisdirty(struct vnode *vp)
{
	vclrflags(vp, VISDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

void
vclrobjdirty(struct vnode *vp)
{
	vclrflags(vp, VOBJDIRTY);
	if (vp->v_flag & VONWORKLST)
		vn_syncer_remove(vp, 0);
}

/*
 * vnode must be stable
 */
void
vsetisdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VISDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VISDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

void
vsetobjdirty(struct vnode *vp)
{
	struct syncer_ctx *ctx;

	if ((vp->v_flag & VOBJDIRTY) == 0) {
		ctx = vp->v_mount->mnt_syncer_ctx;
		vsetflags(vp, VOBJDIRTY);
		lwkt_gettoken(&ctx->sc_token);
		if ((vp->v_flag & VONWORKLST) == 0)
			vn_syncer_add(vp, syncdelay);
		lwkt_reltoken(&ctx->sc_token);
	}
}

/*
 * Create the per-filesystem syncer thread
 */
void
vn_syncer_thr_create(struct mount *mp)
{
	struct syncer_ctx *ctx;
	static int syncalloc = 0;

	ctx = kmalloc(sizeof(struct syncer_ctx), M_TEMP, M_WAITOK | M_ZERO);
	ctx->sc_mp = mp;
	ctx->sc_flags = 0;
	ctx->syncer_workitem_pending = hashinit(SYNCER_MAXDELAY, M_DEVBUF,
						&ctx->syncer_mask);
	ctx->syncer_delayno = 0;
	lwkt_token_init(&ctx->sc_token, "syncer");
	mp->mnt_syncer_ctx = ctx;
	kthread_create(syncer_thread, ctx, &ctx->sc_thread,
		       "syncer%d", ++syncalloc & 0x7FFFFFFF);
}

/*
 * Stop the per-filesystem syncer thread
 */
void
vn_syncer_thr_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	ctx = mp->mnt_syncer_ctx;
	if (ctx == NULL)
		return;

	lwkt_gettoken(&ctx->sc_token);

	/* Signal the syncer thread to exit */
	ctx->sc_flags |= SC_FLAG_EXIT;
	wakeup(ctx);

	/* Wait until the syncer thread acknowledges the exit request */
	while ((ctx->sc_flags & SC_FLAG_DONE) == 0) {
		tsleep_interlock(&ctx->sc_flags, 0);
		lwkt_reltoken(&ctx->sc_token);
		tsleep(&ctx->sc_flags, PINTERLOCKED, "syncexit", hz);
		lwkt_gettoken(&ctx->sc_token);
	}

	mp->mnt_syncer_ctx = NULL;
	lwkt_reltoken(&ctx->sc_token);

	hashdestroy(ctx->syncer_workitem_pending, M_DEVBUF, ctx->syncer_mask);
	kfree(ctx, M_TEMP);
}

struct thread *updatethread;

/*
 * System filesystem synchronizer daemon.
 */
static void
syncer_thread(void *_ctx)
{
	struct syncer_ctx *ctx = _ctx;
	struct synclist *slp;
	struct vnode *vp;
	long starttime;
	int *sc_flagsp;
	int sc_flags;
	int vnodes_synced = 0;
	int delta;
	int dummy = 0;

	for (;;) {
		kproc_suspend_loop();

		starttime = time_uptime;
		lwkt_gettoken(&ctx->sc_token);

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 *
		 * Note that vsyncscan() and vn_syncer_one() can pull items
		 * off the same list, so we shift vp's position in the
		 * list immediately.
		 */
		slp = &ctx->syncer_workitem_pending[ctx->syncer_delayno];

		/*
		 * If syncer_trigger is set (from trigger_syncer(mp)),
		 * immediately do a full filesystem sync and set up the
		 * following full filesystem sync to occur in 1 second.
		 *
		 * The one-shot bit 0 of syncer_trigger is automatically
		 * reset.  If other bits are set, they remain set and
		 * cause the syncer to keep running.
		 */
		if (ctx->syncer_trigger) {
			if (ctx->sc_mp && ctx->sc_mp->mnt_syncer) {
				vp = ctx->sc_mp->mnt_syncer;
				if (vp->v_flag & VONWORKLST) {
					vn_syncer_add(vp, retrydelay);
					if (vget(vp, LK_EXCLUSIVE) == 0) {
						atomic_clear_int(&ctx->syncer_trigger, 1);
						VOP_FSYNC(vp, MNT_LAZY, 0);
						vput(vp);
						vnodes_synced++;
					}
				}
			}
		}

		/*
		 * FSYNC items in this bucket
		 */
		while ((vp = LIST_FIRST(slp)) != NULL) {
			vn_syncer_add(vp, retrydelay);
			if (ctx->syncer_forced) {
				if (vget(vp, LK_EXCLUSIVE) == 0) {
					VOP_FSYNC(vp, MNT_NOWAIT, 0);
					vput(vp);
					vnodes_synced++;
				}
			} else {
				if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
					VOP_FSYNC(vp, MNT_LAZY, 0);
					vput(vp);
					vnodes_synced++;
				}
			}
		}

		/*
		 * Increment the slot upon completion.  This typically
		 * happens once a second, but may be faster if the syncer
		 * is triggered.
		 */
		ctx->syncer_delayno = (ctx->syncer_delayno + 1) &
				      ctx->syncer_mask;

		sc_flags = ctx->sc_flags;

		/* Exit on unmount */
		if (sc_flags & SC_FLAG_EXIT)
			break;

		lwkt_reltoken(&ctx->sc_token);

		/*
		 * Do sync processing for each mount.
		 */
		if (ctx->sc_mp)
			bio_ops_sync(ctx->sc_mp);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process.  A rushjob
		 * value of N tells the filesystem syncer to process the
		 * next N seconds worth of work on its queue ASAP.
		 * Currently rushjob is used by the soft update code to
		 * speed up the filesystem syncer process when the incore
		 * state is getting so far ahead of the disk that the
		 * kernel memory pool is being threatened with exhaustion.
		 */
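		/*
		 * Example: a rushjob lead of 3 over syncer_rushjob makes
		 * the code below take the 1-tick "rush" sleep three times
		 * in a row, draining three buckets in a few ticks instead
		 * of three seconds.
		 */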
		delta = rushjob - ctx->syncer_rushjob;
		if ((u_int)delta > syncdelay / 2) {
			ctx->syncer_rushjob = rushjob - syncdelay / 2;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}
		if (delta) {
			++ctx->syncer_rushjob;
			tsleep(&dummy, 0, "rush", 1);
			continue;
		}

		/*
		 * Normal syncer operation iterates once a second, unless
		 * specifically triggered.
		 */
		if (time_uptime == starttime &&
		    ctx->syncer_trigger == 0) {
			tsleep_interlock(ctx, 0);
			if (time_uptime == starttime &&
			    ctx->syncer_trigger == 0 &&
			    (ctx->sc_flags & SC_FLAG_EXIT) == 0) {
				tsleep(ctx, PINTERLOCKED, "syncer", hz);
			}
		}
	}

	/*
	 * Unmount/exit path for per-filesystem syncers; sc_token held
	 */
	ctx->sc_flags |= SC_FLAG_DONE;
	sc_flagsp = &ctx->sc_flags;
	lwkt_reltoken(&ctx->sc_token);
	wakeup(sc_flagsp);

	kthread_exit();
}

/*
 * This allows a filesystem to pro-actively request that a dirty
 * vnode be fsync()d.  This routine does not guarantee that one
 * will actually be fsynced.
 */
void
vn_syncer_one(struct mount *mp)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int n = syncdelay;

	ctx = mp->mnt_syncer_ctx;
	i = ctx->syncer_delayno & ctx->syncer_mask;
	cpu_ccfence();

	if (lwkt_trytoken(&ctx->sc_token) == 0)
		return;

	/*
	 * Look ahead on our syncer time array.
	 */
	do {
		slp = &ctx->syncer_workitem_pending[i];
		vp = LIST_FIRST(slp);
		if (vp && vp->v_type == VNON)
			vp = LIST_NEXT(vp, v_synclist);
		if (vp)
			break;
		i = (i + 1) & ctx->syncer_mask;
		/*
		 * If the loop terminates here i is stale, but vp is
		 * NULL in that case so i is never used.
		 */
	} while (--n);

	/*
	 * Process one vnode.  The syncer vnode (v_type == VNON) is
	 * skipped above, which also means we stop if the syncer vnode
	 * is the only thing on this list.
	 */
	if (vp) {
		vn_syncer_add(vp, retrydelay);
		if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			VOP_FSYNC(vp, MNT_LAZY, 0);
			vput(vp);
		}
	}
	lwkt_reltoken(&ctx->sc_token);
}

/*
 * Request that the syncer daemon for a specific mount speed up its work.
 * If mp is NULL the caller generally wants to speed up all syncers.
 */
void
speedup_syncer(struct mount *mp)
{
	/*
	 * Don't bother protecting the test.  unsleep_and_wakeup_thread()
	 * will only do something real if the thread is in the right state.
	 */
	atomic_add_int(&rushjob, 1);
	++stat_rush_requests;
	if (mp && mp->mnt_syncer_ctx)
		wakeup(mp->mnt_syncer_ctx);
}

/*
 * Force continuous full syncs until stopped.  This may be used by
 * filesystems waiting on dirty data to be flushed to avoid syncer/tsleep
 * races.
 */
void
trigger_syncer_start(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		if (atomic_fetchadd_int(&ctx->syncer_trigger, 2) <= 1)
			wakeup(ctx);
	}
}

void
trigger_syncer_stop(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		atomic_add_int(&ctx->syncer_trigger, -2);
	}
}

/*
 * Trigger a full sync (auto-reset)
 */
void
trigger_syncer(struct mount *mp)
{
	struct syncer_ctx *ctx;

	if (mp && (ctx = mp->mnt_syncer_ctx) != NULL) {
		if ((ctx->syncer_trigger & 1) == 0) {
			atomic_set_int(&ctx->syncer_trigger, 1);
			wakeup(ctx);
		}
	}
}
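
/*
 * syncer_trigger encoding: bit 0 is the one-shot request set by
 * trigger_syncer() and cleared by the syncer thread itself, while
 * trigger_syncer_start()/trigger_syncer_stop() add and subtract 2,
 * so the upper bits count outstanding continuous-sync requests.
 * Any non-zero value keeps the syncer from sleeping between passes.
 */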

/*
 * Routine to create and manage a filesystem syncer vnode.
 */
static int sync_close(struct vop_close_args *);
static int sync_fsync(struct vop_fsync_args *);
static int sync_inactive(struct vop_inactive_args *);
static int sync_reclaim(struct vop_reclaim_args *);
static int sync_print(struct vop_print_args *);

static struct vop_ops sync_vnode_vops = {
	.vop_default =	vop_eopnotsupp,
	.vop_close =	sync_close,
	.vop_fsync =	sync_fsync,
	.vop_inactive =	sync_inactive,
	.vop_reclaim =	sync_reclaim,
	.vop_print =	sync_print,
};

static struct vop_ops *sync_vnode_vops_p = &sync_vnode_vops;

VNODEOP_SET(sync_vnode_vops);

/*
 * Create a new filesystem syncer vnode for the specified mount point.
 * This vnode is placed on the worklist and is responsible for sync'ing
 * the filesystem.
 *
 * NOTE: read-only mounts are also placed on the worklist.  The filesystem
 *	 sync code is also responsible for cleaning up vnodes.
 */
int
vfs_allocate_syncvnode(struct mount *mp)
{
	struct vnode *vp;
	static long start, incr, next;
	int error;

	/* Allocate a new vnode */
	error = getspecialvnode(VT_VFS, mp, &sync_vnode_vops_p, &vp, 0, 0);
	if (error) {
		mp->mnt_syncer = NULL;
		return (error);
	}
	vp->v_type = VNON;

	/*
	 * Place the vnode onto the syncer worklist.  We attempt to
	 * scatter them about on the list so that they will go off
	 * at evenly distributed times even if all the filesystems
	 * are mounted at once.
	 */
	next += incr;
	if (next == 0 || next > SYNCER_MAXDELAY) {
		start /= 2;
		incr /= 2;
		if (start == 0) {
			start = SYNCER_MAXDELAY / 2;
			incr = SYNCER_MAXDELAY;
		}
		next = start;
	}
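
	/*
	 * With SYNCER_MAXDELAY == 32 the sequence of 'next' values
	 * generated above is 16, 8, 24, 4, 12, 20, 28, 2, 6, ...:
	 * each new syncer vnode bisects the remaining gaps in the wheel.
	 */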

	/*
	 * Only put the syncer vnode onto the syncer list if we have a
	 * syncer thread.  Some VFSs (e.g. NULLFS) don't need a syncer
	 * thread.
	 */
	if (mp->mnt_syncer_ctx)
		vn_syncer_add(vp, syncdelay > 0 ? next % syncdelay : 0);

	/*
	 * The mnt_syncer field inherits the vnode reference, which is
	 * held until later decommissioning.
	 */
	mp->mnt_syncer = vp;
	vx_unlock(vp);
	return (0);
}

static int
sync_close(struct vop_close_args *ap)
{
	return (0);
}

/*
 * Do a lazy sync of the filesystem.
 *
 * sync_fsync { struct vnode *a_vp, int a_waitfor }
 */
static int
sync_fsync(struct vop_fsync_args *ap)
{
	struct vnode *syncvp = ap->a_vp;
	struct mount *mp = syncvp->v_mount;
	int asyncflag;

	/*
	 * We only need to do something if this is a lazy evaluation.
	 */
	if ((ap->a_waitfor & MNT_LAZY) == 0)
		return (0);

	/*
	 * Move ourselves to the back of the sync list.
	 */
	vn_syncer_add(syncvp, syncdelay);

	/*
	 * Walk the list of vnodes pushing all that are dirty and
	 * not already on the sync list, and freeing vnodes which have
	 * no refs and whose VM objects are empty.  vfs_msync() handles
	 * the VM issues and must be called whether the mount is readonly
	 * or not.
	 */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (0);
	if (mp->mnt_flag & MNT_RDONLY) {
		vfs_msync(mp, MNT_NOWAIT);
	} else {
		asyncflag = mp->mnt_flag & MNT_ASYNC;
		mp->mnt_flag &= ~MNT_ASYNC;	/* ZZZ hack */
		vfs_msync(mp, MNT_NOWAIT);
		VFS_SYNC(mp, MNT_NOWAIT | MNT_LAZY);
		if (asyncflag)
			mp->mnt_flag |= MNT_ASYNC;
	}
	vfs_unbusy(mp);
	return (0);
}

/*
 * The syncer vnode is no longer referenced.
 *
 * sync_inactive { struct vnode *a_vp, struct proc *a_p }
 */
static int
sync_inactive(struct vop_inactive_args *ap)
{
	vgone_vxlocked(ap->a_vp);
	return (0);
}

/*
 * The syncer vnode is no longer needed and is being decommissioned.
 * This can only occur when the last reference has been released on
 * mp->mnt_syncer, so mp->mnt_syncer had better be NULL.
 *
 * Modifications to the worklist must be protected with a critical
 * section.
 *
 * sync_reclaim { struct vnode *a_vp }
 */
static int
sync_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct syncer_ctx *ctx;

	ctx = vp->v_mount->mnt_syncer_ctx;
	if (ctx) {
		lwkt_gettoken(&ctx->sc_token);
		KKASSERT(vp->v_mount->mnt_syncer != vp);
		if (vp->v_flag & VONWORKLST) {
			LIST_REMOVE(vp, v_synclist);
			vclrflags(vp, VONWORKLST);
			--ctx->syncer_count;
		}
		lwkt_reltoken(&ctx->sc_token);
	} else {
		KKASSERT((vp->v_flag & VONWORKLST) == 0);
	}

	return (0);
}

/*
 * This is very similar to vmntvnodescan() but it only scans the
 * vnodes on the syncer list.  VFSs which support faster VFS_SYNC
 * operations use the VISDIRTY flag on the vnode to ensure that vnodes
 * with dirty inodes are added to the syncer in addition to vnodes
 * with dirty buffers, and can use this function instead of
 * vmntvnodescan().
 *
 * This scan does not issue VOP_FSYNC()s.  The supplied callback is
 * intended to synchronize the file in the manner intended by the VFS
 * using it.
 *
 * This is important when a system has millions of vnodes.
 */
int
vsyncscan(struct mount *mp, int vmsc_flags,
	  int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
	  void *data)
{
	struct syncer_ctx *ctx;
	struct synclist *slp;
	struct vnode *vp;
	int i;
	int count;
	int lkflags;

	if (vmsc_flags & VMSC_NOWAIT)
		lkflags = LK_NOWAIT;
	else
		lkflags = 0;

	/*
	 * Syncer list context.  This API requires a dedicated syncer thread
	 * (MNTK_THR_SYNC).
	 */
	KKASSERT(mp->mnt_kern_flag & MNTK_THR_SYNC);
	ctx = mp->mnt_syncer_ctx;
	lwkt_gettoken(&ctx->sc_token);

	/*
	 * Setup for the loop.  Allow races against the syncer thread but
	 * require that the syncer thread not be lazy if we were told
	 * not to be lazy.
	 */
	i = ctx->syncer_delayno & ctx->syncer_mask;
	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		++ctx->syncer_forced;
	for (count = 0; count <= ctx->syncer_mask; ++count) {
		slp = &ctx->syncer_workitem_pending[i];

		while ((vp = LIST_FIRST(slp)) != NULL) {
			KKASSERT(vp->v_mount == mp);
			if (vmsc_flags & VMSC_GETVP) {
				if (vget(vp, LK_EXCLUSIVE | lkflags) == 0) {
					slowfunc(mp, vp, data);
					vput(vp);
				}
			} else if (vmsc_flags & VMSC_GETVX) {
				vx_get(vp);
				slowfunc(mp, vp, data);
				vx_put(vp);
			} else {
				vhold(vp);
				slowfunc(mp, vp, data);
				vdrop(vp);
			}

			/*
			 * vp could be invalid.  However, if vp is still at
			 * the head of the list it is clearly valid and we
			 * can safely move it.  The negative delay maps to
			 * absolute bucket (i + syncdelay) & syncer_mask in
			 * vn_syncer_add().
			 */
			if (LIST_FIRST(slp) == vp)
				vn_syncer_add(vp, -(i + syncdelay));
		}
		i = (i + 1) & ctx->syncer_mask;
	}

	if ((vmsc_flags & VMSC_NOWAIT) == 0)
		--ctx->syncer_forced;
	lwkt_reltoken(&ctx->sc_token);
	return (0);
}
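
/*
 * Illustrative (hypothetical) vsyncscan() usage - not part of this
 * file.  A VFS with its own flush logic might supply a callback such
 * as:
 *
 *	static int
 *	myfs_sync_one(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		return (VOP_FSYNC(vp, MNT_LAZY, 0));
 *	}
 *
 *	vsyncscan(mp, VMSC_GETVP | VMSC_NOWAIT, myfs_sync_one, NULL);
 */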

/*
 * Print out a syncer vnode.
 *
 * sync_print { struct vnode *a_vp }
 */
static int
sync_print(struct vop_print_args *ap)
{
	struct vnode *vp = ap->a_vp;

	kprintf("syncer vnode");
	lockmgr_printinfo(&vp->v_lock);
	kprintf("\n");
	return (0);
}