xref: /dragonfly/sys/vfs/hammer/hammer_flusher.c (revision 81c11cd3)
1 /*
2  * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Matthew Dillon <dillon@backplane.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
35  */
36 /*
37  * HAMMER dependency flusher thread
38  *
39  * Meta-data updates create buffer dependencies which are arranged as a
40  * hierarchy of lists.
41  */
42 
43 #include "hammer.h"
44 
45 static void hammer_flusher_master_thread(void *arg);
46 static void hammer_flusher_slave_thread(void *arg);
47 static void hammer_flusher_flush(hammer_mount_t hmp);
48 static void hammer_flusher_flush_inode(hammer_inode_t ip,
49 					hammer_transaction_t trans);
50 
51 RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
52               hammer_ino_rb_compare);
53 
54 /*
55  * Inodes are sorted and assigned to slave threads in groups of 128.
56  * We want a flush group size large enough such that the slave threads
57  * are not likely to interfere with each other when accessing the B-Tree,
58  * but not so large that we lose concurrency.
59  */
60 #define HAMMER_FLUSH_GROUP_SIZE 128
61 
62 /*
63  * Support structures for the flusher threads.
64  */
65 struct hammer_flusher_info {
66 	TAILQ_ENTRY(hammer_flusher_info) entry;
67 	struct hammer_mount *hmp;
68 	thread_t	td;
69 	int		runstate;
70 	int		count;
71 	hammer_flush_group_t flg;
72 	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
73 };
74 
75 typedef struct hammer_flusher_info *hammer_flusher_info_t;
76 
77 /*
78  * Sync all inodes pending on the flusher.
79  *
80  * All flush groups will be flushed.  This does not queue dirty inodes
81  * to the flush groups, it just flushes out what has already been queued!
82  */
83 void
84 hammer_flusher_sync(hammer_mount_t hmp)
85 {
86 	int seq;
87 
88 	seq = hammer_flusher_async(hmp, NULL);
89 	hammer_flusher_wait(hmp, seq);
90 }
91 
92 /*
93  * Sync all flush groups through to close_flg - return immediately.
94  * If close_flg is NULL all flush groups are synced.
95  *
96  * Returns the sequence number of the last closed flush group,
97  * which may be close_flg.  When syncing to the end, if there
98  * are no flush groups pending we still cycle the flusher, so
99  * we return the next seq number not yet allocated.
100  */
101 int
102 hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
103 {
104 	hammer_flush_group_t flg;
105 	int seq;
106 
107 	/*
108 	 * Already closed
109 	 */
110 	if (close_flg && close_flg->closed)
111 		return(close_flg->seq);
112 
113 	/*
114 	 * Close flush groups until we hit the end of the list
115 	 * or close_flg.
116 	 */
117 	while ((flg = hmp->next_flush_group) != NULL) {
118 		KKASSERT(flg->closed == 0 && flg->running == 0);
119 		flg->closed = 1;
120 		hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);
121 		if (flg == close_flg)
122 			break;
123 	}
124 
125 	if (hmp->flusher.td) {
126 		if (hmp->flusher.signal++ == 0)
127 			wakeup(&hmp->flusher.signal);
128 		seq = flg ? flg->seq : hmp->flusher.next;
129 	} else {
130 		seq = hmp->flusher.done;
131 	}
132 	return(seq);
133 }
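
/*
 * Illustrative sketch, not part of the original file: the signal counter
 * used above only issues a wakeup() on the 0->1 transition; later
 * increments simply record that more requests arrived while the flusher
 * is already being woken.  poke_flusher() is a hypothetical helper that
 * shows the idiom in isolation; the block is excluded from compilation.
 */
#if 0
static __inline void
poke_flusher(hammer_mount_t hmp)
{
	if (hmp->flusher.signal++ == 0)
		wakeup(&hmp->flusher.signal);
}
#endif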
134 
135 /*
136  * Flush the current/next flushable flg.  This function is typically called
137  * in a loop along with hammer_flusher_wait(hmp, returned_seq) to iterate
138  * flush groups until specific conditions are met.
139  *
140  * If a flush is currently in progress its seq is returned.
141  *
142  * If no flush is currently in progress the next available flush group
143  * will be flushed and its seq returned.
144  *
145  * If no flush groups are present a dummy seq will be allocated and
146  * returned and the flusher will be activated (e.g. to flush the
147  * undo/redo and the volume header).
148  */
149 int
150 hammer_flusher_async_one(hammer_mount_t hmp)
151 {
152 	hammer_flush_group_t flg;
153 	int seq;
154 
155 	if (hmp->flusher.td) {
156 		flg = TAILQ_FIRST(&hmp->flush_group_list);
157 		seq = hammer_flusher_async(hmp, flg);
158 	} else {
159 		seq = hmp->flusher.done;
160 	}
161 	return(seq);
162 }
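
/*
 * Illustrative sketch, not part of the original file: the loop pattern
 * described in the comment above hammer_flusher_async_one().  The caller
 * repeatedly flushes one group and waits on the returned seq until some
 * condition is met.  example_drain_until() and its predicate
 * my_condition_met() are hypothetical; the block is excluded from
 * compilation.
 */
#if 0
static void
example_drain_until(hammer_mount_t hmp)
{
	int seq;

	while (my_condition_met(hmp) == 0) {
		seq = hammer_flusher_async_one(hmp);	/* kick next flush group */
		hammer_flusher_wait(hmp, seq);		/* wait for it to finish */
	}
}
#endif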
163 
164 /*
165  * Wait for the flusher to get to the specified sequence number.
166  * Signal the flusher as often as necessary to keep it going.
167  */
168 void
169 hammer_flusher_wait(hammer_mount_t hmp, int seq)
170 {
171 	while ((int)(seq - hmp->flusher.done) > 0) {
172 		if ((int)(seq - hmp->flusher.act) > 0) {
173 			if (hmp->flusher.signal++ == 0)
174 				wakeup(&hmp->flusher.signal);
175 		}
176 		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
177 	}
178 }
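
/*
 * Illustrative sketch, not part of the original file: the signed-difference
 * comparison used above, (int)(seq - hmp->flusher.done) > 0, remains correct
 * even when the sequence counters wrap around.  seq_is_after() is a
 * hypothetical helper isolating the idiom; the block is excluded from
 * compilation.
 */
#if 0
static __inline int
seq_is_after(int a, int b)
{
	/* non-zero if a is logically later than b, tolerating integer wrap */
	return ((int)(a - b) > 0);
}
#endif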
179 
180 void
181 hammer_flusher_wait_next(hammer_mount_t hmp)
182 {
183 	int seq;
184 
185 	seq = hammer_flusher_async_one(hmp);
186 	hammer_flusher_wait(hmp, seq);
187 }
188 
189 void
190 hammer_flusher_create(hammer_mount_t hmp)
191 {
192 	hammer_flusher_info_t info;
193 	int i;
194 
195 	hmp->flusher.signal = 0;
196 	hmp->flusher.act = 0;
197 	hmp->flusher.done = 0;
198 	hmp->flusher.next = 1;
199 	hammer_ref(&hmp->flusher.finalize_lock);
200 	TAILQ_INIT(&hmp->flusher.run_list);
201 	TAILQ_INIT(&hmp->flusher.ready_list);
202 
203 	lwkt_create(hammer_flusher_master_thread, hmp,
204 		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
205 	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
206 		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
207 		info->hmp = hmp;
208 		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
209 		lwkt_create(hammer_flusher_slave_thread, info,
210 			    &info->td, NULL, 0, -1, "hammer-S%d", i);
211 	}
212 }
213 
214 void
215 hammer_flusher_destroy(hammer_mount_t hmp)
216 {
217 	hammer_flusher_info_t info;
218 
219 	/*
220 	 * Kill the master
221 	 */
222 	hmp->flusher.exiting = 1;
223 	while (hmp->flusher.td) {
224 		++hmp->flusher.signal;
225 		wakeup(&hmp->flusher.signal);
226 		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
227 	}
228 
229 	/*
230 	 * Kill the slaves
231 	 */
232 	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
233 		KKASSERT(info->runstate == 0);
234 		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
235 		info->runstate = -1;
236 		wakeup(&info->runstate);
237 		while (info->td)
238 			tsleep(&info->td, 0, "hmrwwc", 0);
239 		kfree(info, hmp->m_misc);
240 	}
241 }
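
/*
 * Illustrative sketch, not part of the original file: the assumed pairing
 * of the creation and teardown entry points above over a mount's lifetime.
 * example_flusher_lifecycle() is hypothetical; the block is excluded from
 * compilation.
 */
#if 0
static void
example_flusher_lifecycle(hammer_mount_t hmp)
{
	hammer_flusher_create(hmp);	/* start the master and slave threads */
	/* ... filesystem is active, flush groups are queued ... */
	hammer_flusher_sync(hmp);	/* drain everything already queued */
	hammer_flusher_destroy(hmp);	/* stop the master, then the slaves */
}
#endif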
242 
243 /*
244  * The master flusher thread manages the flusher sequence id and
245  * synchronization with the slave work threads.
246  */
247 static void
248 hammer_flusher_master_thread(void *arg)
249 {
250 	hammer_flush_group_t flg;
251 	hammer_mount_t hmp;
252 
253 	hmp = arg;
254 
255 	lwkt_gettoken(&hmp->fs_token);
256 
257 	for (;;) {
258 		/*
259 		 * Flush all closed flgs.  If no flgs are closed we still
260 		 * do at least one flush cycle as we may have to update
261 		 * the UNDO FIFO even if no inodes are queued.
262 		 */
263 		for (;;) {
264 			while (hmp->flusher.group_lock)
265 				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
266 			hammer_flusher_clean_loose_ios(hmp);
267 			hammer_flusher_flush(hmp);
268 			hmp->flusher.done = hmp->flusher.act;
269 			wakeup(&hmp->flusher.done);
270 			flg = TAILQ_FIRST(&hmp->flush_group_list);
271 			if (flg == NULL || flg->closed == 0)
272 				break;
273 			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
274 				break;
275 		}
276 
277 		/*
278 		 * Wait for activity.
279 		 */
280 		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
281 			break;
282 		while (hmp->flusher.signal == 0)
283 			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);
284 
285 		/*
286 		 * Flush once for each count on signal, but only allow one extra
287 		 * flush request to build up.
288 		 */
289 		if (--hmp->flusher.signal != 0)
290 			hmp->flusher.signal = 1;
291 	}
292 
293 	/*
294 	 * And we are done.
295 	 */
296 	hmp->flusher.td = NULL;
297 	wakeup(&hmp->flusher.exiting);
298 	lwkt_reltoken(&hmp->fs_token);
299 	lwkt_exit();
300 }
301 
302 /*
303  * Flush all inodes in the current flush group.
304  */
305 static void
306 hammer_flusher_flush(hammer_mount_t hmp)
307 {
308 	hammer_flusher_info_t info;
309 	hammer_flush_group_t flg;
310 	hammer_reserve_t resv;
311 	hammer_inode_t ip;
312 	hammer_inode_t next_ip;
313 	int slave_index;
314 	int count;
315 
316 	/*
317 	 * Just in case there's a flush race on mount
318 	 */
319 	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
320 		return;
321 	}
322 
323 	/*
324 	 * Set the actively flushing sequence number.  If no flushable
325 	 * groups are present allocate a dummy sequence number for the
326 	 * operation.
327 	 */
328 	flg = TAILQ_FIRST(&hmp->flush_group_list);
329 	if (flg == NULL) {
330 		hmp->flusher.act = hmp->flusher.next;
331 		++hmp->flusher.next;
332 	} else if (flg->closed) {
333 		KKASSERT(flg->running == 0);
334 		flg->running = 1;
335 		hmp->flusher.act = flg->seq;
336 		if (hmp->fill_flush_group == flg)
337 			hmp->fill_flush_group = TAILQ_NEXT(flg, flush_entry);
338 	}
339 
340 	/*
341 	 * We only do one flg but we may have to loop/retry.
342 	 *
343 	 * Due to various races it is possible to come across a flush
344 	 * group which has not yet been closed.
345 	 */
346 	count = 0;
347 	while (flg && flg->running) {
348 		++count;
349 		if (hammer_debug_general & 0x0001) {
350 			kprintf("hammer_flush %d ttl=%d recs=%d\n",
351 				hmp->flusher.act,
352 				flg->total_count,
353 				flg->refs);
354 		}
355 		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
356 			break;
357 		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
358 
359 		/*
360 		 * If the previous flush cycle just about exhausted our
361 		 * UNDO space we may have to do a dummy cycle to move the
362 		 * first_offset up before actually digging into a new cycle,
363 		 * or the new cycle will not have sufficient undo space.
364 		 */
365 		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
366 			hammer_flusher_finalize(&hmp->flusher.trans, 0);
367 
368 		KKASSERT(hmp->next_flush_group != flg);
369 
370 		/*
371 		 * Iterate the inodes in the flg's flush_tree and assign
372 		 * them to slaves.
373 		 */
374 		slave_index = 0;
375 		info = TAILQ_FIRST(&hmp->flusher.ready_list);
376 		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);
377 
378 		while ((ip = next_ip) != NULL) {
379 			next_ip = RB_NEXT(hammer_fls_rb_tree,
380 					  &flg->flush_tree, ip);
381 
382 			if (++hmp->check_yield > hammer_yield_check) {
383 				hmp->check_yield = 0;
384 				lwkt_yield();
385 			}
386 
387 			/*
388 			 * Add ip to the slave's work array.  The slave is
389 			 * not currently running.
390 			 */
391 			info->work_array[info->count++] = ip;
392 			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
393 				continue;
394 
395 			/*
396 			 * Get the slave running
397 			 */
398 			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
399 			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
400 			info->flg = flg;
401 			info->runstate = 1;
402 			wakeup(&info->runstate);
403 
404 			/*
405 			 * Get a new slave.  We may have to wait for one to
406 			 * finish running.
407 			 */
408 			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
409 				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
410 			}
411 		}
412 
413 		/*
414 		 * Run the current slave if necessary
415 		 */
416 		if (info->count) {
417 			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
418 			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
419 			info->flg = flg;
420 			info->runstate = 1;
421 			wakeup(&info->runstate);
422 		}
423 
424 		/*
425 		 * Wait for all slaves to finish running
426 		 */
427 		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
428 			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
429 
430 		/*
431 		 * Do the final finalization, clean up
432 		 */
433 		hammer_flusher_finalize(&hmp->flusher.trans, 1);
434 		hmp->flusher.tid = hmp->flusher.trans.tid;
435 
436 		hammer_done_transaction(&hmp->flusher.trans);
437 
438 		/*
439 		 * Loop up on the same flg.  If the flg is done clean it up
440 		 * and break out.  We only flush one flg.
441 		 */
442 		if (RB_EMPTY(&flg->flush_tree)) {
443 			KKASSERT(flg->refs == 0);
444 			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
445 			kfree(flg, hmp->m_misc);
446 			break;
447 		}
448 		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
449 	}
450 
451 	/*
452 	 * We may have pure meta-data to flush, or we may have to finish
453 	 * cycling the UNDO FIFO, even if there were no flush groups.
454 	 */
455 	if (count == 0 && hammer_flusher_haswork(hmp)) {
456 		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
457 		hammer_flusher_finalize(&hmp->flusher.trans, 1);
458 		hammer_done_transaction(&hmp->flusher.trans);
459 	}
460 
461 	/*
462 	 * Clean up any freed big-blocks (typically zone-2).
463 	 * resv->flush_group is typically set several flush groups ahead
464 	 * of the free to ensure that the freed block is not reused until
465 	 * it can no longer be referenced.
466 	 */
467 	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
468 		if (resv->flush_group != hmp->flusher.act)
469 			break;
470 		hammer_reserve_clrdelay(hmp, resv);
471 	}
472 }
473 
474 
475 /*
476  * The slave flusher thread pulls work off the master flush list until no
477  * work is left.
478  */
479 static void
480 hammer_flusher_slave_thread(void *arg)
481 {
482 	hammer_flush_group_t flg;
483 	hammer_flusher_info_t info;
484 	hammer_mount_t hmp;
485 	hammer_inode_t ip;
486 	int i;
487 
488 	info = arg;
489 	hmp = info->hmp;
490 	lwkt_gettoken(&hmp->fs_token);
491 
492 	for (;;) {
493 		while (info->runstate == 0)
494 			tsleep(&info->runstate, 0, "hmrssw", 0);
495 		if (info->runstate < 0)
496 			break;
497 		flg = info->flg;
498 
499 		for (i = 0; i < info->count; ++i) {
500 			ip = info->work_array[i];
501 			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
502 			++hammer_stats_inode_flushes;
503 		}
504 		info->count = 0;
505 		info->runstate = 0;
506 		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
507 		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
508 		wakeup(&hmp->flusher.ready_list);
509 	}
510 	info->td = NULL;
511 	wakeup(&info->td);
512 	lwkt_reltoken(&hmp->fs_token);
513 	lwkt_exit();
514 }
515 
516 void
517 hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
518 {
519 	hammer_buffer_t buffer;
520 	hammer_io_t io;
521 
522 	/*
523 	 * loose ends - buffers without bp's aren't tracked by the kernel
524 	 * and can build up, so clean them out.  This can occur when an
525 	 * IO completes on a buffer with no references left.
526 	 *
527 	 * The io_token is needed to protect the list.
528 	 */
529 	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
530 		lwkt_gettoken(&hmp->io_token);
531 		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
532 			KKASSERT(io->mod_root == &hmp->lose_root);
533 			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
534 			io->mod_root = NULL;
535 			hammer_ref(&io->lock);
536 			buffer = (void *)io;
537 			hammer_rel_buffer(buffer, 0);
538 		}
539 		lwkt_reltoken(&hmp->io_token);
540 	}
541 }
542 
543 /*
544  * Flush a single inode that is part of a flush group.
545  *
546  * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
547  * the front-end should have reserved sufficient space on the media.  Any
548  * error other than EWOULDBLOCK will force the mount to be read-only.
549  */
550 static
551 void
552 hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
553 {
554 	hammer_mount_t hmp = ip->hmp;
555 	int error;
556 
557 	hammer_flusher_clean_loose_ios(hmp);
558 	error = hammer_sync_inode(trans, ip);
559 
560 	/*
561 	 * EWOULDBLOCK can happen under normal operation, all other errors
562 	 * are considered extremely serious.  We must set WOULDBLOCK
563 	 * mechanics to deal with the mess left over from the abort of the
564 	 * previous flush.
565 	 */
566 	if (error) {
567 		ip->flags |= HAMMER_INODE_WOULDBLOCK;
568 		if (error == EWOULDBLOCK)
569 			error = 0;
570 	}
571 	hammer_flush_inode_done(ip, error);
572 	while (hmp->flusher.finalize_want)
573 		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
574 	if (hammer_flusher_undo_exhausted(trans, 1)) {
575 		kprintf("HAMMER: Warning: UNDO area too small!\n");
576 		hammer_flusher_finalize(trans, 1);
577 	} else if (hammer_flusher_meta_limit(trans->hmp)) {
578 		hammer_flusher_finalize(trans, 0);
579 	}
580 }
581 
582 /*
583  * Return non-zero if the UNDO area has less than (QUARTER / 4) of its
584  * space left.
585  *
586  * 1/4 - Emergency free undo space level.  Below this point the flusher
587  *	 will finalize even if directory dependencies have not been resolved.
588  *
589  * 2/4 - Used by the pruning and reblocking code.  These functions may be
590  *	 running in parallel with a flush and cannot be allowed to drop
591  *	 available undo space to emergency levels.
592  *
593  * 3/4 - Used at the beginning of a flush to force-sync the volume header
594  *	 to give the flush plenty of runway to work in.
595  */
596 int
597 hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
598 {
599 	if (hammer_undo_space(trans) <
600 	    hammer_undo_max(trans->hmp) * quarter / 4) {
601 		return(1);
602 	} else {
603 		return(0);
604 	}
605 }
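
/*
 * Illustrative sketch, not part of the original file: hypothetical names
 * for the quarter thresholds documented above.  In this file the flusher
 * checks quarter 3 before starting a flush cycle and quarter 1 while
 * flushing inodes; the pruning/reblocking code is described as using
 * quarter 2.  The block is excluded from compilation.
 */
#if 0
#define HAMMER_UNDO_QUARTER_EMERGENCY	1	/* flusher finalizes regardless */
#define HAMMER_UNDO_QUARTER_UTILITY	2	/* prune/reblock back off */
#define HAMMER_UNDO_QUARTER_PREFLUSH	3	/* force-sync volume header first */
#endif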
606 
607 /*
608  * Flush all pending UNDOs, wait for write completion, update the volume
609  * header with the new UNDO end position, and flush it.  Then
610  * asynchronously flush the meta-data.
611  *
612  * If this is the last finalization in a flush group we also synchronize
613  * our cached blockmap and set hmp->flusher_undo_start and our cached undo
614  * fifo first_offset so the next flush resets the FIFO pointers.
615  *
616  * If this is not final it is being called because too many dirty meta-data
617  * buffers have built up and must be flushed with UNDO synchronization to
618  * avoid a buffer cache deadlock.
619  */
620 void
621 hammer_flusher_finalize(hammer_transaction_t trans, int final)
622 {
623 	hammer_volume_t root_volume;
624 	hammer_blockmap_t cundomap, dundomap;
625 	hammer_mount_t hmp;
626 	hammer_io_t io;
627 	hammer_off_t save_undo_next_offset;
628 	int count;
629 	int i;
630 
631 	hmp = trans->hmp;
632 	root_volume = trans->rootvol;
633 
634 	/*
635 	 * Exclusively lock the flusher.  This guarantees that all dirty
636 	 * buffers will be idled (have a mod-count of 0).
637 	 */
638 	++hmp->flusher.finalize_want;
639 	hammer_lock_ex(&hmp->flusher.finalize_lock);
640 
641 	/*
642 	 * If this isn't the final sync several threads may have hit the
643 	 * meta-limit at the same time and raced.  Only sync if we really
644 	 * have to, after acquiring the lock.
645 	 */
646 	if (final == 0 && !hammer_flusher_meta_limit(hmp))
647 		goto done;
648 
649 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
650 		goto done;
651 
652 	/*
653 	 * Flush data buffers.  This can occur asynchronously and at any
654 	 * time.  We must interlock against the frontend direct-data write
655 	 * but do not have to acquire the sync-lock yet.
656 	 *
657 	 * These data buffers have already been collected prior to the
658 	 * related inode(s) getting queued to the flush group.
659 	 */
660 	count = 0;
661 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->data_root)) != NULL) {
662 		if (io->ioerror)
663 			break;
664 		hammer_ref(&io->lock);
665 		hammer_io_write_interlock(io);
666 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
667 		hammer_io_flush(io, 0);
668 		hammer_io_done_interlock(io);
669 		hammer_rel_buffer((hammer_buffer_t)io, 0);
670 		hammer_io_limit_backlog(hmp);
671 		++count;
672 	}
673 
674 	/*
675 	 * The sync-lock is required for the remaining sequence.  This lock
676 	 * prevents meta-data from being modified.
677 	 */
678 	hammer_sync_lock_ex(trans);
679 
680 	/*
681 	 * If we have been asked to finalize the volume header sync the
682 	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
683 	 * record for the update.
684 	 */
685 	if (final) {
686 		cundomap = &hmp->blockmap[0];
687 		dundomap = &root_volume->ondisk->vol0_blockmap[0];
688 		if (root_volume->io.modified) {
689 			hammer_modify_volume(trans, root_volume,
690 					     dundomap, sizeof(hmp->blockmap));
691 			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
692 				hammer_crc_set_blockmap(&cundomap[i]);
693 			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
694 			hammer_modify_volume_done(root_volume);
695 		}
696 	}
697 
698 	/*
699 	 * Flush UNDOs.  This can occur concurrently with the data flush
700 	 * because data writes never overwrite.
701 	 *
702 	 * This also waits for I/Os to complete and flushes the cache on
703 	 * the target disk.
704 	 *
705 	 * Record the UNDO append point as this can continue to change
706 	 * after we have flushed the UNDOs.
707 	 */
708 	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
709 	hammer_lock_ex(&hmp->undo_lock);
710 	save_undo_next_offset = cundomap->next_offset;
711 	hammer_unlock(&hmp->undo_lock);
712 	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);
713 
714 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
715 		goto failed;
716 
717 	/*
718 	 * HAMMER VERSION < 4:
719 	 *	Update the on-disk volume header with new UNDO FIFO end
720 	 *	position (do not generate new UNDO records for this change).
721 	 *	We have to do this for the UNDO FIFO whether (final) is
722 	 *	set or not in order for the UNDOs to be recognized on
723 	 *	recovery.
724 	 *
725 	 * HAMMER VERSION >= 4:
726 	 *	The UNDO FIFO data written above will be recognized on
727 	 *	recovery without us having to sync the volume header.
728 	 *
729 	 * Also update the on-disk next_tid field.  This does not require
730 	 * an UNDO.  However, because our TID is generated before we get
731 	 * the sync lock another sync may have beat us to the punch.
732 	 * the sync lock, another sync may have beaten us to the punch.
733 	 * This also has the side effect of updating first_offset based on
734 	 * a prior finalization when the first finalization of the next flush
735 	 * cycle occurs, removing any undo info from the prior finalization
736 	 * from consideration.
737 	 *
738 	 * The volume header will be flushed out synchronously.
739 	 */
740 	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
741 	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
742 
743 	if (dundomap->first_offset != cundomap->first_offset ||
744 		   dundomap->next_offset != save_undo_next_offset) {
745 		hammer_modify_volume(NULL, root_volume, NULL, 0);
746 		dundomap->first_offset = cundomap->first_offset;
747 		dundomap->next_offset = save_undo_next_offset;
748 		hammer_crc_set_blockmap(dundomap);
749 		hammer_modify_volume_done(root_volume);
750 	}
751 
752 	/*
753 	 * vol0_next_tid is used for TID selection and is updated without
754 	 * an UNDO so we do not reuse a TID that may have been rolled-back.
755 	 *
756 	 * vol0_last_tid is the highest fully-synchronized TID.  It is
757 	 * set-up when the UNDO fifo is fully synced, later on (not here).
758 	 *
759 	 * The root volume can be open for modification by other threads
760 	 * generating UNDO or REDO records.  For example, reblocking,
761 	 * pruning, REDO mode fast-fsyncs, so the write interlock is
762 	 * mandatory.
763 	 */
764 	if (root_volume->io.modified) {
765 		hammer_modify_volume(NULL, root_volume, NULL, 0);
766 		if (root_volume->ondisk->vol0_next_tid < trans->tid)
767 			root_volume->ondisk->vol0_next_tid = trans->tid;
768 		hammer_crc_set_volume(root_volume->ondisk);
769 		hammer_modify_volume_done(root_volume);
770 		hammer_io_write_interlock(&root_volume->io);
771 		hammer_io_flush(&root_volume->io, 0);
772 		hammer_io_done_interlock(&root_volume->io);
773 	}
774 
775 	/*
776 	 * Wait for I/Os to complete.
777 	 *
778 	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
779 	 * the I/O to complete as the new UNDO FIFO entries are recognized
780 	 * even without the volume header update.  This allows the volume
781 	 * header to flushed along with meta-data, significantly reducing
782 	 * header to be flushed along with meta-data, significantly reducing
783 	 */
784 	hammer_flusher_clean_loose_ios(hmp);
785 	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
786 		hammer_io_wait_all(hmp, "hmrfl3", 1);
787 
788 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
789 		goto failed;
790 
791 	/*
792 	 * Flush meta-data.  The meta-data will be undone if we crash
793 	 * so we can safely flush it asynchronously.  There is no need
794 	 * to wait for I/O to complete (or issue a synchronous disk flush).
795 	 *
796 	 * In fact, even if we did wait the meta-data will still be undone
797 	 * by a crash up until the next flush cycle due to the first_offset
798 	 * in the volume header for the UNDO FIFO not being adjusted until
799 	 * the following flush cycle.
800 	 *
801 	 * No io interlock is needed, bioops callbacks will not mess with
802 	 * meta data buffers.
803 	 */
804 	count = 0;
805 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->meta_root)) != NULL) {
806 		if (io->ioerror)
807 			break;
808 		KKASSERT(io->modify_refs == 0);
809 		hammer_ref(&io->lock);
810 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
811 		hammer_io_flush(io, 0);
812 		hammer_rel_buffer((hammer_buffer_t)io, 0);
813 		hammer_io_limit_backlog(hmp);
814 		++count;
815 	}
816 
817 	/*
818 	 * If this is the final finalization for the flush group set
819 	 * up for the next sequence by setting a new first_offset in
820 	 * our cached blockmap and clearing the undo history.
821 	 *
822 	 * Even though we have updated our cached first_offset, the on-disk
823 	 * first_offset still governs available-undo-space calculations.
824 	 *
825 	 * We synchronize to save_undo_next_offset rather than
826 	 * cundomap->next_offset because that is what we flushed out
827 	 * above.
828 	 *
829 	 * NOTE! UNDOs can only be added with the sync_lock held
830 	 *	 so we can clear the undo history without racing.
831 	 *	 REDOs can be added at any time which is why we
832 	 *	 have to be careful and use save_undo_next_offset
833 	 *	 when setting the new first_offset.
834 	 */
835 	if (final) {
836 		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
837 		if (cundomap->first_offset != save_undo_next_offset) {
838 			cundomap->first_offset = save_undo_next_offset;
839 			hmp->hflags |= HMNT_UNDO_DIRTY;
840 		} else if (cundomap->first_offset != cundomap->next_offset) {
841 			hmp->hflags |= HMNT_UNDO_DIRTY;
842 		} else {
843 			hmp->hflags &= ~HMNT_UNDO_DIRTY;
844 		}
845 		hammer_clear_undo_history(hmp);
846 
847 		/*
848 		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
849 		 * meaning a crash will not roll it back.  flush_tid2 has
850 		 * been written out asynchronously and a crash will roll
851 		 * it back.  flush_tid1 is used for all mirroring masters.
852 		 */
853 		if (hmp->flush_tid1 != hmp->flush_tid2) {
854 			hmp->flush_tid1 = hmp->flush_tid2;
855 			wakeup(&hmp->flush_tid1);
856 		}
857 		hmp->flush_tid2 = trans->tid;
858 
859 		/*
860 		 * Clear the REDO SYNC flag.  This flag is used to ensure
861 		 * that the recovery span in the UNDO/REDO FIFO contains
862 		 * at least one REDO SYNC record.
863 		 */
864 		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
865 	}
866 
867 	/*
868 	 * Cleanup.  Report any critical errors.
869 	 */
870 failed:
871 	hammer_sync_unlock(trans);
872 
873 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
874 		kprintf("HAMMER(%s): Critical write error during flush, "
875 			"refusing to sync UNDO FIFO\n",
876 			root_volume->ondisk->vol_name);
877 	}
878 
879 done:
880 	hammer_unlock(&hmp->flusher.finalize_lock);
881 
882 	if (--hmp->flusher.finalize_want == 0)
883 		wakeup(&hmp->flusher.finalize_want);
884 	hammer_stats_commits += final;
885 }
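
/*
 * Illustrative sketch, not part of the original file: the two ways this
 * file invokes the finalization above.  example_finalize_modes() is
 * hypothetical; the block is excluded from compilation.
 */
#if 0
static void
example_finalize_modes(hammer_transaction_t trans)
{
	/*
	 * Intermediate finalization: relieve dirty meta-data or a nearly
	 * exhausted UNDO FIFO without ending the flush group.
	 */
	if (hammer_flusher_meta_limit(trans->hmp))
		hammer_flusher_finalize(trans, 0);

	/*
	 * Final finalization: end the flush group and reset the cached
	 * UNDO FIFO first_offset for the next cycle.
	 */
	hammer_flusher_finalize(trans, 1);
}
#endif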
886 
887 /*
888  * Flush UNDOs.
889  */
890 void
891 hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
892 {
893 	hammer_io_t io;
894 	int count;
895 
896 	count = 0;
897 	while ((io = RB_FIRST(hammer_mod_rb_tree, &hmp->undo_root)) != NULL) {
898 		if (io->ioerror)
899 			break;
900 		hammer_ref(&io->lock);
901 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
902 		hammer_io_write_interlock(io);
903 		hammer_io_flush(io, hammer_undo_reclaim(io));
904 		hammer_io_done_interlock(io);
905 		hammer_rel_buffer((hammer_buffer_t)io, 0);
906 		hammer_io_limit_backlog(hmp);
907 		++count;
908 	}
909 	hammer_flusher_clean_loose_ios(hmp);
910 	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
911 	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
912 		hammer_io_wait_all(hmp, "hmrfl1", 1);
913 	} else {
914 		hammer_io_wait_all(hmp, "hmrfl2", 0);
915 	}
916 }
917 
918 /*
919  * Return non-zero if too many dirty meta-data buffers have built up.
920  *
921  * Since we cannot allow such buffers to flush until we have dealt with
922  * the UNDOs, we risk deadlocking the kernel's buffer cache.
923  */
924 int
925 hammer_flusher_meta_limit(hammer_mount_t hmp)
926 {
927 	if (hmp->locked_dirty_space + hmp->io_running_space >
928 	    hammer_limit_dirtybufspace) {
929 		return(1);
930 	}
931 	return(0);
932 }
933 
934 /*
935  * Return non-zero if too many dirty meta-data buffers have built up.
936  *
937  * This version is used by background operations (mirror, prune, reblock)
938  * to leave room for foreground operations.
939  */
940 int
941 hammer_flusher_meta_halflimit(hammer_mount_t hmp)
942 {
943 	if (hmp->locked_dirty_space + hmp->io_running_space >
944 	    hammer_limit_dirtybufspace / 2) {
945 		return(1);
946 	}
947 	return(0);
948 }
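
/*
 * Illustrative sketch, not part of the original file: how a background
 * operation (mirror, prune, reblock) might throttle itself against the
 * half limit by kicking the flusher and waiting for a cycle.
 * example_background_throttle() is hypothetical; the block is excluded
 * from compilation.
 */
#if 0
static void
example_background_throttle(hammer_mount_t hmp)
{
	while (hammer_flusher_meta_halflimit(hmp))
		hammer_flusher_wait_next(hmp);
}
#endif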
949 
950 /*
951  * Return non-zero if the flusher still has something to flush.
952  */
953 int
954 hammer_flusher_haswork(hammer_mount_t hmp)
955 {
956 	if (hmp->ronly)
957 		return(0);
958 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
959 		return(0);
960 	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
961 	    RB_ROOT(&hmp->volu_root) ||			/* dirty buffers */
962 	    RB_ROOT(&hmp->undo_root) ||
963 	    RB_ROOT(&hmp->data_root) ||
964 	    RB_ROOT(&hmp->meta_root) ||
965 	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
966 	) {
967 		return(1);
968 	}
969 	return(0);
970 }
971 
972