/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

/*
 * Support structures for the flusher threads.
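 *
 * Each slave thread owns one hammer_flusher_info record.  The master
 * batches up to HAMMER_FLUSH_GROUP_SIZE inodes into work_array before
 * waking a slave.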
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
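 * The returned sequence number may be handed to hammer_flusher_wait()
 * to block until the flush completes, which is exactly what
 * hammer_flusher_sync() above does.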
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

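/*
 * Queue a single flush cycle without closing any flush groups and
 * return the sequence number the caller can wait on.
 */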
int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
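	/*
	 * The signed difference keeps this comparison correct even if
	 * the sequence number wraps past INT_MAX.
	 */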
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

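/*
 * Create the flusher master thread and a pool of slave work threads.
 * Each slave starts out parked on the ready_list.
 */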
void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

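/*
 * Tear the flusher threads down, master first, then the slaves.
 */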
void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
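 *
 * flusher.next is the next sequence number to hand out, flusher.act is
 * the cycle currently being flushed, and flusher.done trails act as
 * cycles complete.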
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in case there's a flush race on mount.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_list and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = TAILQ_FIRST(&flg->flush_list);

		while ((ip = next_ip) != NULL) {
			next_ip = TAILQ_NEXT(ip, flush_entry);

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (TAILQ_FIRST(&flg->flush_list) == NULL) {
			KKASSERT(TAILQ_EMPTY(&flg->flush_list));
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free so that the freed block is not recycled until it
	 * is safe to reuse.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}


/*
 * The slave flusher thread pulls work off the master flush_list until no
 * work is left.
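 *
 * info->runstate drives the state machine: 0 means idle, 1 means work
 * has been assigned, and -1 tells the thread to exit.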
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * I/O completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			if (io->lock.refs == 0)
				++hammer_count_refedbufs;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
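 *
 * For example, if hammer_undo_max() reports 1GB of undo space then a
 * quarter argument of 3 makes this return non-zero once less than
 * 768MB (3/4 of the maximum) remains available.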
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
 */
554 hammer_flusher_finalize(hammer_transaction_t trans, int final)
555 {
556 	hammer_volume_t root_volume;
557 	hammer_blockmap_t cundomap, dundomap;
558 	hammer_mount_t hmp;
559 	hammer_io_t io;
560 	int count;
561 	int i;
562 
563 	hmp = trans->hmp;
564 	root_volume = trans->rootvol;
565 
566 	/*
567 	 * Exclusively lock the flusher.  This guarantees that all dirty
568 	 * buffers will be idled (have a mod-count of 0).
569 	 */
570 	++hmp->flusher.finalize_want;
571 	hammer_lock_ex(&hmp->flusher.finalize_lock);
572 
573 	/*
574 	 * If this isn't the final sync several threads may have hit the
575 	 * meta-limit at the same time and raced.  Only sync if we really
576 	 * have to, after acquiring the lock.
577 	 */
578 	if (final == 0 && !hammer_flusher_meta_limit(hmp))
579 		goto done;
580 
581 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
582 		goto done;
583 
584 	/*
585 	 * Flush data buffers.  This can occur asynchronously and at any
586 	 * time.  We must interlock against the frontend direct-data write
587 	 * but do not have to acquire the sync-lock yet.
588 	 */
589 	count = 0;
590 	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
591 		if (io->ioerror)
592 			break;
593 		if (io->lock.refs == 0)
594 			++hammer_count_refedbufs;
595 		hammer_ref(&io->lock);
596 		hammer_io_write_interlock(io);
597 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
598 		hammer_io_flush(io);
599 		hammer_io_done_interlock(io);
600 		hammer_rel_buffer((hammer_buffer_t)io, 0);
601 		++count;
602 	}
603 
604 	/*
605 	 * The sync-lock is required for the remaining sequence.  This lock
606 	 * prevents meta-data from being modified.
607 	 */
608 	hammer_sync_lock_ex(trans);
609 
610 	/*
611 	 * If we have been asked to finalize the volume header sync the
612 	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
613 	 * record for the update.
614 	 */
615 	if (final) {
616 		cundomap = &hmp->blockmap[0];
617 		dundomap = &root_volume->ondisk->vol0_blockmap[0];
618 		if (root_volume->io.modified) {
619 			hammer_modify_volume(trans, root_volume,
620 					     dundomap, sizeof(hmp->blockmap));
621 			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
622 				hammer_crc_set_blockmap(&cundomap[i]);
623 			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
624 			hammer_modify_volume_done(root_volume);
625 		}
626 	}
627 
628 	/*
629 	 * Flush UNDOs
630 	 */
631 	count = 0;
632 	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
633 		if (io->ioerror)
634 			break;
635 		KKASSERT(io->modify_refs == 0);
636 		if (io->lock.refs == 0)
637 			++hammer_count_refedbufs;
638 		hammer_ref(&io->lock);
639 		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
640 		hammer_io_flush(io);
641 		hammer_rel_buffer((hammer_buffer_t)io, 0);
642 		++count;
643 	}
644 
645 	/*
646 	 * Wait for I/Os to complete
647 	 */
648 	hammer_flusher_clean_loose_ios(hmp);
649 	hammer_io_wait_all(hmp, "hmrfl1");
650 
651 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
652 		goto failed;
653 
654 	/*
655 	 * Update the on-disk volume header with new UNDO FIFO end position
656 	 * (do not generate new UNDO records for this change).  We have to
657 	 * do this for the UNDO FIFO whether (final) is set or not.
658 	 *
659 	 * Also update the on-disk next_tid field.  This does not require
660 	 * an UNDO.  However, because our TID is generated before we get
661 	 * the sync lock another sync may have beat us to the punch.
662 	 *
663 	 * This also has the side effect of updating first_offset based on
664 	 * a prior finalization when the first finalization of the next flush
665 	 * cycle occurs, removing any undo info from the prior finalization
666 	 * from consideration.
667 	 *
668 	 * The volume header will be flushed out synchronously.
669 	 */
670 	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
671 	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
672 
673 	if (dundomap->first_offset != cundomap->first_offset ||
674 		   dundomap->next_offset != cundomap->next_offset) {
675 		hammer_modify_volume(NULL, root_volume, NULL, 0);
676 		dundomap->first_offset = cundomap->first_offset;
677 		dundomap->next_offset = cundomap->next_offset;
678 		hammer_crc_set_blockmap(dundomap);
679 		hammer_modify_volume_done(root_volume);
680 	}
681 
682 	/*
683 	 * vol0_next_tid is used for TID selection and is updated without
684 	 * an UNDO so we do not reuse a TID that may have been rolled-back.
685 	 *
686 	 * vol0_last_tid is the highest fully-synchronized TID.  It is
687 	 * set-up when the UNDO fifo is fully synced, later on (not here).
688 	 */
689 	if (root_volume->io.modified) {
690 		hammer_modify_volume(NULL, root_volume, NULL, 0);
691 		if (root_volume->ondisk->vol0_next_tid < trans->tid)
692 			root_volume->ondisk->vol0_next_tid = trans->tid;
693 		hammer_crc_set_volume(root_volume->ondisk);
694 		hammer_modify_volume_done(root_volume);
695 		hammer_io_flush(&root_volume->io);
696 	}
697 
698 	/*
699 	 * Wait for I/Os to complete
700 	 */
701 	hammer_flusher_clean_loose_ios(hmp);
702 	hammer_io_wait_all(hmp, "hmrfl2");
703 
704 	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
705 		goto failed;
706 
	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.
	 *
	 * Repeated catchups will wind up flushing this update's meta-data
	 * and the UNDO buffers for the next update simultaneously.  This
	 * is ok.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		if (io->lock.refs == 0)
			++hammer_count_refedbufs;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset == cundomap->next_offset) {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		} else {
			cundomap->first_offset = cundomap->next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}