/*
 * Copyright (c) 2008 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/vfs/hammer/hammer_flusher.c,v 1.45 2008/07/31 04:42:04 dillon Exp $
 */
/*
 * HAMMER dependency flusher thread
 *
 * Meta-data updates create buffer dependencies which are arranged as a
 * hierarchy of lists.
 */

#include "hammer.h"

static void hammer_flusher_master_thread(void *arg);
static void hammer_flusher_slave_thread(void *arg);
static void hammer_flusher_flush(hammer_mount_t hmp);
static void hammer_flusher_flush_inode(hammer_inode_t ip,
					hammer_transaction_t trans);

RB_GENERATE(hammer_fls_rb_tree, hammer_inode, rb_flsnode,
              hammer_ino_rb_compare);

/*
 * Inodes are sorted and assigned to slave threads in groups of 128.
 * We want a flush group size large enough that the slave threads
 * are not likely to interfere with each other when accessing the B-Tree,
 * but not so large that we lose concurrency.
 */
#define HAMMER_FLUSH_GROUP_SIZE 128

/*
 * Support structures for the flusher threads.
 */
struct hammer_flusher_info {
	TAILQ_ENTRY(hammer_flusher_info) entry;
	struct hammer_mount *hmp;
	thread_t	td;
	int		runstate;
	int		count;
	hammer_flush_group_t flg;
	hammer_inode_t	work_array[HAMMER_FLUSH_GROUP_SIZE];
};

typedef struct hammer_flusher_info *hammer_flusher_info_t;

/*
 * Sync all inodes pending on the flusher.
 *
 * All flush groups will be flushed.  This does not queue dirty inodes
 * to the flush groups, it just flushes out what has already been queued!
 */
void
hammer_flusher_sync(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async(hmp, NULL);
	hammer_flusher_wait(hmp, seq);
}

/*
 * Sync all inodes pending on the flusher - return immediately.
 *
 * All flush groups will be flushed.
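 *
 * The returned sequence number can be passed to hammer_flusher_wait().
 * Every closed flush group that is not already running bumps the
 * expected sequence, so waiting on the returned value waits for all
 * work that was queued at the time of this call.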
 */
int
hammer_flusher_async(hammer_mount_t hmp, hammer_flush_group_t close_flg)
{
	hammer_flush_group_t flg;
	int seq = hmp->flusher.next;

	TAILQ_FOREACH(flg, &hmp->flush_group_list, flush_entry) {
		if (flg->running == 0)
			++seq;
		flg->closed = 1;
		if (flg == close_flg)
			break;
	}
	if (hmp->flusher.td) {
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

int
hammer_flusher_async_one(hammer_mount_t hmp)
{
	int seq;

	if (hmp->flusher.td) {
		seq = hmp->flusher.next;
		if (hmp->flusher.signal++ == 0)
			wakeup(&hmp->flusher.signal);
	} else {
		seq = hmp->flusher.done;
	}
	return(seq);
}

/*
 * Wait for the flusher to get to the specified sequence number.
 * Signal the flusher as often as necessary to keep it going.
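 *
 * The sequence counter may wrap, so the wait loop compares the signed
 * difference (int)(seq - done) rather than the raw values.  For
 * example, with seq == (int)0x80000001 and done == 0x7fffffff the
 * difference is 2, so we correctly keep waiting even though seq is
 * numerically the smaller signed value.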
 */
void
hammer_flusher_wait(hammer_mount_t hmp, int seq)
{
	while ((int)(seq - hmp->flusher.done) > 0) {
		if (hmp->flusher.act != seq) {
			if (hmp->flusher.signal++ == 0)
				wakeup(&hmp->flusher.signal);
		}
		tsleep(&hmp->flusher.done, 0, "hmrfls", 0);
	}
}

void
hammer_flusher_wait_next(hammer_mount_t hmp)
{
	int seq;

	seq = hammer_flusher_async_one(hmp);
	hammer_flusher_wait(hmp, seq);
}

void
hammer_flusher_create(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	int i;

	hmp->flusher.signal = 0;
	hmp->flusher.act = 0;
	hmp->flusher.done = 0;
	hmp->flusher.next = 1;
	hammer_ref(&hmp->flusher.finalize_lock);
	TAILQ_INIT(&hmp->flusher.run_list);
	TAILQ_INIT(&hmp->flusher.ready_list);

	lwkt_create(hammer_flusher_master_thread, hmp,
		    &hmp->flusher.td, NULL, 0, -1, "hammer-M");
	for (i = 0; i < HAMMER_MAX_FLUSHERS; ++i) {
		info = kmalloc(sizeof(*info), hmp->m_misc, M_WAITOK|M_ZERO);
		info->hmp = hmp;
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		lwkt_create(hammer_flusher_slave_thread, info,
			    &info->td, NULL, 0, -1, "hammer-S%d", i);
	}
}

void
hammer_flusher_destroy(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;

	/*
	 * Kill the master
	 */
	hmp->flusher.exiting = 1;
	while (hmp->flusher.td) {
		++hmp->flusher.signal;
		wakeup(&hmp->flusher.signal);
		tsleep(&hmp->flusher.exiting, 0, "hmrwex", hz);
	}

	/*
	 * Kill the slaves
	 */
	while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
		KKASSERT(info->runstate == 0);
		TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
		info->runstate = -1;
		wakeup(&info->runstate);
		while (info->td)
			tsleep(&info->td, 0, "hmrwwc", 0);
		kfree(info, hmp->m_misc);
	}
}

/*
 * The master flusher thread manages the flusher sequence id and
 * synchronization with the slave work threads.
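 *
 * Counter roles, as used throughout this file:
 *
 *	flusher.next	- next sequence number to hand out
 *	flusher.act	- sequence number of the cycle in progress
 *	flusher.done	- last completed sequence number
 *	flusher.signal	- pending wakeup count for the master thread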
 */
static void
hammer_flusher_master_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_mount_t hmp;

	hmp = arg;

	for (;;) {
		/*
		 * Do at least one flush cycle.  We may have to update the
		 * UNDO FIFO even if no inodes are queued.
		 */
		for (;;) {
			while (hmp->flusher.group_lock)
				tsleep(&hmp->flusher.group_lock, 0, "hmrhld", 0);
			hmp->flusher.act = hmp->flusher.next;
			++hmp->flusher.next;
			hammer_flusher_clean_loose_ios(hmp);
			hammer_flusher_flush(hmp);
			hmp->flusher.done = hmp->flusher.act;
			wakeup(&hmp->flusher.done);
			flg = TAILQ_FIRST(&hmp->flush_group_list);
			if (flg == NULL || flg->closed == 0)
				break;
			if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
				break;
		}

		/*
		 * Wait for activity.
		 */
		if (hmp->flusher.exiting && TAILQ_EMPTY(&hmp->flush_group_list))
			break;
		while (hmp->flusher.signal == 0)
			tsleep(&hmp->flusher.signal, 0, "hmrwwa", 0);

		/*
		 * Flush for each count on signal but only allow one extra
		 * flush request to build up.
		 */
		if (--hmp->flusher.signal != 0)
			hmp->flusher.signal = 1;
	}

	/*
	 * And we are done.
	 */
	hmp->flusher.td = NULL;
	wakeup(&hmp->flusher.exiting);
	lwkt_exit();
}

/*
 * Flush all inodes in the current flush group.
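 *
 * Inodes are handed to the slave threads in work_array batches of up
 * to HAMMER_FLUSH_GROUP_SIZE; a slave migrates from the ready list to
 * the run list while processing a batch and back again when it
 * finishes.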
 */
static void
hammer_flusher_flush(hammer_mount_t hmp)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	hammer_inode_t ip;
	hammer_inode_t next_ip;
	int slave_index;
	int count;

	/*
	 * Just in case there's a flush race on mount
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL)
		return;

	/*
	 * We only do one flg but we may have to loop/retry.
	 */
	count = 0;
	while ((flg = TAILQ_FIRST(&hmp->flush_group_list)) != NULL) {
		++count;
		if (hammer_debug_general & 0x0001) {
			kprintf("hammer_flush %d ttl=%d recs=%d\n",
				hmp->flusher.act,
				flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		/*
		 * Ok, we are running this flush group now (this prevents new
		 * additions to it).
		 */
		flg->running = 1;
		if (hmp->next_flush_group == flg)
			hmp->next_flush_group = TAILQ_NEXT(flg, flush_entry);

		/*
		 * Iterate the inodes in the flg's flush_tree and assign
		 * them to slaves.
		 */
		slave_index = 0;
		info = TAILQ_FIRST(&hmp->flusher.ready_list);
		next_ip = RB_FIRST(hammer_fls_rb_tree, &flg->flush_tree);

		while ((ip = next_ip) != NULL) {
			next_ip = RB_NEXT(hammer_fls_rb_tree,
					  &flg->flush_tree, ip);

			if (++hmp->check_yield > hammer_yield_check) {
				hmp->check_yield = 0;
				lwkt_user_yield();
			}

			/*
			 * Add ip to the slave's work array.  The slave is
			 * not currently running.
			 */
			info->work_array[info->count++] = ip;
			if (info->count != HAMMER_FLUSH_GROUP_SIZE)
				continue;

			/*
			 * Get the slave running
			 */
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);

			/*
			 * Get a new slave.  We may have to wait for one to
			 * finish running.
			 */
			while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) == NULL) {
				tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);
			}
		}

		/*
		 * Run the current slave if necessary
		 */
		if (info->count) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be referenced by crash recovery.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flush_group != hmp->flusher.act)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
}


/*
 * The slave flusher thread pulls work off the master flush list until no
 * work is left.
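 *
 * info->runstate is a small state machine: 0 means idle on the ready
 * list, 1 means a work_array batch has been handed over and the slave
 * is on the run list, and -1 asks the slave to exit.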
 */
static void
hammer_flusher_slave_thread(void *arg)
{
	hammer_flush_group_t flg;
	hammer_flusher_info_t info;
	hammer_mount_t hmp;
	hammer_inode_t ip;
	int i;

	info = arg;
	hmp = info->hmp;

	for (;;) {
		while (info->runstate == 0)
			tsleep(&info->runstate, 0, "hmrssw", 0);
		if (info->runstate < 0)
			break;
		flg = info->flg;

		for (i = 0; i < info->count; ++i) {
			ip = info->work_array[i];
			hammer_flusher_flush_inode(ip, &hmp->flusher.trans);
			++hammer_stats_inode_flushes;
		}
		info->count = 0;
		info->runstate = 0;
		TAILQ_REMOVE(&hmp->flusher.run_list, info, entry);
		TAILQ_INSERT_TAIL(&hmp->flusher.ready_list, info, entry);
		wakeup(&hmp->flusher.ready_list);
	}
	info->td = NULL;
	wakeup(&info->td);
	lwkt_exit();
}

void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * Loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * I/O completes on a buffer with no references left.
	 */
	if ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
		crit_enter();	/* biodone() race */
		while ((io = TAILQ_FIRST(&hmp->lose_list)) != NULL) {
			KKASSERT(io->mod_list == &hmp->lose_list);
			TAILQ_REMOVE(&hmp->lose_list, io, mod_entry);
			io->mod_list = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		crit_exit();
	}
}

/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious; even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
void
hammer_flusher_flush_inode(hammer_inode_t ip, hammer_transaction_t trans)
{
	hammer_mount_t hmp = ip->hmp;
	int error;

	hammer_flusher_clean_loose_ios(hmp);
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation; all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		kprintf("HAMMER: Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
}

/*
 * Return non-zero if the UNDO area has less than (quarter / 4) of its
 * space left.
 *
 * 1/4 - Emergency free undo space level.  Below this point the flusher
 *	 will finalize even if directory dependencies have not been resolved.
 *
 * 2/4 - Used by the pruning and reblocking code.  These functions may be
 *	 running in parallel with a flush and cannot be allowed to drop
 *	 available undo space to emergency levels.
 *
 * 3/4 - Used at the beginning of a flush to force-sync the volume header
 *	 to give the flush plenty of runway to work in.
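 *
 * Worked example (illustrative numbers only): if hammer_undo_max()
 * reports 512MB of UNDO space, quarter == 3 makes this return non-zero
 * once less than 384MB remains available, while quarter == 1 only
 * fires below 128MB.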
 */
int
hammer_flusher_undo_exhausted(hammer_transaction_t trans, int quarter)
{
	if (hammer_undo_space(trans) <
	    hammer_undo_max(trans->hmp) * quarter / 4) {
		return(1);
	} else {
		return(0);
	}
}

/*
 * Flush all pending UNDOs, wait for write completion, update the volume
 * header with the new UNDO end position, and flush it.  Then
 * asynchronously flush the meta-data.
 *
 * If this is the last finalization in a flush group we also synchronize
 * our cached blockmap and set hmp->flusher_undo_start and our cached undo
 * fifo first_offset so the next flush resets the FIFO pointers.
 *
 * If this is not final it is being called because too many dirty meta-data
 * buffers have built up and must be flushed with UNDO synchronization to
 * avoid a buffer cache deadlock.
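 *
 * The write ordering below is the crux of crash consistency: data
 * buffers first, then the UNDOs, then (for pre-version-4 volumes) the
 * volume header, and only then the meta-data that the UNDOs protect.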
 */
void
hammer_flusher_finalize(hammer_transaction_t trans, int final)
{
	hammer_volume_t root_volume;
	hammer_blockmap_t cundomap, dundomap;
	hammer_mount_t hmp;
	hammer_io_t io;
	hammer_off_t save_undo_next_offset;
	int count;
	int i;

	hmp = trans->hmp;
	root_volume = trans->rootvol;

	/*
	 * Exclusively lock the flusher.  This guarantees that all dirty
	 * buffers will be idled (have a mod-count of 0).
	 */
	++hmp->flusher.finalize_want;
	hammer_lock_ex(&hmp->flusher.finalize_lock);

	/*
	 * If this isn't the final sync several threads may have hit the
	 * meta-limit at the same time and raced.  Only sync if we really
	 * have to, after acquiring the lock.
	 */
	if (final == 0 && !hammer_flusher_meta_limit(hmp))
		goto done;

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto done;

	/*
	 * Flush data buffers.  This can occur asynchronously and at any
	 * time.  We must interlock against the frontend direct-data write
	 * but do not have to acquire the sync-lock yet.
	 *
	 * These data buffers have already been collected prior to the
	 * related inode(s) getting queued to the flush group.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->data_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		hammer_io_write_interlock(io);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * The sync-lock is required for the remaining sequence.  This lock
	 * prevents meta-data from being modified.
	 */
	hammer_sync_lock_ex(trans);

	/*
	 * If we have been asked to finalize the volume header sync the
	 * cached blockmap to the on-disk blockmap.  Generate an UNDO
	 * record for the update.
	 */
	if (final) {
		cundomap = &hmp->blockmap[0];
		dundomap = &root_volume->ondisk->vol0_blockmap[0];
		if (root_volume->io.modified) {
			hammer_modify_volume(trans, root_volume,
					     dundomap, sizeof(hmp->blockmap));
			for (i = 0; i < HAMMER_MAX_ZONES; ++i)
				hammer_crc_set_blockmap(&cundomap[i]);
			bcopy(cundomap, dundomap, sizeof(hmp->blockmap));
			hammer_modify_volume_done(root_volume);
		}
	}

	/*
	 * Flush UNDOs.  This can occur concurrently with the data flush
	 * because data writes never overwrite.
	 *
	 * This also waits for I/Os to complete and flushes the cache on
	 * the target disk.
	 *
	 * Record the UNDO append point as this can continue to change
	 * after we have flushed the UNDOs.
	 */
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	hammer_lock_ex(&hmp->undo_lock);
	save_undo_next_offset = cundomap->next_offset;
	hammer_unlock(&hmp->undo_lock);
	hammer_flusher_flush_undos(hmp, HAMMER_FLUSH_UNDOS_FORCED);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * HAMMER VERSION < 4:
	 *	Update the on-disk volume header with the new UNDO FIFO end
	 *	position (do not generate new UNDO records for this change).
	 *	We have to do this for the UNDO FIFO whether (final) is
	 *	set or not in order for the UNDOs to be recognized on
	 *	recovery.
	 *
	 * HAMMER VERSION >= 4:
	 *	The UNDO FIFO data written above will be recognized on
	 *	recovery without us having to sync the volume header.
	 *
	 * Also update the on-disk next_tid field.  This does not require
	 * an UNDO.  However, because our TID is generated before we get
	 * the sync lock another sync may have beat us to the punch.
	 *
	 * This also has the side effect of updating first_offset based on
	 * a prior finalization when the first finalization of the next flush
	 * cycle occurs, removing any undo info from the prior finalization
	 * from consideration.
	 *
	 * The volume header will be flushed out synchronously.
	 */
	dundomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	if (dundomap->first_offset != cundomap->first_offset ||
	    dundomap->next_offset != save_undo_next_offset) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		dundomap->first_offset = cundomap->first_offset;
		dundomap->next_offset = save_undo_next_offset;
		hammer_crc_set_blockmap(dundomap);
		hammer_modify_volume_done(root_volume);
	}

	/*
	 * vol0_next_tid is used for TID selection and is updated without
	 * an UNDO so we do not reuse a TID that may have been rolled-back.
	 *
	 * vol0_last_tid is the highest fully-synchronized TID.  It is
	 * set-up when the UNDO fifo is fully synced, later on (not here).
	 *
	 * The root volume can be open for modification by other threads
	 * generating UNDO or REDO records (for example reblocking, pruning,
	 * and REDO mode fast-fsyncs), so the write interlock is mandatory.
	 */
	if (root_volume->io.modified) {
		hammer_modify_volume(NULL, root_volume, NULL, 0);
		if (root_volume->ondisk->vol0_next_tid < trans->tid)
			root_volume->ondisk->vol0_next_tid = trans->tid;
		hammer_crc_set_volume(root_volume->ondisk);
		hammer_modify_volume_done(root_volume);
		hammer_io_write_interlock(&root_volume->io);
		hammer_io_flush(&root_volume->io, 0);
		hammer_io_done_interlock(&root_volume->io);
	}

	/*
	 * Wait for I/Os to complete.
	 *
	 * For HAMMER VERSION 4+ filesystems we do not have to wait for
	 * the I/O to complete as the new UNDO FIFO entries are recognized
	 * even without the volume header update.  This allows the volume
	 * header to be flushed along with meta-data, significantly reducing
	 * flush overheads.
	 */
	hammer_flusher_clean_loose_ios(hmp);
	if (hmp->version < HAMMER_VOL_VERSION_FOUR)
		hammer_io_wait_all(hmp, "hmrfl3", 1);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		goto failed;

	/*
	 * Flush meta-data.  The meta-data will be undone if we crash
	 * so we can safely flush it asynchronously.  There is no need
	 * to wait for I/O to complete (or issue a synchronous disk flush).
	 *
	 * In fact, even if we did wait the meta-data will still be undone
	 * by a crash up until the next flush cycle due to the first_offset
	 * in the volume header for the UNDO FIFO not being adjusted until
	 * the following flush cycle.
	 */
	count = 0;
	while ((io = TAILQ_FIRST(&hmp->meta_list)) != NULL) {
		if (io->ioerror)
			break;
		KKASSERT(io->modify_refs == 0);
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_flush(io, 0);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}

	/*
	 * If this is the final finalization for the flush group set
	 * up for the next sequence by setting a new first_offset in
	 * our cached blockmap and clearing the undo history.
	 *
	 * Even though we have updated our cached first_offset, the on-disk
	 * first_offset still governs available-undo-space calculations.
	 *
	 * We synchronize to save_undo_next_offset rather than
	 * cundomap->next_offset because that is what we flushed out
	 * above.
	 *
	 * NOTE! UNDOs can only be added with the sync_lock held
	 *	 so we can clear the undo history without racing.
	 *	 REDOs can be added at any time which is why we
	 *	 have to be careful and use save_undo_next_offset
	 *	 when setting the new first_offset.
	 */
	if (final) {
		cundomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
		if (cundomap->first_offset != save_undo_next_offset) {
			cundomap->first_offset = save_undo_next_offset;
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else if (cundomap->first_offset != cundomap->next_offset) {
			hmp->hflags |= HMNT_UNDO_DIRTY;
		} else {
			hmp->hflags &= ~HMNT_UNDO_DIRTY;
		}
		hammer_clear_undo_history(hmp);

		/*
		 * Flush tid sequencing.  flush_tid1 is fully synchronized,
		 * meaning a crash will not roll it back.  flush_tid2 has
		 * been written out asynchronously and a crash will roll
		 * it back.  flush_tid1 is used for all mirroring masters.
		 */
		if (hmp->flush_tid1 != hmp->flush_tid2) {
			hmp->flush_tid1 = hmp->flush_tid2;
			wakeup(&hmp->flush_tid1);
		}
		hmp->flush_tid2 = trans->tid;

		/*
		 * Clear the REDO SYNC flag.  This flag is used to ensure
		 * that the recovery span in the UNDO/REDO FIFO contains
		 * at least one REDO SYNC record.
		 */
		hmp->flags &= ~HAMMER_MOUNT_REDO_SYNC;
	}

	/*
	 * Cleanup.  Report any critical errors.
	 */
failed:
	hammer_sync_unlock(trans);

	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR) {
		kprintf("HAMMER(%s): Critical write error during flush, "
			"refusing to sync UNDO FIFO\n",
			root_volume->ondisk->vol_name);
	}

done:
	hammer_unlock(&hmp->flusher.finalize_lock);

	if (--hmp->flusher.finalize_want == 0)
		wakeup(&hmp->flusher.finalize_want);
	hammer_stats_commits += final;
}

/*
 * Flush UNDOs.
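 *
 * The mode governs the wait at the end: HAMMER_FLUSH_UNDOS_FORCED
 * always takes the full-wait path, HAMMER_FLUSH_UNDOS_AUTO takes it
 * only when UNDO buffers were actually written, and anything else
 * falls through to the weaker wait.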
 */
void
hammer_flusher_flush_undos(hammer_mount_t hmp, int mode)
{
	hammer_io_t io;
	int count;

	count = 0;
	while ((io = TAILQ_FIRST(&hmp->undo_list)) != NULL) {
		if (io->ioerror)
			break;
		hammer_ref(&io->lock);
		KKASSERT(io->type != HAMMER_STRUCTURE_VOLUME);
		hammer_io_write_interlock(io);
		hammer_io_flush(io, hammer_undo_reclaim(io));
		hammer_io_done_interlock(io);
		hammer_rel_buffer((hammer_buffer_t)io, 0);
		++count;
	}
	hammer_flusher_clean_loose_ios(hmp);
	if (mode == HAMMER_FLUSH_UNDOS_FORCED ||
	    (mode == HAMMER_FLUSH_UNDOS_AUTO && count)) {
		hammer_io_wait_all(hmp, "hmrfl1", 1);
	} else {
		hammer_io_wait_all(hmp, "hmrfl2", 0);
	}
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * Since we cannot allow such buffers to flush until we have dealt with
 * the UNDOs, we risk deadlocking the kernel's buffer cache.
 */
int
hammer_flusher_meta_limit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if too many dirty meta-data buffers have built up.
 *
 * This version is used by background operations (mirror, prune, reblock)
 * to leave room for foreground operations.
 */
int
hammer_flusher_meta_halflimit(hammer_mount_t hmp)
{
	if (hmp->locked_dirty_space + hmp->io_running_space >
	    hammer_limit_dirtybufspace / 2) {
		return(1);
	}
	return(0);
}

/*
 * Return non-zero if the flusher still has something to flush.
 */
int
hammer_flusher_haswork(hammer_mount_t hmp)
{
	if (hmp->ronly)
		return(0);
	if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
		return(0);
	if (TAILQ_FIRST(&hmp->flush_group_list) ||	/* dirty inodes */
	    TAILQ_FIRST(&hmp->volu_list) ||		/* dirty buffers */
	    TAILQ_FIRST(&hmp->undo_list) ||
	    TAILQ_FIRST(&hmp->data_list) ||
	    TAILQ_FIRST(&hmp->meta_list) ||
	    (hmp->hflags & HMNT_UNDO_DIRTY)		/* UNDO FIFO sync */
	) {
		return(1);
	}
	return(0);
}